From 69e1312450048532e6ad58f78e805cb09acfeb38 Mon Sep 17 00:00:00 2001 From: Chris Martin Date: Mon, 24 Jun 2024 10:04:34 +0100 Subject: [PATCH 1/9] update alpine to latest version (#3749) Signed-off-by: Chris Martin Co-authored-by: Chris Martin --- .goreleaser.yml | 2 +- build/armada-load-tester/Dockerfile | 2 +- build/armada/Dockerfile | 2 +- build/armadactl/Dockerfile | 2 +- build/binoculars/Dockerfile | 2 +- build/eventingester/Dockerfile | 2 +- build/executor/Dockerfile | 2 +- build/fakeexecutor/Dockerfile | 2 +- build/jobservice/Dockerfile | 2 +- build/lookoutingesterv2/Dockerfile | 2 +- build/lookoutv2/Dockerfile | 2 +- build/scheduler/Dockerfile | 2 +- build/scheduleringester/Dockerfile | 2 +- build/testsuite/Dockerfile | 2 +- build_goreleaser/armadactl/Dockerfile | 2 +- build_goreleaser/binoculars/Dockerfile | 2 +- build_goreleaser/bundles/armada/Dockerfile | 2 +- build_goreleaser/bundles/full/Dockerfile | 2 +- build_goreleaser/bundles/lookout/Dockerfile | 2 +- build_goreleaser/eventingester/Dockerfile | 2 +- build_goreleaser/executor/Dockerfile | 2 +- build_goreleaser/fakeexecutor/Dockerfile | 2 +- build_goreleaser/jobservice/Dockerfile | 2 +- build_goreleaser/loadtester/Dockerfile | 2 +- build_goreleaser/lookoutingesterv2/Dockerfile | 4 ++-- build_goreleaser/lookoutv2/Dockerfile | 2 +- build_goreleaser/scheduler/Dockerfile | 2 +- build_goreleaser/scheduleringester/Dockerfile | 2 +- build_goreleaser/server/Dockerfile | 2 +- build_goreleaser/testsuite/Dockerfile | 2 +- cmd/armada-load-tester/cmd/loadtest.go | 2 +- developer/config/job.yaml | 4 ++-- e2e/armadactl_test/armadactl_test.go | 2 +- pkg/armadaevents/events_util_test.go | 2 +- third_party/airflow/README.md | 2 +- third_party/airflow/test/operators/test_armada.py | 2 +- 36 files changed, 38 insertions(+), 38 deletions(-) diff --git a/.goreleaser.yml b/.goreleaser.yml index 229b42622e2..251fe7001da 100644 --- a/.goreleaser.yml +++ b/.goreleaser.yml @@ -195,7 +195,7 @@ dockers: - --label=org.opencontainers.image.version={{ .Version }} - --label=org.opencontainers.image.created={{ time "2006-01-02T15:04:05Z07:00" }} - --label=org.opencontainers.image.revision={{ .FullCommit }} - - --label=org.opencontainers.image.base.name=alpine:3.18.3 + - --label=org.opencontainers.image.base.name=alpine:3.20.1 - --label=org.opencontainers.image.licenses=Apache-2.0 - --label=org.opencontainers.image.vendor=G-Research ids: diff --git a/build/armada-load-tester/Dockerfile b/build/armada-load-tester/Dockerfile index 09b8b4aeac9..9ecee0f7061 100644 --- a/build/armada-load-tester/Dockerfile +++ b/build/armada-load-tester/Dockerfile @@ -1,4 +1,4 @@ -FROM alpine:3.18.3 +FROM alpine:3.20.1 RUN addgroup -S -g 2000 armada && adduser -S -u 1000 armada -G armada diff --git a/build/armada/Dockerfile b/build/armada/Dockerfile index 6614890e768..7d15d88abb1 100644 --- a/build/armada/Dockerfile +++ b/build/armada/Dockerfile @@ -1,4 +1,4 @@ -FROM alpine:3.18.3 +FROM alpine:3.20.1 RUN addgroup -S -g 2000 armada && adduser -S -u 1000 armada -G armada diff --git a/build/armadactl/Dockerfile b/build/armadactl/Dockerfile index 1fb97defb9e..1c0e5518eab 100644 --- a/build/armadactl/Dockerfile +++ b/build/armadactl/Dockerfile @@ -1,4 +1,4 @@ -FROM alpine:3.18.3 +FROM alpine:3.20.1 RUN addgroup -S -g 2000 armada && adduser -S -u 1000 armada -G armada diff --git a/build/binoculars/Dockerfile b/build/binoculars/Dockerfile index 640fd53b986..a2d05ef191e 100644 --- a/build/binoculars/Dockerfile +++ b/build/binoculars/Dockerfile @@ -1,4 +1,4 @@ -FROM alpine:3.18.3 +FROM 
alpine:3.20.1 RUN addgroup -S -g 2000 armada && adduser -S -u 1000 armada -G armada diff --git a/build/eventingester/Dockerfile b/build/eventingester/Dockerfile index ea77f3c9ca0..e1c29decfb0 100644 --- a/build/eventingester/Dockerfile +++ b/build/eventingester/Dockerfile @@ -1,4 +1,4 @@ -FROM alpine:3.18.3 +FROM alpine:3.20.1 RUN addgroup -S -g 2000 armada && adduser -S -u 1000 armada -G armada diff --git a/build/executor/Dockerfile b/build/executor/Dockerfile index 9a139fffbed..99dfe76151a 100644 --- a/build/executor/Dockerfile +++ b/build/executor/Dockerfile @@ -1,4 +1,4 @@ -FROM alpine:3.18.3 +FROM alpine:3.20.1 RUN addgroup -S -g 2000 armada && adduser -S -u 1000 armada -G armada diff --git a/build/fakeexecutor/Dockerfile b/build/fakeexecutor/Dockerfile index 8f822b59581..7e92d6b1ec5 100644 --- a/build/fakeexecutor/Dockerfile +++ b/build/fakeexecutor/Dockerfile @@ -1,4 +1,4 @@ -FROM alpine:3.18.3 +FROM alpine:3.20.1 RUN addgroup -S -g 2000 armada && adduser -S -u 1000 armada -G armada diff --git a/build/jobservice/Dockerfile b/build/jobservice/Dockerfile index 1f5bc9a9af2..b9340243bfd 100644 --- a/build/jobservice/Dockerfile +++ b/build/jobservice/Dockerfile @@ -1,4 +1,4 @@ -FROM alpine:3.18.3 +FROM alpine:3.20.1 RUN addgroup -S -g 2000 armada && adduser -S -u 1000 armada -G armada diff --git a/build/lookoutingesterv2/Dockerfile b/build/lookoutingesterv2/Dockerfile index f8128d0cc9a..c2ef341b15d 100644 --- a/build/lookoutingesterv2/Dockerfile +++ b/build/lookoutingesterv2/Dockerfile @@ -1,4 +1,4 @@ -FROM alpine:3.18.3 +FROM alpine:3.20.1 RUN addgroup -S -g 2000 armada && adduser -S -u 1000 armada -G armada diff --git a/build/lookoutv2/Dockerfile b/build/lookoutv2/Dockerfile index 0d463389398..1a672055b0b 100644 --- a/build/lookoutv2/Dockerfile +++ b/build/lookoutv2/Dockerfile @@ -1,4 +1,4 @@ -FROM alpine:3.18.3 +FROM alpine:3.20.1 RUN addgroup -S -g 2000 armada && adduser -S -u 1000 armada -G armada diff --git a/build/scheduler/Dockerfile b/build/scheduler/Dockerfile index b9cab04aebc..28a11f01b8e 100644 --- a/build/scheduler/Dockerfile +++ b/build/scheduler/Dockerfile @@ -1,4 +1,4 @@ -FROM alpine:3.18.3 +FROM alpine:3.20.1 RUN addgroup -S -g 2000 armada && adduser -S -u 1000 armada -G armada diff --git a/build/scheduleringester/Dockerfile b/build/scheduleringester/Dockerfile index 810c76e9a01..817dd8d297c 100644 --- a/build/scheduleringester/Dockerfile +++ b/build/scheduleringester/Dockerfile @@ -1,4 +1,4 @@ -FROM alpine:3.18.3 +FROM alpine:3.20.1 RUN addgroup -S -g 2000 armada && adduser -S -u 1000 armada -G armada diff --git a/build/testsuite/Dockerfile b/build/testsuite/Dockerfile index b3a69121166..319eee90738 100644 --- a/build/testsuite/Dockerfile +++ b/build/testsuite/Dockerfile @@ -1,4 +1,4 @@ -FROM alpine:3.18.3 +FROM alpine:3.20.1 RUN addgroup -S -g 2000 armada && adduser -S -u 1000 armada -G armada diff --git a/build_goreleaser/armadactl/Dockerfile b/build_goreleaser/armadactl/Dockerfile index b286a5ae77d..80d6b6a66df 100644 --- a/build_goreleaser/armadactl/Dockerfile +++ b/build_goreleaser/armadactl/Dockerfile @@ -1,4 +1,4 @@ -ARG BASE_IMAGE=alpine:3.18.3 +ARG BASE_IMAGE=alpine:3.20.1 FROM ${BASE_IMAGE} LABEL org.opencontainers.image.title=armadactl diff --git a/build_goreleaser/binoculars/Dockerfile b/build_goreleaser/binoculars/Dockerfile index a64955d0003..67e3c944ff1 100644 --- a/build_goreleaser/binoculars/Dockerfile +++ b/build_goreleaser/binoculars/Dockerfile @@ -1,4 +1,4 @@ -ARG BASE_IMAGE=alpine:3.18.3 +ARG BASE_IMAGE=alpine:3.20.1 FROM ${BASE_IMAGE} LABEL 
org.opencontainers.image.title=binoculars LABEL org.opencontainers.image.description="binoculars" diff --git a/build_goreleaser/bundles/armada/Dockerfile b/build_goreleaser/bundles/armada/Dockerfile index 133a11e853f..3403e3b620f 100644 --- a/build_goreleaser/bundles/armada/Dockerfile +++ b/build_goreleaser/bundles/armada/Dockerfile @@ -1,4 +1,4 @@ -ARG BASE_IMAGE=alpine:3.18.3 +ARG BASE_IMAGE=alpine:3.20.1 FROM ${BASE_IMAGE} LABEL org.opencontainers.image.title=armada LABEL org.opencontainers.image.description="Armada Bundle" diff --git a/build_goreleaser/bundles/full/Dockerfile b/build_goreleaser/bundles/full/Dockerfile index c4c09916783..a1e4ec658ae 100644 --- a/build_goreleaser/bundles/full/Dockerfile +++ b/build_goreleaser/bundles/full/Dockerfile @@ -1,6 +1,6 @@ ARG NODE_BUILD_IMAGE=node:16.14-buster ARG OPENAPI_BUILD_IMAGE=openapitools/openapi-generator-cli:v5.4.0 -ARG BASE_IMAGE=alpine:3.18.3 +ARG BASE_IMAGE=alpine:3.20.1 FROM ${OPENAPI_BUILD_IMAGE} AS OPENAPI LABEL org.opencontainers.image.title=armada-full-bundle diff --git a/build_goreleaser/bundles/lookout/Dockerfile b/build_goreleaser/bundles/lookout/Dockerfile index 3a180b7ce7b..ac1e173ca95 100644 --- a/build_goreleaser/bundles/lookout/Dockerfile +++ b/build_goreleaser/bundles/lookout/Dockerfile @@ -1,4 +1,4 @@ -ARG BASE_IMAGE=alpine:3.18.3 +ARG BASE_IMAGE=alpine:3.20.1 FROM ${BASE_IMAGE} LABEL org.opencontainers.image.title=armada-lookout-bundle diff --git a/build_goreleaser/eventingester/Dockerfile b/build_goreleaser/eventingester/Dockerfile index dd15e0a8a3a..70431bb3967 100644 --- a/build_goreleaser/eventingester/Dockerfile +++ b/build_goreleaser/eventingester/Dockerfile @@ -1,4 +1,4 @@ -ARG BASE_IMAGE=alpine:3.18.5 +ARG BASE_IMAGE=alpine:3.20.1 FROM ${BASE_IMAGE} LABEL org.opencontainers.image.title=eventingester diff --git a/build_goreleaser/executor/Dockerfile b/build_goreleaser/executor/Dockerfile index 36d7ceeb679..0b627d7f2ce 100644 --- a/build_goreleaser/executor/Dockerfile +++ b/build_goreleaser/executor/Dockerfile @@ -1,4 +1,4 @@ -ARG BASE_IMAGE=alpine:3.18.3 +ARG BASE_IMAGE=alpine:3.20.1 FROM ${BASE_IMAGE} LABEL org.opencontainers.image.title=executor diff --git a/build_goreleaser/fakeexecutor/Dockerfile b/build_goreleaser/fakeexecutor/Dockerfile index d7fa88edb17..444cdc5afb0 100644 --- a/build_goreleaser/fakeexecutor/Dockerfile +++ b/build_goreleaser/fakeexecutor/Dockerfile @@ -1,4 +1,4 @@ -ARG BASE_IMAGE=alpine:3.18.3 +ARG BASE_IMAGE=alpine:3.20.1 FROM ${BASE_IMAGE} LABEL org.opencontainers.image.title=fakeexecutor LABEL org.opencontainers.image.description="Fake Executor" diff --git a/build_goreleaser/jobservice/Dockerfile b/build_goreleaser/jobservice/Dockerfile index 9da0241774b..cb1540bbb7a 100644 --- a/build_goreleaser/jobservice/Dockerfile +++ b/build_goreleaser/jobservice/Dockerfile @@ -1,4 +1,4 @@ -ARG BASE_IMAGE=alpine:3.18.3 +ARG BASE_IMAGE=alpine:3.20.1 FROM ${BASE_IMAGE} LABEL org.opencontainers.image.title=jobservice diff --git a/build_goreleaser/loadtester/Dockerfile b/build_goreleaser/loadtester/Dockerfile index e716e2fa7e1..5ecb9156aa7 100644 --- a/build_goreleaser/loadtester/Dockerfile +++ b/build_goreleaser/loadtester/Dockerfile @@ -1,4 +1,4 @@ -ARG BASE_IMAGE=alpine:3.18.3 +ARG BASE_IMAGE=alpine:3.20.1 FROM ${BASE_IMAGE} LABEL org.opencontainers.image.title=loadtester diff --git a/build_goreleaser/lookoutingesterv2/Dockerfile b/build_goreleaser/lookoutingesterv2/Dockerfile index be74008b091..25595221e68 100644 --- a/build_goreleaser/lookoutingesterv2/Dockerfile +++ 
b/build_goreleaser/lookoutingesterv2/Dockerfile @@ -1,4 +1,4 @@ -ARG BASE_IMAGE=alpine:3.18.3 +ARG BASE_IMAGE=alpine:3.20.1 FROM ${BASE_IMAGE} LABEL org.opencontainers.image.title=lookoutingesterv2 @@ -13,4 +13,4 @@ COPY config/lookoutingesterv2/config.yaml /app/config/lookoutingesterv2/config.y WORKDIR /app -ENTRYPOINT ["./lookoutingesterv2"] \ No newline at end of file +ENTRYPOINT ["./lookoutingesterv2"] diff --git a/build_goreleaser/lookoutv2/Dockerfile b/build_goreleaser/lookoutv2/Dockerfile index b3c07af4097..7d0e1ebca53 100644 --- a/build_goreleaser/lookoutv2/Dockerfile +++ b/build_goreleaser/lookoutv2/Dockerfile @@ -1,6 +1,6 @@ ARG NODE_BUILD_IMAGE=node:16.14-buster ARG OPENAPI_BUILD_IMAGE=openapitools/openapi-generator-cli:v5.4.0 -ARG BASE_IMAGE=alpine:3.18.3 +ARG BASE_IMAGE=alpine:3.20.1 FROM ${OPENAPI_BUILD_IMAGE} AS OPENAPI diff --git a/build_goreleaser/scheduler/Dockerfile b/build_goreleaser/scheduler/Dockerfile index 6922cd3be2e..24de8d5b69e 100644 --- a/build_goreleaser/scheduler/Dockerfile +++ b/build_goreleaser/scheduler/Dockerfile @@ -1,4 +1,4 @@ -ARG BASE_IMAGE=alpine:3.18.3 +ARG BASE_IMAGE=alpine:3.20.1 FROM ${BASE_IMAGE} LABEL org.opencontainers.image.title=scheduler diff --git a/build_goreleaser/scheduleringester/Dockerfile b/build_goreleaser/scheduleringester/Dockerfile index 40a58a9e5b7..1d5096fc9ef 100644 --- a/build_goreleaser/scheduleringester/Dockerfile +++ b/build_goreleaser/scheduleringester/Dockerfile @@ -1,4 +1,4 @@ -ARG BASE_IMAGE=alpine:3.18.3 +ARG BASE_IMAGE=alpine:3.20.1 FROM ${BASE_IMAGE} LABEL org.opencontainers.image.title=scheduleringester diff --git a/build_goreleaser/server/Dockerfile b/build_goreleaser/server/Dockerfile index 9568aa50aad..614ef8591e9 100644 --- a/build_goreleaser/server/Dockerfile +++ b/build_goreleaser/server/Dockerfile @@ -1,4 +1,4 @@ -ARG BASE_IMAGE=alpine:3.18.3 +ARG BASE_IMAGE=alpine:3.20.1 FROM ${BASE_IMAGE} LABEL org.opencontainers.image.title=armada-server diff --git a/build_goreleaser/testsuite/Dockerfile b/build_goreleaser/testsuite/Dockerfile index 514c37566a8..c8dfccf4a95 100644 --- a/build_goreleaser/testsuite/Dockerfile +++ b/build_goreleaser/testsuite/Dockerfile @@ -1,4 +1,4 @@ -ARG BASE_IMAGE=alpine:3.18.3 +ARG BASE_IMAGE=alpine:3.20.1 FROM ${BASE_IMAGE} LABEL org.opencontainers.image.title=testsuite LABEL org.opencontainers.image.description="Test Suite" diff --git a/cmd/armada-load-tester/cmd/loadtest.go b/cmd/armada-load-tester/cmd/loadtest.go index 126379afd4d..ad5a47eea85 100644 --- a/cmd/armada-load-tester/cmd/loadtest.go +++ b/cmd/armada-load-tester/cmd/loadtest.go @@ -52,7 +52,7 @@ var loadtestCmd = &cobra.Command{ containers: - name: sleep imagePullPolicy: IfNotPresent - image: alpine:3.18.3 + image: alpine:3.20.1 command: - sh args: diff --git a/developer/config/job.yaml b/developer/config/job.yaml index aab2b8d257b..38906368930 100644 --- a/developer/config/job.yaml +++ b/developer/config/job.yaml @@ -11,7 +11,7 @@ jobs: containers: - name: sleep imagePullPolicy: IfNotPresent - image: alpine:3.10 + image: alpine:latest args: - "exit" - "1" @@ -26,4 +26,4 @@ jobs: timeout: "100s" expectedEvents: - submitted: - - failed: \ No newline at end of file + - failed: diff --git a/e2e/armadactl_test/armadactl_test.go b/e2e/armadactl_test/armadactl_test.go index f5ee2fd956d..419b4eaa114 100644 --- a/e2e/armadactl_test/armadactl_test.go +++ b/e2e/armadactl_test/armadactl_test.go @@ -175,7 +175,7 @@ jobs: containers: - name: ls imagePullPolicy: IfNotPresent - image: alpine:3.18.3 + image: alpine:3.20.1 command: - sh - 
-c diff --git a/pkg/armadaevents/events_util_test.go b/pkg/armadaevents/events_util_test.go index 7a3b1255e02..fde5e1be7ed 100644 --- a/pkg/armadaevents/events_util_test.go +++ b/pkg/armadaevents/events_util_test.go @@ -68,7 +68,7 @@ func generateFullES() ([]byte, error) { Containers: []v1.Container{ { Name: "container1", - Image: "alpine:3.10", + Image: "alpine:3.20.1", Args: []string{"sleep", "5s"}, Resources: v1.ResourceRequirements{ Requests: v1.ResourceList{"cpu": cpu, "memory": memory}, diff --git a/third_party/airflow/README.md b/third_party/airflow/README.md index a09df8865c5..fe3bb0c8c18 100644 --- a/third_party/airflow/README.md +++ b/third_party/airflow/README.md @@ -39,7 +39,7 @@ def create_dummy_job(): containers=[ core_v1.Container( name="sleep", - image="alpine:3.16.2", + image="alpine:3.20.1", args=["sh", "-c", "for i in $(seq 1 60); do echo $i; sleep 1; done"], securityContext=core_v1.SecurityContext(runAsUser=1000), resources=core_v1.ResourceRequirements( diff --git a/third_party/airflow/test/operators/test_armada.py b/third_party/airflow/test/operators/test_armada.py index f1502bc206f..1f134ce3411 100644 --- a/third_party/airflow/test/operators/test_armada.py +++ b/third_party/airflow/test/operators/test_armada.py @@ -274,7 +274,7 @@ def test_templating(self): containers=[ core_v1.Container( name="sleep", - image="alpine:3.16.2", + image="alpine:3.20.1", args=[pod_arg], securityContext=core_v1.SecurityContext(runAsUser=1000), resources=core_v1.ResourceRequirements( From b1ae80efbd24fba4ac1aea6d1672f483c8342eca Mon Sep 17 00:00:00 2001 From: Chris Martin Date: Mon, 24 Jun 2024 10:15:02 +0100 Subject: [PATCH 2/9] Show Rejections in Lookout (#3663) * wip Signed-off-by: Chris Martin * wip Signed-off-by: Chris Martin * wip Signed-off-by: Chris Martin * wip Signed-off-by: Chris Martin * unit tests for ingester Signed-off-by: Chris Martin * wip Signed-off-by: Chris Martin * wip Signed-off-by: Chris Martin * fix tests Signed-off-by: Chris Martin * lint Signed-off-by: Chris Martin * extra test Signed-off-by: Chris Martin * code review comments Signed-off-by: Chris Martin --------- Signed-off-by: Chris Martin Co-authored-by: Chris Martin --- internal/armada/queryapi/query_api.go | 1 + internal/common/database/lookout/jobstates.go | 4 + internal/common/ingest/testfixtures/event.go | 19 + .../instructions/instructions.go | 20 +- .../instructions/instructions_test.go | 20 +- .../lookoutingesterv2/lookoutdb/insertion.go | 93 ++++- .../lookoutdb/insertion_test.go | 93 ++++- internal/lookoutingesterv2/model/model.go | 17 +- internal/lookoutv2/application.go | 14 + internal/lookoutv2/gen/models/job.go | 7 +- .../lookoutv2/gen/restapi/embedded_spec.go | 120 +++++- .../gen/restapi/operations/get_job_error.go | 154 ++++++++ .../operations/get_job_error_parameters.go | 83 ++++ .../operations/get_job_error_responses.go | 163 ++++++++ .../operations/get_job_error_urlbuilder.go | 84 ++++ .../gen/restapi/operations/lookout_api.go | 12 + internal/lookoutv2/repository/getjoberror.go | 44 +++ .../lookoutv2/repository/getjoberror_test.go | 53 +++ .../lookoutv2/repository/groupjobs_test.go | 3 + internal/lookoutv2/repository/util.go | 27 ++ .../schema/migrations/011_add_job_error.sql | 5 + internal/lookoutv2/swagger.yaml | 39 ++ pkg/api/api.swagger.go | 3 +- pkg/api/api.swagger.json | 3 +- pkg/api/submit.pb.go | 368 +++++++++--------- pkg/api/submit.proto | 1 + 26 files changed, 1231 insertions(+), 219 deletions(-) create mode 100644 internal/lookoutv2/gen/restapi/operations/get_job_error.go create 
mode 100644 internal/lookoutv2/gen/restapi/operations/get_job_error_parameters.go create mode 100644 internal/lookoutv2/gen/restapi/operations/get_job_error_responses.go create mode 100644 internal/lookoutv2/gen/restapi/operations/get_job_error_urlbuilder.go create mode 100644 internal/lookoutv2/repository/getjoberror.go create mode 100644 internal/lookoutv2/repository/getjoberror_test.go create mode 100644 internal/lookoutv2/schema/migrations/011_add_job_error.sql diff --git a/internal/armada/queryapi/query_api.go b/internal/armada/queryapi/query_api.go index a7853ac9744..890ec9cbe77 100644 --- a/internal/armada/queryapi/query_api.go +++ b/internal/armada/queryapi/query_api.go @@ -24,6 +24,7 @@ var JobStateMap = map[int16]api.JobState{ lookout.JobFailedOrdinal: api.JobState_FAILED, lookout.JobCancelledOrdinal: api.JobState_CANCELLED, lookout.JobPreemptedOrdinal: api.JobState_PREEMPTED, + lookout.JobRejectedOrdinal: api.JobState_REJECTED, } // JobRunStateMap is a mapping between database state and api Job Run states diff --git a/internal/common/database/lookout/jobstates.go b/internal/common/database/lookout/jobstates.go index 3c97e00cb77..418eeb04ac5 100644 --- a/internal/common/database/lookout/jobstates.go +++ b/internal/common/database/lookout/jobstates.go @@ -15,6 +15,7 @@ const ( JobCancelled JobState = "CANCELLED" JobPreempted JobState = "PREEMPTED" JobLeased JobState = "LEASED" + JobRejected JobState = "REJECTED" JobQueuedOrdinal = 1 JobPendingOrdinal = 2 @@ -24,6 +25,7 @@ const ( JobCancelledOrdinal = 6 JobPreemptedOrdinal = 7 JobLeasedOrdinal = 8 + JobRejectedOrdinal = 9 JobRunPending JobRunState = "RUN_PENDING" JobRunRunning JobRunState = "RUN_RUNNING" @@ -63,6 +65,7 @@ var ( JobFailed, JobCancelled, JobPreempted, + JobRejected, } JobStateMap = map[int]JobState{ @@ -74,6 +77,7 @@ var ( JobFailedOrdinal: JobFailed, JobCancelledOrdinal: JobCancelled, JobPreemptedOrdinal: JobPreempted, + JobRejectedOrdinal: JobRejected, } JobStateOrdinalMap = util.InverseMap(JobStateMap) diff --git a/internal/common/ingest/testfixtures/event.go b/internal/common/ingest/testfixtures/event.go index 018dc3a2565..fdd421e1be6 100644 --- a/internal/common/ingest/testfixtures/event.go +++ b/internal/common/ingest/testfixtures/event.go @@ -513,6 +513,25 @@ var JobPreempted = &armadaevents.EventSequence_Event{ }, } +var JobRejected = &armadaevents.EventSequence_Event{ + Created: &testfixtures.BaseTime, + Event: &armadaevents.EventSequence_Event_JobErrors{ + JobErrors: &armadaevents.JobErrors{ + JobId: JobIdProto, + Errors: []*armadaevents.Error{ + { + Terminal: true, + Reason: &armadaevents.Error_JobRejected{ + JobRejected: &armadaevents.JobRejected{ + Message: ErrMsg, + }, + }, + }, + }, + }, + }, +} + var JobFailed = &armadaevents.EventSequence_Event{ Created: &testfixtures.BaseTime, Event: &armadaevents.EventSequence_Event_JobErrors{ diff --git a/internal/lookoutingesterv2/instructions/instructions.go b/internal/lookoutingesterv2/instructions/instructions.go index 8d6268fd063..9b2be688563 100644 --- a/internal/lookoutingesterv2/instructions/instructions.go +++ b/internal/lookoutingesterv2/instructions/instructions.go @@ -143,10 +143,6 @@ func (c *InstructionConverter) handleSubmitJob( c.metrics.RecordPulsarMessageError(metrics.PulsarMessageErrorProcessing) return err } - if event.IsDuplicate { - log.Debugf("job %s is a duplicate, ignoring", jobId) - return nil - } // Try and marshall the job proto. 
This shouldn't go wrong but if it does, it's not a fatal error // Rather it means that the job spec won't be available in the ui @@ -201,9 +197,8 @@ func (c *InstructionConverter) handleSubmitJob( return err } -// extractUserAnnotations strips userAnnotationPrefix from all keys and -// truncates keys and values to their maximal lengths (as specified by -// maxAnnotationKeyLen and maxAnnotationValLen). +// extractUserAnnotations strips userAnnotationPrefix from all keys and truncates keys and values to their maximal +// lengths (as specified by maxAnnotationKeyLen and maxAnnotationValLen). func extractUserAnnotations(userAnnotationPrefix string, jobAnnotations map[string]string) map[string]string { result := make(map[string]string, len(jobAnnotations)) n := len(userAnnotationPrefix) @@ -286,11 +281,16 @@ func (c *InstructionConverter) handleJobErrors(ts time.Time, event *armadaevents } state := lookout.JobFailedOrdinal - switch e.Reason.(type) { - // We should have a JobPreempted event rather than relying on type of JobErrors - // For now this is how we can identify if the job was preempted or failed + switch reason := e.Reason.(type) { + // Preempted and Rejected jobs are modelled as Reasons on a JobErrors msg case *armadaevents.Error_JobRunPreemptedError: state = lookout.JobPreemptedOrdinal + case *armadaevents.Error_JobRejected: + state = lookout.JobRejectedOrdinal + update.JobErrorsToCreate = append(update.JobErrorsToCreate, &model.CreateJobErrorInstruction{ + JobId: jobId, + Error: tryCompressError(jobId, reason.JobRejected.Message, c.compressor), + }) } jobUpdate := model.UpdateJobInstruction{ diff --git a/internal/lookoutingesterv2/instructions/instructions_test.go b/internal/lookoutingesterv2/instructions/instructions_test.go index 827e8c997e0..ed99b6925b3 100644 --- a/internal/lookoutingesterv2/instructions/instructions_test.go +++ b/internal/lookoutingesterv2/instructions/instructions_test.go @@ -137,6 +137,18 @@ var expectedUnschedulable = model.UpdateJobRunInstruction{ Node: pointer.String(testfixtures.NodeName), } +var expectedRejected = model.UpdateJobInstruction{ + JobId: testfixtures.JobIdString, + State: pointer.Int32(lookout.JobRejectedOrdinal), + LastTransitionTime: &testfixtures.BaseTime, + LastTransitionTimeSeconds: pointer.Int64(testfixtures.BaseTime.Unix()), +} + +var expectedRejectedJobError = model.CreateJobErrorInstruction{ + JobId: testfixtures.JobIdString, + Error: []byte(testfixtures.ErrMsg), +} + var expectedPreempted = model.UpdateJobInstruction{ JobId: testfixtures.JobIdString, State: pointer.Int32(lookout.JobPreemptedOrdinal), @@ -370,13 +382,15 @@ func TestConvert(t *testing.T) { MessageIds: []pulsar.MessageID{pulsarutils.NewMessageId(1)}, }, }, - "duplicate submit is ignored": { + "job rejected": { events: &ingest.EventSequencesWithIds{ - EventSequences: []*armadaevents.EventSequence{testfixtures.NewEventSequence(testfixtures.SubmitDuplicate)}, + EventSequences: []*armadaevents.EventSequence{testfixtures.NewEventSequence(testfixtures.JobRejected)}, MessageIds: []pulsar.MessageID{pulsarutils.NewMessageId(1)}, }, expected: &model.InstructionSet{ - MessageIds: []pulsar.MessageID{pulsarutils.NewMessageId(1)}, + JobsToUpdate: []*model.UpdateJobInstruction{&expectedRejected}, + JobErrorsToCreate: []*model.CreateJobErrorInstruction{&expectedRejectedJobError}, + MessageIds: []pulsar.MessageID{pulsarutils.NewMessageId(1)}, }, }, "job preempted": { diff --git a/internal/lookoutingesterv2/lookoutdb/insertion.go b/internal/lookoutingesterv2/lookoutdb/insertion.go 
index f63378a0ea0..985ab99b2f6 100644 --- a/internal/lookoutingesterv2/lookoutdb/insertion.go +++ b/internal/lookoutingesterv2/lookoutdb/insertion.go @@ -51,7 +51,7 @@ func (l *LookoutDb) Store(ctx *armadacontext.Context, instructions *model.Instru // Now we can job updates, annotations and new job runs wg := sync.WaitGroup{} - wg.Add(2) + wg.Add(3) go func() { defer wg.Done() l.UpdateJobs(ctx, jobsToUpdate) @@ -60,6 +60,10 @@ func (l *LookoutDb) Store(ctx *armadacontext.Context, instructions *model.Instru defer wg.Done() l.CreateJobRuns(ctx, instructions.JobRunsToCreate) }() + go func() { + defer wg.Done() + l.CreateJobErrors(ctx, instructions.JobErrorsToCreate) + }() wg.Wait() @@ -121,6 +125,19 @@ func (l *LookoutDb) UpdateJobRuns(ctx *armadacontext.Context, instructions []*mo log.Infof("Updated %d job runs in %s", len(instructions), time.Since(start)) } +func (l *LookoutDb) CreateJobErrors(ctx *armadacontext.Context, instructions []*model.CreateJobErrorInstruction) { + if len(instructions) == 0 { + return + } + start := time.Now() + err := l.CreateJobErrorsBatch(ctx, instructions) + if err != nil { + log.WithError(err).Warn("Creating job errors via batch failed, will attempt to insert serially (this might be slow).") + l.CreateJobErrorsScalar(ctx, instructions) + } + log.Infof("Inserted %d job errors in %s", len(instructions), time.Since(start)) +} + func (l *LookoutDb) CreateJobsBatch(ctx *armadacontext.Context, instructions []*model.CreateJobInstruction) error { return l.withDatabaseRetryInsert(func() error { tmpTable := "job_create_tmp" @@ -621,6 +638,76 @@ func (l *LookoutDb) UpdateJobRunsScalar(ctx *armadacontext.Context, instructions } } +func (l *LookoutDb) CreateJobErrorsBatch(ctx *armadacontext.Context, instructions []*model.CreateJobErrorInstruction) error { + tmpTable := "job_error_create_tmp" + return l.withDatabaseRetryInsert(func() error { + createTmp := func(tx pgx.Tx) error { + _, err := tx.Exec(ctx, fmt.Sprintf(` + CREATE TEMPORARY TABLE %s ( + job_id varchar(32), + error bytea + ) ON COMMIT DROP;`, tmpTable)) + if err != nil { + l.metrics.RecordDBError(metrics.DBOperationCreateTempTable) + } + return err + } + + insertTmp := func(tx pgx.Tx) error { + _, err := tx.CopyFrom(ctx, + pgx.Identifier{tmpTable}, + []string{ + "job_id", + "error", + }, + pgx.CopyFromSlice(len(instructions), func(i int) ([]interface{}, error) { + return []interface{}{ + instructions[i].JobId, + instructions[i].Error, + }, nil + }), + ) + return err + } + + copyToDest := func(tx pgx.Tx) error { + _, err := tx.Exec( + ctx, + fmt.Sprintf(` + INSERT INTO job_error ( + job_id, + error + ) SELECT * from %s + ON CONFLICT DO NOTHING`, tmpTable)) + if err != nil { + l.metrics.RecordDBError(metrics.DBOperationInsert) + } + return err + } + return batchInsert(ctx, l.db, createTmp, insertTmp, copyToDest) + }) +} + +func (l *LookoutDb) CreateJobErrorsScalar(ctx *armadacontext.Context, instructions []*model.CreateJobErrorInstruction) { + sqlStatement := `INSERT INTO job_error (job_id, error) + VALUES ($1, $2) + ON CONFLICT DO NOTHING` + for _, i := range instructions { + err := l.withDatabaseRetryInsert(func() error { + _, err := l.db.Exec(ctx, sqlStatement, + i.JobId, + i.Error) + if err != nil { + l.metrics.RecordDBError(metrics.DBOperationInsert) + } + return err + }) + if err != nil { + log.WithError(err).Warnf("Create job error for job %s, failed", i.JobId) + } + } +} + func batchInsert(ctx *armadacontext.Context, db *pgxpool.Pool, createTmp func(pgx.Tx) error, insertTmp func(pgx.Tx) error, copyToDest 
func(pgx.Tx) error, ) error { @@ -656,7 +743,8 @@ func conflateJobUpdates(updates []*model.UpdateJobInstruction) []*model.UpdateJo return *p == lookout.JobFailedOrdinal || *p == lookout.JobSucceededOrdinal || *p == lookout.JobCancelledOrdinal || - *p == lookout.JobPreemptedOrdinal + *p == lookout.JobPreemptedOrdinal || + *p == lookout.JobRejectedOrdinal } } @@ -778,6 +866,7 @@ func (l *LookoutDb) filterEventsForTerminalJobs( lookout.JobFailedOrdinal, lookout.JobCancelledOrdinal, lookout.JobPreemptedOrdinal, + lookout.JobRejectedOrdinal, } return db.Query(ctx, "SELECT DISTINCT job_id, state FROM JOB where state = any($1) AND job_id = any($2)", terminalStates, jobIds) }) diff --git a/internal/lookoutingesterv2/lookoutdb/insertion_test.go b/internal/lookoutingesterv2/lookoutdb/insertion_test.go index 61cc13aa6f8..0806c841c83 100644 --- a/internal/lookoutingesterv2/lookoutdb/insertion_test.go +++ b/internal/lookoutingesterv2/lookoutdb/insertion_test.go @@ -35,11 +35,7 @@ const ( priority = 3 updatePriority = 4 priorityClass = "default" - updateState = 5 - podNumber = 6 - jobJson = `{"foo": "bar"}` jobProto = "hello world" - containerName = "testContainer" ) var m = metrics.Get() @@ -57,7 +53,7 @@ var ( fatalErrors = []*regexp.Regexp{regexp.MustCompile("SQLSTATE 22001")} ) -// An invalid job id that exceeds th varchar count +// An invalid job id that exceeds the varchar count var invalidId = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" type JobRow struct { @@ -97,12 +93,9 @@ type JobRunRow struct { ExitCode *int32 } -type UserAnnotationRow struct { - JobId string - Key string - Value string - Queue string - JobSet string +type JobErrorRow struct { + JobId string + Error []byte } func defaultInstructionSet() *model.InstructionSet { @@ -132,6 +125,10 @@ func defaultInstructionSet() *model.InstructionSet { JobRunState: pointer.Int32(lookout.JobRunSucceededOrdinal), ExitCode: pointer.Int32(0), }}, + JobErrorsToCreate: []*model.CreateJobErrorInstruction{{ + JobId: jobIdString, + Error: []byte(testfixtures.ErrMsg), + }}, MessageIds: []pulsar.MessageID{pulsarutils.NewMessageId(3)}, } } @@ -184,6 +181,11 @@ var expectedJobRun = JobRunRow{ JobRunState: lookout.JobRunPendingOrdinal, } +var expectedJobError = JobErrorRow{ + JobId: jobIdString, + Error: []byte(testfixtures.ErrMsg), +} + var expectedJobRunAfterUpdate = JobRunRow{ RunId: runIdString, JobId: jobIdString, @@ -593,6 +595,62 @@ func TestUpdateJobRunsScalar(t *testing.T) { assert.NoError(t, err) } +func TestCreateJobErrorsBatch(t *testing.T) { + err := lookout.WithLookoutDb(func(db *pgxpool.Pool) error { + ldb := NewLookoutDb(db, fatalErrors, m, 10) + + // Insert + err := ldb.CreateJobErrorsBatch(armadacontext.Background(), defaultInstructionSet().JobErrorsToCreate) + assert.Nil(t, err) + jobError := getJobError(t, db, jobIdString) + assert.Equal(t, expectedJobError, jobError) + + // Insert again and test that it's idempotent + err = ldb.CreateJobErrorsBatch(armadacontext.Background(), defaultInstructionSet().JobErrorsToCreate) + assert.Nil(t, err) + jobError = getJobError(t, db, jobIdString) + assert.Equal(t, expectedJobError, jobError) + + // Insert again with a different value and check we don't overwrite + jobErrors := defaultInstructionSet().JobErrorsToCreate + jobErrors[0].Error = []byte{} + err = ldb.CreateJobErrorsBatch(armadacontext.Background(), jobErrors) + assert.Nil(t, err) + jobError = getJobError(t, db, jobIdString) + assert.Equal(t, expectedJobError, jobError) + + // If a row is bad then we 
should return an error and no updates should happen + _, err = ldb.db.Exec(armadacontext.Background(), "DELETE FROM job_error") + assert.NoError(t, err) + invalidError := &model.CreateJobErrorInstruction{ + JobId: invalidId, + } + err = ldb.CreateJobErrorsBatch(armadacontext.Background(), append(defaultInstructionSet().JobErrorsToCreate, invalidError)) + assert.Error(t, err) + assertNoRows(t, db, "job_error") + return nil + }) + assert.NoError(t, err) +} + +func TestCreateJobErrorsScalar(t *testing.T) { + err := lookout.WithLookoutDb(func(db *pgxpool.Pool) error { + ldb := NewLookoutDb(db, fatalErrors, m, 10) + + // Insert + ldb.CreateJobErrorsScalar(armadacontext.Background(), defaultInstructionSet().JobErrorsToCreate) + jobError := getJobError(t, db, jobIdString) + assert.Equal(t, expectedJobError, jobError) + + // Insert again and test that it's idempotent + ldb.CreateJobErrorsScalar(armadacontext.Background(), defaultInstructionSet().JobErrorsToCreate) + jobError = getJobError(t, db, jobIdString) + assert.Equal(t, expectedJobError, jobError) + return nil + }) + assert.NoError(t, err) +} + func TestStoreWithEmptyInstructionSet(t *testing.T) { err := lookout.WithLookoutDb(func(db *pgxpool.Pool) error { ldb := NewLookoutDb(db, fatalErrors, m, 10) @@ -965,6 +1023,19 @@ func getJobRun(t *testing.T, db *pgxpool.Pool, runId string) JobRunRow { return run } +func getJobError(t *testing.T, db *pgxpool.Pool, jobId string) JobErrorRow { + errorRow := JobErrorRow{} + r := db.QueryRow( + armadacontext.Background(), + "SELECT job_id, error FROM job_error WHERE job_id = $1", + jobId) + + err := r.Scan(&errorRow.JobId, &errorRow.Error) + + assert.NoError(t, err) + return errorRow +} + func assertNoRows(t *testing.T, db *pgxpool.Pool, table string) { t.Helper() var count int diff --git a/internal/lookoutingesterv2/model/model.go b/internal/lookoutingesterv2/model/model.go index f45494c1af1..094203be1dc 100644 --- a/internal/lookoutingesterv2/model/model.go +++ b/internal/lookoutingesterv2/model/model.go @@ -64,15 +64,22 @@ type UpdateJobRunInstruction struct { ExitCode *int32 } +// CreateJobErrorInstruction is an instruction to create a new row in the job_error table +type CreateJobErrorInstruction struct { + JobId string + Error []byte +} + // InstructionSet represents a set of instructions to apply to the database. Each type of instruction is stored in its // own ordered list representing the order it was received. We also store the original message ids corresponding to // these instructions so that when they are saved to the database, we can ACK the corresponding messages.
type InstructionSet struct { - JobsToCreate []*CreateJobInstruction - JobsToUpdate []*UpdateJobInstruction - JobRunsToCreate []*CreateJobRunInstruction - JobRunsToUpdate []*UpdateJobRunInstruction - MessageIds []pulsar.MessageID + JobsToCreate []*CreateJobInstruction + JobsToUpdate []*UpdateJobInstruction + JobRunsToCreate []*CreateJobRunInstruction + JobRunsToUpdate []*UpdateJobRunInstruction + JobErrorsToCreate []*CreateJobErrorInstruction + MessageIds []pulsar.MessageID } func (i *InstructionSet) GetMessageIDs() []pulsar.MessageID { diff --git a/internal/lookoutv2/application.go b/internal/lookoutv2/application.go index f377701b72e..47385dc9414 100644 --- a/internal/lookoutv2/application.go +++ b/internal/lookoutv2/application.go @@ -34,6 +34,7 @@ func Serve(configuration configuration.LookoutV2Config) error { getJobsRepo := repository.NewSqlGetJobsRepository(db) groupJobsRepo := repository.NewSqlGroupJobsRepository(db) decompressor := compress.NewThreadSafeZlibDecompressor() + getJobErrorRepo := repository.NewSqlGetJobErrorRepository(db, decompressor) getJobRunErrorRepo := repository.NewSqlGetJobRunErrorRepository(db, decompressor) getJobRunDebugMessageRepo := repository.NewSqlGetJobRunDebugMessageRepository(db, decompressor) getJobSpecRepo := repository.NewSqlGetJobSpecRepository(db, decompressor) @@ -121,6 +122,19 @@ func Serve(configuration configuration.LookoutV2Config) error { }, ) + api.GetJobErrorHandler = operations.GetJobErrorHandlerFunc( + func(params operations.GetJobErrorParams) middleware.Responder { + ctx := armadacontext.New(params.HTTPRequest.Context(), logger) + result, err := getJobErrorRepo.GetJobErrorMessage(ctx, params.GetJobErrorRequest.JobID) + if err != nil { + return operations.NewGetJobErrorBadRequest().WithPayload(conversions.ToSwaggerError(err.Error())) + } + return operations.NewGetJobErrorOK().WithPayload(&operations.GetJobErrorOKBody{ + ErrorString: result, + }) + }, + ) + api.GetJobSpecHandler = operations.GetJobSpecHandlerFunc( func(params operations.GetJobSpecParams) middleware.Responder { ctx := armadacontext.New(params.HTTPRequest.Context(), logger) diff --git a/internal/lookoutv2/gen/models/job.go b/internal/lookoutv2/gen/models/job.go index a2c7a89667f..5a1ee2afbc6 100644 --- a/internal/lookoutv2/gen/models/job.go +++ b/internal/lookoutv2/gen/models/job.go @@ -111,7 +111,7 @@ type Job struct { // state // Required: true - // Enum: [QUEUED PENDING RUNNING SUCCEEDED FAILED CANCELLED PREEMPTED LEASED] + // Enum: [QUEUED PENDING RUNNING SUCCEEDED FAILED CANCELLED PREEMPTED LEASED REJECTED] State string `json:"state"` // submitted @@ -396,7 +396,7 @@ var jobTypeStatePropEnum []interface{} func init() { var res []string - if err := json.Unmarshal([]byte(`["QUEUED","PENDING","RUNNING","SUCCEEDED","FAILED","CANCELLED","PREEMPTED","LEASED"]`), &res); err != nil { + if err := json.Unmarshal([]byte(`["QUEUED","PENDING","RUNNING","SUCCEEDED","FAILED","CANCELLED","PREEMPTED","LEASED","REJECTED"]`), &res); err != nil { panic(err) } for _, v := range res { @@ -429,6 +429,9 @@ const ( // JobStateLEASED captures enum value "LEASED" JobStateLEASED string = "LEASED" + + // JobStateREJECTED captures enum value "REJECTED" + JobStateREJECTED string = "REJECTED" ) // prop value enum diff --git a/internal/lookoutv2/gen/restapi/embedded_spec.go b/internal/lookoutv2/gen/restapi/embedded_spec.go index 8a11263dbf2..3edcd568f06 100644 --- a/internal/lookoutv2/gen/restapi/embedded_spec.go +++ b/internal/lookoutv2/gen/restapi/embedded_spec.go @@ -27,6 +27,63 @@ func init() { 
"version": "2.0.0" }, "paths": { + "/api/v1/jobError": { + "post": { + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "operationId": "getJobError", + "parameters": [ + { + "name": "getJobErrorRequest", + "in": "body", + "required": true, + "schema": { + "type": "object", + "required": [ + "jobId" + ], + "properties": { + "jobId": { + "type": "string", + "x-nullable": false + } + } + } + } + ], + "responses": { + "200": { + "description": "Returns error for specific job (if present)", + "schema": { + "type": "object", + "properties": { + "errorString": { + "description": "Error for job", + "type": "string", + "x-nullable": false + } + } + } + }, + "400": { + "description": "Error response", + "schema": { + "$ref": "#/definitions/error" + } + }, + "default": { + "description": "Error response", + "schema": { + "$ref": "#/definitions/error" + } + } + } + } + }, "/api/v1/jobGroups": { "post": { "consumes": [ @@ -636,7 +693,8 @@ func init() { "FAILED", "CANCELLED", "PREEMPTED", - "LEASED" + "LEASED", + "REJECTED" ], "x-nullable": false }, @@ -762,6 +820,63 @@ func init() { "version": "2.0.0" }, "paths": { + "/api/v1/jobError": { + "post": { + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "operationId": "getJobError", + "parameters": [ + { + "name": "getJobErrorRequest", + "in": "body", + "required": true, + "schema": { + "type": "object", + "required": [ + "jobId" + ], + "properties": { + "jobId": { + "type": "string", + "x-nullable": false + } + } + } + } + ], + "responses": { + "200": { + "description": "Returns error for specific job (if present)", + "schema": { + "type": "object", + "properties": { + "errorString": { + "description": "Error for job", + "type": "string", + "x-nullable": false + } + } + } + }, + "400": { + "description": "Error response", + "schema": { + "$ref": "#/definitions/error" + } + }, + "default": { + "description": "Error response", + "schema": { + "$ref": "#/definitions/error" + } + } + } + } + }, "/api/v1/jobGroups": { "post": { "consumes": [ @@ -1400,7 +1515,8 @@ func init() { "FAILED", "CANCELLED", "PREEMPTED", - "LEASED" + "LEASED", + "REJECTED" ], "x-nullable": false }, diff --git a/internal/lookoutv2/gen/restapi/operations/get_job_error.go b/internal/lookoutv2/gen/restapi/operations/get_job_error.go new file mode 100644 index 00000000000..89205fd9e7d --- /dev/null +++ b/internal/lookoutv2/gen/restapi/operations/get_job_error.go @@ -0,0 +1,154 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the generate command + +import ( + "context" + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + "github.com/go-openapi/validate" +) + +// GetJobErrorHandlerFunc turns a function with the right signature into a get job error handler +type GetJobErrorHandlerFunc func(GetJobErrorParams) middleware.Responder + +// Handle executing the request and returning a response +func (fn GetJobErrorHandlerFunc) Handle(params GetJobErrorParams) middleware.Responder { + return fn(params) +} + +// GetJobErrorHandler interface for that can handle valid get job error params +type GetJobErrorHandler interface { + Handle(GetJobErrorParams) middleware.Responder +} + +// NewGetJobError creates a new http.Handler for the get job error operation +func NewGetJobError(ctx *middleware.Context, handler GetJobErrorHandler) *GetJobError { + return &GetJobError{Context: ctx, Handler: handler} +} + +/* + GetJobError swagger:route POST /api/v1/jobError getJobError + +GetJobError get job error API +*/ +type GetJobError struct { + Context *middleware.Context + Handler GetJobErrorHandler +} + +func (o *GetJobError) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewGetJobErrorParams() + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} + +// GetJobErrorBody get job error body +// +// swagger:model GetJobErrorBody +type GetJobErrorBody struct { + + // job Id + // Required: true + JobID string `json:"jobId"` +} + +// Validate validates this get job error body +func (o *GetJobErrorBody) Validate(formats strfmt.Registry) error { + var res []error + + if err := o.validateJobID(formats); err != nil { + res = append(res, err) + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) 
+ } + return nil +} + +func (o *GetJobErrorBody) validateJobID(formats strfmt.Registry) error { + + if err := validate.RequiredString("getJobErrorRequest"+"."+"jobId", "body", o.JobID); err != nil { + return err + } + + return nil +} + +// ContextValidate validates this get job error body based on context it is used +func (o *GetJobErrorBody) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (o *GetJobErrorBody) MarshalBinary() ([]byte, error) { + if o == nil { + return nil, nil + } + return swag.WriteJSON(o) +} + +// UnmarshalBinary interface implementation +func (o *GetJobErrorBody) UnmarshalBinary(b []byte) error { + var res GetJobErrorBody + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *o = res + return nil +} + +// GetJobErrorOKBody get job error o k body +// +// swagger:model GetJobErrorOKBody +type GetJobErrorOKBody struct { + + // Error for job + ErrorString string `json:"errorString,omitempty"` +} + +// Validate validates this get job error o k body +func (o *GetJobErrorOKBody) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this get job error o k body based on context it is used +func (o *GetJobErrorOKBody) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (o *GetJobErrorOKBody) MarshalBinary() ([]byte, error) { + if o == nil { + return nil, nil + } + return swag.WriteJSON(o) +} + +// UnmarshalBinary interface implementation +func (o *GetJobErrorOKBody) UnmarshalBinary(b []byte) error { + var res GetJobErrorOKBody + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *o = res + return nil +} diff --git a/internal/lookoutv2/gen/restapi/operations/get_job_error_parameters.go b/internal/lookoutv2/gen/restapi/operations/get_job_error_parameters.go new file mode 100644 index 00000000000..865ee37d4e2 --- /dev/null +++ b/internal/lookoutv2/gen/restapi/operations/get_job_error_parameters.go @@ -0,0 +1,83 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "io" + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/validate" +) + +// NewGetJobErrorParams creates a new GetJobErrorParams object +// +// There are no default values defined in the spec. +func NewGetJobErrorParams() GetJobErrorParams { + + return GetJobErrorParams{} +} + +// GetJobErrorParams contains all the bound params for the get job error operation +// typically these are obtained from a http.Request +// +// swagger:parameters getJobError +type GetJobErrorParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /* + Required: true + In: body + */ + GetJobErrorRequest GetJobErrorBody +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewGetJobErrorParams() beforehand. 
+func (o *GetJobErrorParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + if runtime.HasBody(r) { + defer r.Body.Close() + var body GetJobErrorBody + if err := route.Consumer.Consume(r.Body, &body); err != nil { + if err == io.EOF { + res = append(res, errors.Required("getJobErrorRequest", "body", "")) + } else { + res = append(res, errors.NewParseError("getJobErrorRequest", "body", "", err)) + } + } else { + // validate body object + if err := body.Validate(route.Formats); err != nil { + res = append(res, err) + } + + ctx := validate.WithOperationRequest(context.Background()) + if err := body.ContextValidate(ctx, route.Formats); err != nil { + res = append(res, err) + } + + if len(res) == 0 { + o.GetJobErrorRequest = body + } + } + } else { + res = append(res, errors.Required("getJobErrorRequest", "body", "")) + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/internal/lookoutv2/gen/restapi/operations/get_job_error_responses.go b/internal/lookoutv2/gen/restapi/operations/get_job_error_responses.go new file mode 100644 index 00000000000..7a8ca53ce9f --- /dev/null +++ b/internal/lookoutv2/gen/restapi/operations/get_job_error_responses.go @@ -0,0 +1,163 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/armadaproject/armada/internal/lookoutv2/gen/models" +) + +// GetJobErrorOKCode is the HTTP code returned for type GetJobErrorOK +const GetJobErrorOKCode int = 200 + +/* +GetJobErrorOK Returns error for specific job (if present) + +swagger:response getJobErrorOK +*/ +type GetJobErrorOK struct { + + /* + In: Body + */ + Payload *GetJobErrorOKBody `json:"body,omitempty"` +} + +// NewGetJobErrorOK creates GetJobErrorOK with default headers values +func NewGetJobErrorOK() *GetJobErrorOK { + + return &GetJobErrorOK{} +} + +// WithPayload adds the payload to the get job error o k response +func (o *GetJobErrorOK) WithPayload(payload *GetJobErrorOKBody) *GetJobErrorOK { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the get job error o k response +func (o *GetJobErrorOK) SetPayload(payload *GetJobErrorOKBody) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *GetJobErrorOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(200) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// GetJobErrorBadRequestCode is the HTTP code returned for type GetJobErrorBadRequest +const GetJobErrorBadRequestCode int = 400 + +/* +GetJobErrorBadRequest Error response + +swagger:response getJobErrorBadRequest +*/ +type GetJobErrorBadRequest struct { + + /* + In: Body + */ + Payload *models.Error `json:"body,omitempty"` +} + +// NewGetJobErrorBadRequest creates GetJobErrorBadRequest with default headers values +func NewGetJobErrorBadRequest() *GetJobErrorBadRequest { + + return &GetJobErrorBadRequest{} +} + +// WithPayload adds the payload to the get job error bad request response +func (o *GetJobErrorBadRequest) WithPayload(payload *models.Error) *GetJobErrorBadRequest { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the get job error bad 
request response +func (o *GetJobErrorBadRequest) SetPayload(payload *models.Error) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *GetJobErrorBadRequest) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(400) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +/* +GetJobErrorDefault Error response + +swagger:response getJobErrorDefault +*/ +type GetJobErrorDefault struct { + _statusCode int + + /* + In: Body + */ + Payload *models.Error `json:"body,omitempty"` +} + +// NewGetJobErrorDefault creates GetJobErrorDefault with default headers values +func NewGetJobErrorDefault(code int) *GetJobErrorDefault { + if code <= 0 { + code = 500 + } + + return &GetJobErrorDefault{ + _statusCode: code, + } +} + +// WithStatusCode adds the status to the get job error default response +func (o *GetJobErrorDefault) WithStatusCode(code int) *GetJobErrorDefault { + o._statusCode = code + return o +} + +// SetStatusCode sets the status to the get job error default response +func (o *GetJobErrorDefault) SetStatusCode(code int) { + o._statusCode = code +} + +// WithPayload adds the payload to the get job error default response +func (o *GetJobErrorDefault) WithPayload(payload *models.Error) *GetJobErrorDefault { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the get job error default response +func (o *GetJobErrorDefault) SetPayload(payload *models.Error) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *GetJobErrorDefault) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(o._statusCode) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} diff --git a/internal/lookoutv2/gen/restapi/operations/get_job_error_urlbuilder.go b/internal/lookoutv2/gen/restapi/operations/get_job_error_urlbuilder.go new file mode 100644 index 00000000000..468b4fba8f8 --- /dev/null +++ b/internal/lookoutv2/gen/restapi/operations/get_job_error_urlbuilder.go @@ -0,0 +1,84 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package operations + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" +) + +// GetJobErrorURL generates an URL for the get job error operation +type GetJobErrorURL struct { + _basePath string +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *GetJobErrorURL) WithBasePath(bp string) *GetJobErrorURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. 
+// When the value of the base path is an empty string +func (o *GetJobErrorURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *GetJobErrorURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/api/v1/jobError" + + _basePath := o._basePath + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *GetJobErrorURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *GetJobErrorURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *GetJobErrorURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on GetJobErrorURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on GetJobErrorURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *GetJobErrorURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/internal/lookoutv2/gen/restapi/operations/lookout_api.go b/internal/lookoutv2/gen/restapi/operations/lookout_api.go index 1791b8e29ff..f4d99c99ee4 100644 --- a/internal/lookoutv2/gen/restapi/operations/lookout_api.go +++ b/internal/lookoutv2/gen/restapi/operations/lookout_api.go @@ -46,6 +46,9 @@ func NewLookoutAPI(spec *loads.Document) *LookoutAPI { GetHealthHandler: GetHealthHandlerFunc(func(params GetHealthParams) middleware.Responder { return middleware.NotImplemented("operation GetHealth has not yet been implemented") }), + GetJobErrorHandler: GetJobErrorHandlerFunc(func(params GetJobErrorParams) middleware.Responder { + return middleware.NotImplemented("operation GetJobError has not yet been implemented") + }), GetJobRunDebugMessageHandler: GetJobRunDebugMessageHandlerFunc(func(params GetJobRunDebugMessageParams) middleware.Responder { return middleware.NotImplemented("operation GetJobRunDebugMessage has not yet been implemented") }), @@ -102,6 +105,8 @@ type LookoutAPI struct { // GetHealthHandler sets the operation handler for the get health operation GetHealthHandler GetHealthHandler + // GetJobErrorHandler sets the operation handler for the get job error operation + GetJobErrorHandler GetJobErrorHandler // GetJobRunDebugMessageHandler sets the operation handler for the get job run debug message operation GetJobRunDebugMessageHandler GetJobRunDebugMessageHandler // GetJobRunErrorHandler sets the operation handler for the get job run error operation @@ -195,6 +200,9 @@ func (o *LookoutAPI) Validate() error { if o.GetHealthHandler == nil { unregistered = append(unregistered, "GetHealthHandler") } + if o.GetJobErrorHandler == nil { + unregistered = append(unregistered, "GetJobErrorHandler") + } if o.GetJobRunDebugMessageHandler == nil { unregistered = append(unregistered, "GetJobRunDebugMessageHandler") } @@ -307,6 +315,10 @@ func (o *LookoutAPI) initHandlerCache() { if o.handlers["POST"] == nil { o.handlers["POST"] = make(map[string]http.Handler) } + o.handlers["POST"]["/api/v1/jobError"] = NewGetJobError(o.context, 
o.GetJobErrorHandler) + if o.handlers["POST"] == nil { + o.handlers["POST"] = make(map[string]http.Handler) + } o.handlers["POST"]["/api/v1/jobRunDebugMessage"] = NewGetJobRunDebugMessage(o.context, o.GetJobRunDebugMessageHandler) if o.handlers["POST"] == nil { o.handlers["POST"] = make(map[string]http.Handler) diff --git a/internal/lookoutv2/repository/getjoberror.go b/internal/lookoutv2/repository/getjoberror.go new file mode 100644 index 00000000000..ebcb14a59fa --- /dev/null +++ b/internal/lookoutv2/repository/getjoberror.go @@ -0,0 +1,44 @@ +package repository + +import ( + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgxpool" + "github.com/pkg/errors" + log "github.com/sirupsen/logrus" + + "github.com/armadaproject/armada/internal/common/armadacontext" + "github.com/armadaproject/armada/internal/common/compress" +) + +type GetJobErrorRepository interface { + GetJobErrorMessage(ctx *armadacontext.Context, jobId string) (string, error) +} + +type SqlGetJobErrorMessageRepository struct { + db *pgxpool.Pool + decompressor compress.Decompressor +} + +func NewSqlGetJobErrorRepository(db *pgxpool.Pool, decompressor compress.Decompressor) *SqlGetJobErrorMessageRepository { + return &SqlGetJobErrorMessageRepository{ + db: db, + decompressor: decompressor, + } +} + +func (r *SqlGetJobErrorMessageRepository) GetJobErrorMessage(ctx *armadacontext.Context, jobId string) (string, error) { + var rawBytes []byte + err := r.db.QueryRow(ctx, "SELECT error FROM job_error WHERE job_id = $1", jobId).Scan(&rawBytes) + if err != nil { + if errors.Is(err, pgx.ErrNoRows) { + return "", errors.Errorf("no error found for job with id %s", jobId) + } + return "", err + } + decompressed, err := r.decompressor.Decompress(rawBytes) + if err != nil { + log.WithError(err).Error("failed to decompress") + return "", err + } + return string(decompressed), nil +} diff --git a/internal/lookoutv2/repository/getjoberror_test.go b/internal/lookoutv2/repository/getjoberror_test.go new file mode 100644 index 00000000000..50042c4b4b1 --- /dev/null +++ b/internal/lookoutv2/repository/getjoberror_test.go @@ -0,0 +1,53 @@ +package repository + +import ( + "testing" + + "github.com/jackc/pgx/v5/pgxpool" + "github.com/stretchr/testify/assert" + + "github.com/armadaproject/armada/internal/common/armadacontext" + "github.com/armadaproject/armada/internal/common/compress" + "github.com/armadaproject/armada/internal/common/database/lookout" + "github.com/armadaproject/armada/internal/lookoutingesterv2/instructions" + "github.com/armadaproject/armada/internal/lookoutingesterv2/lookoutdb" + "github.com/armadaproject/armada/internal/lookoutingesterv2/metrics" +) + +func TestGetJobError(t *testing.T) { + err := lookout.WithLookoutDb(func(db *pgxpool.Pool) error { + converter := instructions.NewInstructionConverter(metrics.Get(), userAnnotationPrefix, &compress.NoOpCompressor{}) + store := lookoutdb.NewLookoutDb(db, nil, metrics.Get(), 10) + errMsg := "some bad error happened!" + _ = NewJobSimulator(converter, store). + Submit(queue, jobSet, owner, namespace, baseTime, &JobOptions{ + JobId: jobId, + Priority: priority, + PriorityClass: priorityClass, + Cpu: cpu, + Memory: memory, + EphemeralStorage: ephemeralStorage, + Gpu: gpu, + }). + Rejected(errMsg, baseTime). + Build(). 
+ ApiJob() + + repo := NewSqlGetJobErrorRepository(db, &compress.NoOpDecompressor{}) + result, err := repo.GetJobErrorMessage(armadacontext.TODO(), jobId) + assert.NoError(t, err) + assert.Equal(t, errMsg, result) + return nil + }) + assert.NoError(t, err) +} + +func TestGetJobErrorNotFound(t *testing.T) { + err := lookout.WithLookoutDb(func(db *pgxpool.Pool) error { + repo := NewSqlGetJobErrorRepository(db, &compress.NoOpDecompressor{}) + _, err := repo.GetJobErrorMessage(armadacontext.TODO(), jobId) + assert.Error(t, err) + return nil + }) + assert.NoError(t, err) +} diff --git a/internal/lookoutv2/repository/groupjobs_test.go b/internal/lookoutv2/repository/groupjobs_test.go index 5b2d7019a47..2a3798677d7 100644 --- a/internal/lookoutv2/repository/groupjobs_test.go +++ b/internal/lookoutv2/repository/groupjobs_test.go @@ -658,6 +658,7 @@ func TestGroupJobsWithAllStateCounts(t *testing.T) { string(lookout.JobFailed): 0, string(lookout.JobCancelled): 0, string(lookout.JobPreempted): 0, + string(lookout.JobRejected): 0, }, }, }, @@ -674,6 +675,7 @@ func TestGroupJobsWithAllStateCounts(t *testing.T) { string(lookout.JobFailed): 0, string(lookout.JobCancelled): 10, string(lookout.JobPreempted): 9, + string(lookout.JobRejected): 0, }, }, }, @@ -690,6 +692,7 @@ func TestGroupJobsWithAllStateCounts(t *testing.T) { string(lookout.JobFailed): 12, string(lookout.JobCancelled): 0, string(lookout.JobPreempted): 0, + string(lookout.JobRejected): 0, }, }, }, diff --git a/internal/lookoutv2/repository/util.go b/internal/lookoutv2/repository/util.go index 9ca90d6271f..d68ceb608df 100644 --- a/internal/lookoutv2/repository/util.go +++ b/internal/lookoutv2/repository/util.go @@ -436,6 +436,33 @@ func (js *JobSimulator) RunFailed(runId string, node string, exitCode int32, mes return js } +func (js *JobSimulator) Rejected(message string, timestamp time.Time) *JobSimulator { + ts := timestampOrNow(timestamp) + rejected := &armadaevents.EventSequence_Event{ + Created: &ts, + Event: &armadaevents.EventSequence_Event_JobErrors{ + JobErrors: &armadaevents.JobErrors{ + JobId: js.jobId, + Errors: []*armadaevents.Error{ + { + Terminal: true, + Reason: &armadaevents.Error_JobRejected{ + JobRejected: &armadaevents.JobRejected{ + Message: message, + }, + }, + }, + }, + }, + }, + } + js.events = append(js.events, rejected) + + js.job.LastTransitionTime = ts + js.job.State = string(lookout.JobRejected) + return js +} + func (js *JobSimulator) Failed(node string, exitCode int32, message string, timestamp time.Time) *JobSimulator { ts := timestampOrNow(timestamp) failed := &armadaevents.EventSequence_Event{ diff --git a/internal/lookoutv2/schema/migrations/011_add_job_error.sql b/internal/lookoutv2/schema/migrations/011_add_job_error.sql new file mode 100644 index 00000000000..8743caa1315 --- /dev/null +++ b/internal/lookoutv2/schema/migrations/011_add_job_error.sql @@ -0,0 +1,5 @@ +CREATE TABLE IF NOT EXISTS job_error ( + job_id varchar(32) NOT NULL PRIMARY KEY, + error bytea NOT NULL +); +ALTER TABLE job_error ALTER COLUMN error SET STORAGE EXTERNAL; diff --git a/internal/lookoutv2/swagger.yaml b/internal/lookoutv2/swagger.yaml index 186ac8803c2..7deed115530 100644 --- a/internal/lookoutv2/swagger.yaml +++ b/internal/lookoutv2/swagger.yaml @@ -90,6 +90,7 @@ definitions: - CANCELLED - PREEMPTED - LEASED + - REJECTED x-nullable: false lastTransitionTime: type: string @@ -379,6 +380,44 @@ paths: schema: $ref: "#/definitions/error" + /api/v1/jobError: + post: + operationId: getJobError + consumes: + - application/json + 
parameters: + - name: getJobErrorRequest + required: true + in: body + schema: + type: object + required: + - jobId + properties: + jobId: + type: string + x-nullable: false + produces: + - application/json + responses: + 200: + description: Returns error for specific job (if present) + schema: + type: object + properties: + errorString: + type: string + description: Error for job + x-nullable: false + 400: + description: Error response + schema: + $ref: "#/definitions/error" + default: + description: Error response + schema: + $ref: "#/definitions/error" + /api/v1/jobRunError: post: operationId: getJobRunError diff --git a/pkg/api/api.swagger.go b/pkg/api/api.swagger.go index f922cb52759..b7db2275956 100644 --- a/pkg/api/api.swagger.go +++ b/pkg/api/api.swagger.go @@ -1572,7 +1572,8 @@ func SwaggerJsonTemplate() string { " \"SUBMITTED\",\n" + " \"LEASED\",\n" + " \"PREEMPTED\",\n" + - " \"CANCELLED\"\n" + + " \"CANCELLED\",\n" + + " \"REJECTED\"\n" + " ]\n" + " },\n" + " \"apiJobStatusRequest\": {\n" + diff --git a/pkg/api/api.swagger.json b/pkg/api/api.swagger.json index 422a9fb425e..f5091b05734 100644 --- a/pkg/api/api.swagger.json +++ b/pkg/api/api.swagger.json @@ -1561,7 +1561,8 @@ "SUBMITTED", "LEASED", "PREEMPTED", - "CANCELLED" + "CANCELLED", + "REJECTED" ] }, "apiJobStatusRequest": { diff --git a/pkg/api/submit.pb.go b/pkg/api/submit.pb.go index 1840186f78e..83a137983f6 100644 --- a/pkg/api/submit.pb.go +++ b/pkg/api/submit.pb.go @@ -101,19 +101,21 @@ const ( JobState_LEASED JobState = 7 JobState_PREEMPTED JobState = 8 JobState_CANCELLED JobState = 9 + JobState_REJECTED JobState = 10 ) var JobState_name = map[int32]string{ - 0: "QUEUED", - 1: "PENDING", - 2: "RUNNING", - 3: "SUCCEEDED", - 4: "FAILED", - 5: "UNKNOWN", - 6: "SUBMITTED", - 7: "LEASED", - 8: "PREEMPTED", - 9: "CANCELLED", + 0: "QUEUED", + 1: "PENDING", + 2: "RUNNING", + 3: "SUCCEEDED", + 4: "FAILED", + 5: "UNKNOWN", + 6: "SUBMITTED", + 7: "LEASED", + 8: "PREEMPTED", + 9: "CANCELLED", + 10: "REJECTED", } var JobState_value = map[string]int32{ @@ -127,6 +129,7 @@ var JobState_value = map[string]int32{ "LEASED": 7, "PREEMPTED": 8, "CANCELLED": 9, + "REJECTED": 10, } func (x JobState) String() string { @@ -2081,12 +2084,12 @@ func init() { func init() { proto.RegisterFile("pkg/api/submit.proto", fileDescriptor_e998bacb27df16c1) } var fileDescriptor_e998bacb27df16c1 = []byte{ - // 2972 bytes of a gzipped FileDescriptorProto + // 2981 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xcb, 0x6f, 0x1b, 0xd7, 0xd5, 0xe7, 0x88, 0x7a, 0xf1, 0x50, 0x0f, 0xea, 0xea, 0xe1, 0x11, 0xed, 0x88, 0xf2, 0x24, 0xf1, 0x27, 0x0b, 0x09, 0x15, 0x2b, 0x5f, 0x50, 0xdb, 0x4d, 0xe1, 0x8a, 0x12, 0x6d, 0x4b, 0xb1, 0x65, 0x85, 0xb2, 0x92, 0xa6, 0x28, 0xca, 0x0c, 0x39, 0x57, 0xd4, 0x48, 0xe4, 0x0c, 0x33, 0x33, 0x94, - 0xab, 0x16, 0x01, 0x8a, 0xa2, 0x68, 0x17, 0xdd, 0xa4, 0xe8, 0xae, 0x05, 0xd2, 0x45, 0x77, 0xe9, + 0xab, 0x16, 0x01, 0x8a, 0xa2, 0x68, 0x17, 0xdd, 0x04, 0xe8, 0xae, 0x05, 0x92, 0x45, 0x77, 0xe9, 0xa2, 0x9b, 0xa2, 0xe8, 0x9f, 0x90, 0x65, 0x80, 0xa2, 0x40, 0xba, 0x61, 0x5a, 0xbb, 0x0f, 0x80, 0xbb, 0xee, 0xbb, 0x28, 0xee, 0xb9, 0x77, 0x38, 0x77, 0xf8, 0x10, 0x45, 0xdb, 0x4a, 0x36, 0xdd, 0xe9, 0xfe, 0xee, 0x79, 0xcf, 0xb9, 0xe7, 0x9e, 0x73, 0x29, 0x98, 0xa9, 0x1e, 0x95, 0x56, 0xf4, @@ -2098,176 +2101,177 @@ var fileDescriptor_e998bacb27df16c1 = []byte{ 0xa9, 0x13, 0xe5, 0x25, 0xa1, 0x8e, 0x51, 0xea, 0x96, 0x65, 0x7b, 0xba, 0x67, 0xda, 0x96, 0x2b, 0x76, 0x5f, 0x2d, 0x99, 0xde, 0x41, 
0xad, 0x90, 0x2e, 0xda, 0x95, 0x95, 0x92, 0x5d, 0xb2, 0x03, 0xab, 0xd8, 0x0a, 0x17, 0xf8, 0x97, 0x20, 0x6f, 0xfa, 0x7c, 0x40, 0xf5, 0xb2, 0x77, 0xc0, 0x51, - 0xed, 0xd7, 0x31, 0x98, 0xd9, 0xb2, 0x0b, 0xbb, 0x18, 0x87, 0x1c, 0xfd, 0xa0, 0x46, 0x5d, 0x6f, - 0xd3, 0xa3, 0x15, 0xb2, 0x0a, 0xa3, 0x55, 0xc7, 0xb4, 0x1d, 0xd3, 0x3b, 0x51, 0x95, 0x45, 0x65, - 0x49, 0xc9, 0xcc, 0x35, 0xea, 0x29, 0xe2, 0x63, 0xaf, 0xd8, 0x15, 0xd3, 0xc3, 0xd0, 0xe4, 0x9a, - 0x74, 0xe4, 0x0d, 0x88, 0x59, 0x7a, 0x85, 0xba, 0x55, 0xbd, 0x48, 0xd5, 0xe8, 0xa2, 0xb2, 0x14, - 0xcb, 0x5c, 0x68, 0xd4, 0x53, 0xd3, 0x4d, 0x50, 0xe2, 0x0a, 0x28, 0xc9, 0xeb, 0x10, 0x2b, 0x96, - 0x4d, 0x6a, 0x79, 0x79, 0xd3, 0x50, 0x47, 0x91, 0x0d, 0x75, 0x71, 0x70, 0xd3, 0x90, 0x75, 0xf9, - 0x18, 0xd9, 0x85, 0xe1, 0xb2, 0x5e, 0xa0, 0x65, 0x57, 0x1d, 0x5c, 0x8c, 0x2e, 0xc5, 0x57, 0x5f, - 0x4e, 0xeb, 0x55, 0x33, 0xdd, 0xc9, 0x95, 0xf4, 0x3d, 0xa4, 0xcb, 0x5a, 0x9e, 0x73, 0x92, 0x99, - 0x69, 0xd4, 0x53, 0x09, 0xce, 0x28, 0x89, 0x15, 0xa2, 0x48, 0x09, 0xe2, 0x52, 0x9c, 0xd5, 0x21, - 0x94, 0xbc, 0xdc, 0x5d, 0xf2, 0x5a, 0x40, 0xcc, 0xc5, 0xcf, 0x37, 0xea, 0xa9, 0x59, 0x49, 0x84, - 0xa4, 0x43, 0x96, 0x4c, 0x7e, 0xaa, 0xc0, 0x8c, 0x43, 0x3f, 0xa8, 0x99, 0x0e, 0x35, 0xf2, 0x96, - 0x6d, 0xd0, 0xbc, 0x70, 0x66, 0x18, 0x55, 0x5e, 0xeb, 0xae, 0x32, 0x27, 0xb8, 0xb6, 0x6d, 0x83, - 0xca, 0x8e, 0x69, 0x8d, 0x7a, 0xea, 0x92, 0xd3, 0xb6, 0x19, 0x18, 0xa0, 0x2a, 0x39, 0xd2, 0xbe, - 0x4f, 0x1e, 0xc0, 0x68, 0xd5, 0x36, 0xf2, 0x6e, 0x95, 0x16, 0xd5, 0x81, 0x45, 0x65, 0x29, 0xbe, - 0x7a, 0x31, 0xcd, 0x13, 0x14, 0x6d, 0x60, 0x49, 0x9c, 0x3e, 0xbe, 0x96, 0xde, 0xb1, 0x8d, 0xdd, - 0x2a, 0x2d, 0xe2, 0xf7, 0x9c, 0xaa, 0xf2, 0x45, 0x48, 0xf6, 0x88, 0x00, 0xc9, 0x0e, 0xc4, 0x7c, - 0x81, 0xae, 0x3a, 0x82, 0xee, 0x9c, 0x2a, 0x91, 0xa7, 0x15, 0x5f, 0xb8, 0xa1, 0xb4, 0x12, 0x18, - 0x59, 0x87, 0x11, 0xd3, 0x2a, 0x39, 0xd4, 0x75, 0xd5, 0x18, 0xca, 0x23, 0x28, 0x68, 0x93, 0x63, - 0xeb, 0xb6, 0xb5, 0x6f, 0x96, 0x32, 0xb3, 0xcc, 0x30, 0x41, 0x26, 0x49, 0xf1, 0x39, 0xc9, 0x6d, - 0x18, 0x75, 0xa9, 0x73, 0x6c, 0x16, 0xa9, 0xab, 0x82, 0x24, 0x65, 0x97, 0x83, 0x42, 0x0a, 0x1a, - 0xe3, 0xd3, 0xc9, 0xc6, 0xf8, 0x18, 0xcb, 0x71, 0xb7, 0x78, 0x40, 0x8d, 0x5a, 0x99, 0x3a, 0x6a, - 0x3c, 0xc8, 0xf1, 0x26, 0x28, 0xe7, 0x78, 0x13, 0x4c, 0xea, 0x10, 0x97, 0xbe, 0x16, 0x79, 0x11, - 0xa2, 0x47, 0x94, 0x1f, 0xac, 0x58, 0x66, 0xaa, 0x51, 0x4f, 0x8d, 0x1f, 0x51, 0xf9, 0x4c, 0xb1, - 0x5d, 0x72, 0x15, 0x86, 0x8e, 0xf5, 0x72, 0x8d, 0xe2, 0x77, 0x89, 0x65, 0xa6, 0x1b, 0xf5, 0xd4, - 0x24, 0x02, 0x12, 0x21, 0xa7, 0xb8, 0x39, 0x70, 0x5d, 0x49, 0xee, 0x43, 0xa2, 0x35, 0x1f, 0xcf, - 0x45, 0x4f, 0x05, 0x2e, 0x74, 0x49, 0xc2, 0xf3, 0x50, 0xa7, 0xfd, 0x3b, 0x0a, 0xe3, 0xa1, 0x4f, - 0x4d, 0x6e, 0xc2, 0xa0, 0x77, 0x52, 0xa5, 0xa8, 0x66, 0x62, 0x35, 0x21, 0x27, 0xc3, 0xc3, 0x93, - 0x2a, 0xc5, 0x33, 0x3e, 0xc1, 0x28, 0x42, 0x09, 0x8a, 0x3c, 0x4c, 0x79, 0xd5, 0x76, 0x3c, 0x57, - 0x1d, 0x58, 0x8c, 0x2e, 0x8d, 0x73, 0xe5, 0x08, 0xc8, 0xca, 0x11, 0x20, 0xef, 0x87, 0x8b, 0x41, - 0x14, 0x93, 0xe6, 0xc5, 0xf6, 0xd4, 0x7b, 0xfa, 0x2a, 0x70, 0x03, 0xe2, 0x5e, 0xd9, 0xcd, 0x53, - 0x4b, 0x2f, 0x94, 0xa9, 0xa1, 0x0e, 0x2e, 0x2a, 0x4b, 0xa3, 0x19, 0xb5, 0x51, 0x4f, 0xcd, 0x78, - 0x2c, 0xa2, 0x88, 0x4a, 0xbc, 0x10, 0xa0, 0x58, 0x33, 0xa9, 0xe3, 0xe5, 0x59, 0x15, 0x55, 0x87, - 0xa4, 0x9a, 0x49, 0x1d, 0x6f, 0x5b, 0xaf, 0xd0, 0x50, 0xcd, 0x14, 0x18, 0xb9, 0x05, 0xe3, 0x35, - 0x97, 0xe6, 0x8b, 0xe5, 0x9a, 0xeb, 0x51, 0x67, 0x73, 0x47, 0x1d, 0x46, 0x8d, 0xc9, 0x46, 0x3d, - 0x35, 0x57, 0x73, 0xe9, 0xba, 0x8f, 0x4b, 0xcc, 0x63, 0x32, 
0xfe, 0x65, 0xa5, 0x98, 0xe6, 0xc1, - 0x78, 0xe8, 0x5c, 0x92, 0xeb, 0x1d, 0x3e, 0xb9, 0xa0, 0xc0, 0x4f, 0x4e, 0xda, 0x3f, 0x79, 0xdf, - 0x1f, 0x5c, 0xfb, 0x8b, 0x02, 0x89, 0xd6, 0x9a, 0xcb, 0xf8, 0x3f, 0xa8, 0xd1, 0x1a, 0x15, 0x0e, - 0x22, 0x3f, 0x02, 0x32, 0x3f, 0x02, 0xe4, 0xff, 0x01, 0x0e, 0xed, 0x42, 0xde, 0xa5, 0x78, 0x91, - 0x0d, 0x04, 0x1f, 0xe5, 0xd0, 0x2e, 0xec, 0xd2, 0x96, 0x8b, 0xcc, 0xc7, 0x88, 0x01, 0x53, 0x8c, - 0xcb, 0xe1, 0xfa, 0xf2, 0x8c, 0xc0, 0x4f, 0xb6, 0xf9, 0xae, 0xd7, 0x40, 0xe6, 0x85, 0x46, 0x3d, - 0x35, 0x7f, 0x68, 0x17, 0x24, 0x4c, 0xf6, 0x68, 0xb2, 0x65, 0x4b, 0xfb, 0x8d, 0x02, 0x53, 0x5b, - 0x76, 0x61, 0xc7, 0xa1, 0x8c, 0xe0, 0x4b, 0x73, 0xee, 0x55, 0x18, 0x61, 0x5c, 0xa6, 0xc1, 0x5d, - 0x8a, 0xf1, 0xfb, 0xf7, 0xd0, 0x2e, 0x6c, 0x1a, 0xa1, 0xfb, 0x97, 0x23, 0xda, 0x7f, 0xf8, 0x17, - 0x58, 0xd7, 0xad, 0x22, 0x2d, 0xfb, 0x46, 0x2e, 0xc3, 0x30, 0x97, 0x21, 0x5b, 0x89, 0x0c, 0xb2, - 0x95, 0x08, 0x3c, 0xa5, 0x95, 0xcd, 0x30, 0x44, 0x7b, 0x86, 0x41, 0x72, 0x68, 0xb0, 0xb7, 0x43, - 0xe4, 0x15, 0x18, 0x76, 0xa8, 0xee, 0xda, 0x96, 0x38, 0xa3, 0x48, 0xcd, 0x11, 0x99, 0x9a, 0x23, - 0xda, 0x3f, 0x14, 0x98, 0xde, 0x42, 0xa3, 0xc2, 0x11, 0x08, 0x7b, 0xa5, 0xf4, 0xeb, 0xd5, 0x40, - 0x4f, 0xaf, 0x6e, 0xc1, 0xf0, 0xbe, 0x59, 0xf6, 0xa8, 0x83, 0x11, 0x88, 0xaf, 0x4e, 0x35, 0x13, - 0x8f, 0x7a, 0xb7, 0x71, 0x83, 0x5b, 0xce, 0x89, 0x64, 0xcb, 0x39, 0x22, 0xf9, 0x39, 0x78, 0x06, - 0x3f, 0xdf, 0x82, 0x31, 0x59, 0x36, 0xf9, 0x3a, 0x0c, 0xbb, 0x9e, 0xee, 0x51, 0x57, 0x55, 0x16, - 0xa3, 0x4b, 0x13, 0xab, 0xe3, 0x4d, 0xf5, 0x0c, 0xe5, 0xc2, 0x38, 0x81, 0x2c, 0x8c, 0x23, 0xda, - 0x17, 0x93, 0x10, 0xdd, 0xb2, 0x0b, 0x64, 0x11, 0x06, 0x9a, 0xc1, 0x49, 0x34, 0xea, 0xa9, 0x31, - 0x53, 0x0e, 0xcb, 0x80, 0x69, 0x84, 0xfb, 0xcc, 0xf1, 0x33, 0xf6, 0x99, 0xe7, 0x9e, 0x51, 0xa1, - 0xa6, 0x79, 0xe4, 0xcc, 0x4d, 0x73, 0xa6, 0xd9, 0xff, 0xf2, 0x9e, 0x68, 0xc6, 0x8f, 0x59, 0x1f, - 0xed, 0xee, 0x3b, 0xe1, 0x1b, 0x0e, 0xc2, 0x45, 0xe7, 0xe9, 0xef, 0xb5, 0xe3, 0x2e, 0xcd, 0x6d, - 0x1c, 0x15, 0x2c, 0x36, 0x15, 0x3c, 0xef, 0x5e, 0xf6, 0x2a, 0x0c, 0xd9, 0x8f, 0x2c, 0xea, 0x88, - 0x21, 0x02, 0xa3, 0x8e, 0x80, 0x1c, 0x75, 0x04, 0x08, 0x85, 0x8b, 0x18, 0xfe, 0x3c, 0x2e, 0xdd, - 0x03, 0xb3, 0x9a, 0xaf, 0xb9, 0xd4, 0xc9, 0x97, 0x1c, 0xbb, 0x56, 0x75, 0xd5, 0x49, 0x3c, 0xdb, - 0x57, 0x1a, 0xf5, 0x94, 0x86, 0x64, 0x0f, 0x7c, 0xaa, 0x3d, 0x97, 0x3a, 0x77, 0x90, 0x46, 0x92, - 0xa9, 0x76, 0xa3, 0x21, 0x3f, 0x56, 0xe0, 0x4a, 0xd1, 0xae, 0x54, 0x59, 0xb7, 0x40, 0x8d, 0xfc, - 0x69, 0x2a, 0xa7, 0x17, 0x95, 0xa5, 0xb1, 0xcc, 0x6b, 0x8d, 0x7a, 0xea, 0x95, 0x80, 0xe3, 0xed, - 0xde, 0xca, 0xb5, 0xde, 0xd4, 0xa1, 0x61, 0x6e, 0xf0, 0x8c, 0xc3, 0x9c, 0x3c, 0x18, 0x0c, 0x3d, - 0xf7, 0xc1, 0x60, 0xec, 0x79, 0x0c, 0x06, 0xbf, 0x54, 0x60, 0x51, 0xb4, 0xd8, 0xa6, 0x55, 0xca, - 0x3b, 0xd4, 0xb5, 0x6b, 0x4e, 0x91, 0xe6, 0x45, 0x6a, 0x54, 0xa8, 0xe5, 0xb9, 0xea, 0x2c, 0xda, - 0xbe, 0xd4, 0x49, 0x53, 0x4e, 0x30, 0xe4, 0x24, 0xfa, 0xcc, 0x95, 0x4f, 0xeb, 0xa9, 0x48, 0xa3, - 0x9e, 0x5a, 0x08, 0x24, 0x77, 0xa2, 0xcb, 0xf5, 0xd8, 0x27, 0x9b, 0x30, 0x52, 0x74, 0x28, 0x9b, - 0xe6, 0xb1, 0xcd, 0x8a, 0xaf, 0x26, 0xd3, 0x7c, 0x9c, 0x4f, 0xfb, 0x73, 0x7a, 0xfa, 0xa1, 0xff, - 0x7a, 0x90, 0x99, 0x16, 0x4a, 0x7d, 0x96, 0x8f, 0xbe, 0x48, 0x29, 0x39, 0x7f, 0x21, 0x0f, 0x40, - 0x13, 0xcf, 0x65, 0x00, 0x4a, 0x3c, 0xc3, 0x00, 0xf4, 0x1d, 0x88, 0x1f, 0x5d, 0x77, 0xf3, 0xbe, - 0x41, 0x53, 0x28, 0xea, 0xb2, 0x1c, 0xde, 0xe0, 0x51, 0x83, 0x05, 0x59, 0x58, 0xc9, 0xfb, 0xda, - 0xa3, 0xeb, 0xee, 0x66, 0x9b, 0x89, 0x10, 0xa0, 0xac, 0x24, 0x31, 0xe9, 0x42, 0x9b, 
0x4a, 0xba, - 0xa7, 0x89, 0xb0, 0xbb, 0x29, 0x57, 0xac, 0x5b, 0xe4, 0x0a, 0x34, 0x3c, 0xb6, 0xcd, 0x9c, 0x75, - 0x6c, 0x23, 0x9b, 0x30, 0xc5, 0xcf, 0xac, 0xe7, 0x95, 0xf3, 0x2e, 0x2d, 0xda, 0x96, 0xe1, 0xaa, - 0x73, 0x8b, 0xca, 0x52, 0x94, 0x77, 0x60, 0xb8, 0xf9, 0xd0, 0x2b, 0xef, 0xf2, 0x2d, 0xb9, 0x03, - 0x6b, 0xd9, 0xfa, 0xdf, 0x04, 0xf8, 0xd4, 0xd3, 0xc0, 0x3f, 0x15, 0x98, 0xdb, 0x62, 0xfd, 0xac, - 0xa8, 0x4d, 0xe6, 0xf7, 0xa9, 0xdf, 0x19, 0x49, 0xed, 0x98, 0x72, 0x86, 0x76, 0xec, 0xdc, 0x2f, - 0xf3, 0x37, 0x61, 0xcc, 0xa2, 0x8f, 0xf2, 0x2d, 0xc5, 0x16, 0xef, 0x4d, 0x8b, 0x3e, 0xda, 0x69, - 0xaf, 0xb7, 0x71, 0x09, 0xd6, 0x7e, 0x3b, 0x00, 0x17, 0xda, 0x1c, 0x75, 0xab, 0xb6, 0xe5, 0x52, - 0xf2, 0x2b, 0x05, 0x54, 0x27, 0xd8, 0xc0, 0x4f, 0xcc, 0x2a, 0x5e, 0xad, 0xec, 0x71, 0xdf, 0xe3, - 0xab, 0x37, 0xfc, 0x8b, 0xb5, 0x93, 0x80, 0x74, 0xae, 0x85, 0x39, 0xc7, 0x79, 0xf9, 0x8d, 0xfb, - 0x72, 0xa3, 0x9e, 0xba, 0xec, 0x74, 0xa6, 0x90, 0xac, 0xbd, 0xd0, 0x85, 0x24, 0xe9, 0xc0, 0xa5, - 0xd3, 0xe4, 0x9f, 0x4b, 0x5a, 0x58, 0x30, 0x2b, 0x8d, 0x46, 0xdc, 0x4b, 0x7c, 0xba, 0xec, 0x67, - 0x60, 0xb8, 0x0a, 0x43, 0xd4, 0x71, 0x6c, 0x47, 0xd6, 0x89, 0x80, 0x4c, 0x8a, 0x80, 0xf6, 0x21, - 0x4e, 0x50, 0x61, 0x7d, 0xe4, 0x00, 0x08, 0x9f, 0xde, 0xf8, 0x5a, 0x8c, 0x6f, 0xfc, 0x7b, 0x24, - 0x5b, 0xc7, 0xb7, 0xc0, 0xc6, 0xcc, 0x42, 0xa3, 0x9e, 0x4a, 0xe2, 0x90, 0x16, 0x80, 0x72, 0xa4, - 0x13, 0xad, 0x7b, 0xda, 0x9f, 0x47, 0x61, 0x08, 0x2f, 0x78, 0x72, 0x05, 0x06, 0x71, 0xec, 0xe7, - 0xde, 0xe1, 0xe8, 0x6b, 0x85, 0x47, 0x7e, 0xdc, 0x27, 0x59, 0x98, 0xf4, 0x13, 0x31, 0xbf, 0xaf, - 0x17, 0x3d, 0xe1, 0xa5, 0x92, 0xb9, 0xd4, 0xa8, 0xa7, 0x54, 0x7f, 0xeb, 0x36, 0xee, 0x48, 0xcc, - 0x13, 0xe1, 0x1d, 0x72, 0x03, 0xe2, 0xd8, 0xa7, 0xf0, 0xb6, 0x45, 0xcc, 0x71, 0x58, 0x75, 0x19, - 0xcc, 0xdb, 0x0d, 0xb9, 0xea, 0x06, 0x28, 0x3b, 0x0e, 0xd8, 0xdd, 0xf8, 0xbc, 0x7c, 0x64, 0xc2, - 0xe3, 0x80, 0x78, 0x1b, 0x73, 0x5c, 0x82, 0x49, 0x09, 0x26, 0x9b, 0x57, 0x7a, 0xd9, 0xac, 0x98, - 0x9e, 0xff, 0x22, 0xbb, 0x80, 0x81, 0xc5, 0x60, 0x34, 0xef, 0xf0, 0x7b, 0x48, 0xc0, 0xb3, 0x99, - 0x05, 0x57, 0x75, 0x42, 0x1b, 0xa1, 0x96, 0x64, 0x22, 0xbc, 0x47, 0x7e, 0xa7, 0xc0, 0x95, 0x16, - 0x4d, 0xf9, 0xc2, 0x49, 0xf3, 0x14, 0xe7, 0x8b, 0x65, 0xdd, 0x75, 0xf9, 0x53, 0xcb, 0x88, 0xf4, - 0x3e, 0xdb, 0xc9, 0x80, 0xcc, 0x89, 0x7f, 0x9a, 0xd7, 0x19, 0xd3, 0xb6, 0x5e, 0xa1, 0xdc, 0xa6, - 0xab, 0xe2, 0x86, 0xbf, 0xec, 0xf4, 0xa2, 0xcf, 0xf5, 0x26, 0x21, 0xbb, 0x10, 0xaf, 0x52, 0xa7, - 0x62, 0xba, 0x2e, 0x76, 0xee, 0xfc, 0xd5, 0x78, 0x4e, 0xb2, 0x6a, 0x27, 0xd8, 0xe5, 0xf1, 0x96, - 0xc8, 0xe5, 0x78, 0x4b, 0x70, 0xf2, 0x5f, 0x0a, 0xc4, 0x25, 0x3e, 0x92, 0x83, 0x51, 0xb7, 0x56, - 0x38, 0xa4, 0xc5, 0x66, 0x85, 0x59, 0xe8, 0xac, 0x21, 0xbd, 0xcb, 0xc9, 0x44, 0xf7, 0x20, 0x78, - 0x42, 0xdd, 0x83, 0xc0, 0xf0, 0x8c, 0x53, 0xa7, 0xc0, 0x9f, 0x63, 0xfc, 0x33, 0xce, 0x80, 0xd0, - 0x19, 0x67, 0x40, 0xf2, 0x3d, 0x18, 0x11, 0x72, 0x59, 0xc6, 0x1f, 0x99, 0x96, 0x21, 0x67, 0x3c, - 0x5b, 0xcb, 0x19, 0xcf, 0xd6, 0xcd, 0x93, 0x31, 0x70, 0xfa, 0xc9, 0x48, 0x9a, 0x30, 0xdd, 0x21, - 0x6f, 0x9e, 0xa2, 0x4a, 0x29, 0x3d, 0xef, 0xca, 0x8f, 0x15, 0xb8, 0x72, 0xb6, 0x14, 0x39, 0x9b, - 0xfa, 0xb7, 0x64, 0xf5, 0xfe, 0x30, 0x15, 0x12, 0xd8, 0xa2, 0xad, 0x57, 0x19, 0xfd, 0xc9, 0x10, - 0x5c, 0x3c, 0x85, 0x9f, 0x35, 0xd9, 0xf3, 0x15, 0xfd, 0x7b, 0x66, 0xa5, 0x56, 0x09, 0x3a, 0xec, - 0x7d, 0x47, 0x2f, 0xb2, 0x22, 0x2f, 0xf2, 0xe2, 0x1b, 0xbd, 0xac, 0x48, 0xdf, 0xe7, 0x12, 0x7c, - 0xf4, 0xb6, 0xe0, 0xe7, 0x67, 0x23, 0x25, 0xce, 0xc6, 0x85, 0x4a, 0x67, 0xaa, 0x5c, 0xb7, 0x0d, - 0xf2, 0x7b, 
0x05, 0x2e, 0x77, 0x35, 0x0e, 0xcf, 0xb0, 0x6d, 0x97, 0x31, 0xd7, 0xe2, 0xab, 0xeb, - 0x4f, 0x6b, 0x64, 0xe6, 0x64, 0xc7, 0xb6, 0xcb, 0xe2, 0xa2, 0x14, 0xa6, 0xbe, 0x50, 0x39, 0x8d, - 0x36, 0x77, 0xfa, 0x36, 0xbb, 0x2e, 0x4f, 0x0b, 0xc8, 0x79, 0x25, 0xa2, 0xd6, 0xdb, 0xc1, 0xb3, - 0xa9, 0x7e, 0x10, 0x4e, 0xc2, 0x97, 0xda, 0x23, 0x8b, 0x51, 0xe8, 0x2f, 0x11, 0xff, 0x30, 0x00, - 0xa9, 0x1e, 0x32, 0xc8, 0xc7, 0x67, 0x48, 0xc6, 0xb5, 0xb3, 0x58, 0x73, 0x4e, 0x09, 0xf9, 0x55, - 0x7c, 0x59, 0x2d, 0x0b, 0x31, 0x2c, 0xc9, 0xf7, 0x4c, 0xd7, 0x23, 0xd7, 0x61, 0x18, 0x5b, 0x51, - 0xbf, 0x64, 0x43, 0x50, 0xb2, 0x79, 0x73, 0xcc, 0x77, 0xe5, 0xe6, 0x98, 0x23, 0xda, 0x1e, 0x10, - 0xfe, 0xec, 0x58, 0x96, 0xfa, 0x37, 0x72, 0x0b, 0xc6, 0x8b, 0x1c, 0xa5, 0x86, 0xd4, 0x67, 0xe3, - 0x6f, 0x06, 0xcd, 0x8d, 0x70, 0xb7, 0x3d, 0x26, 0xe3, 0xda, 0x0d, 0x98, 0x44, 0xed, 0x77, 0x68, - 0xf3, 0xd9, 0xf9, 0x8c, 0x0d, 0x8c, 0x76, 0x0b, 0xd4, 0x5d, 0xcf, 0xa1, 0x7a, 0xc5, 0xb4, 0x4a, - 0xad, 0x32, 0x5e, 0x84, 0xa8, 0x55, 0xab, 0xa0, 0x88, 0x71, 0x1e, 0x48, 0xab, 0x56, 0x91, 0x03, - 0x69, 0xd5, 0x2a, 0xda, 0x9b, 0x40, 0x90, 0x6f, 0x83, 0x96, 0xa9, 0x47, 0xfb, 0x55, 0xff, 0x89, - 0x02, 0xc0, 0xdf, 0x29, 0x37, 0xad, 0x7d, 0xfb, 0xcc, 0x6d, 0xd7, 0x0d, 0x88, 0x63, 0x44, 0x8d, - 0xfc, 0xa1, 0x8d, 0x17, 0x9d, 0xb2, 0x34, 0xc4, 0xfb, 0x25, 0x0e, 0x6f, 0xd9, 0xa1, 0xdb, 0x0e, - 0x02, 0x94, 0xb1, 0x96, 0xa9, 0xee, 0xfa, 0xac, 0xd1, 0x80, 0x95, 0xc3, 0xad, 0xac, 0x01, 0xaa, - 0x3d, 0x82, 0x69, 0x74, 0x75, 0xaf, 0x6a, 0xe8, 0x5e, 0x30, 0x36, 0xbc, 0x21, 0xbf, 0xf0, 0x87, - 0xb3, 0xe1, 0xb4, 0x39, 0xa6, 0x8f, 0xb6, 0xb8, 0x06, 0x6a, 0x46, 0xf7, 0x8a, 0x07, 0x9d, 0xb4, - 0xbf, 0x07, 0xe3, 0xfb, 0xba, 0x59, 0xf6, 0x5f, 0xbe, 0xfc, 0x9c, 0x54, 0x03, 0x2b, 0xc2, 0x0c, - 0x3c, 0xad, 0x38, 0xcb, 0xdb, 0xad, 0x79, 0x3a, 0x26, 0xe3, 0x4d, 0x7f, 0xd7, 0xf1, 0x8d, 0xe4, - 0xab, 0xf2, 0xb7, 0x45, 0x7b, 0x6f, 0x7f, 0xc3, 0x0c, 0x7d, 0xf8, 0x1b, 0x87, 0x58, 0xd6, 0x32, - 0xee, 0xeb, 0xce, 0x11, 0x75, 0xb4, 0x8f, 0x14, 0x98, 0x0d, 0x9f, 0x8c, 0xfb, 0xd4, 0x75, 0xf5, - 0x12, 0x25, 0x5f, 0xeb, 0xcf, 0xff, 0xbb, 0x91, 0xe0, 0x19, 0x3a, 0x4a, 0x2d, 0x43, 0x14, 0xf4, - 0x09, 0x64, 0x6b, 0xea, 0xe3, 0xe7, 0x8b, 0xca, 0x0d, 0xd7, 0xdd, 0x48, 0x8e, 0xd1, 0x67, 0x46, - 0x60, 0x88, 0x1e, 0x53, 0xcb, 0x5b, 0x4e, 0x42, 0x5c, 0xfa, 0x0d, 0x96, 0xc4, 0x61, 0x44, 0x2c, - 0x13, 0x91, 0xe5, 0xab, 0x10, 0x97, 0x7e, 0xac, 0x23, 0x63, 0x30, 0xba, 0x6d, 0x1b, 0x74, 0xc7, - 0x76, 0xbc, 0x44, 0x84, 0xad, 0xee, 0x52, 0xdd, 0x28, 0x33, 0x52, 0x65, 0xf9, 0xe7, 0x0a, 0x8c, - 0xfa, 0x0f, 0xff, 0x04, 0x60, 0xf8, 0xed, 0xbd, 0xec, 0x5e, 0x76, 0x23, 0x11, 0x61, 0x02, 0x77, - 0xb2, 0xdb, 0x1b, 0x9b, 0xdb, 0x77, 0x12, 0x0a, 0x5b, 0xe4, 0xf6, 0xb6, 0xb7, 0xd9, 0x62, 0x80, - 0x8c, 0x43, 0x6c, 0x77, 0x6f, 0x7d, 0x3d, 0x9b, 0xdd, 0xc8, 0x6e, 0x24, 0xa2, 0x8c, 0xe9, 0xf6, - 0xda, 0xe6, 0xbd, 0xec, 0x46, 0x62, 0x90, 0xd1, 0xed, 0x6d, 0xbf, 0xb5, 0xfd, 0xe0, 0xdd, 0xed, - 0xc4, 0x10, 0xa7, 0xcb, 0xdc, 0xdf, 0x7c, 0xf8, 0x30, 0xbb, 0x91, 0x18, 0x66, 0x74, 0xf7, 0xb2, - 0x6b, 0xbb, 0xd9, 0x8d, 0xc4, 0x08, 0xdb, 0xda, 0xc9, 0x65, 0xb3, 0xf7, 0x77, 0xd8, 0xd6, 0x28, - 0x5b, 0xae, 0xaf, 0x6d, 0xaf, 0x67, 0xef, 0x31, 0x29, 0xb1, 0xd5, 0x3f, 0x46, 0x61, 0x0c, 0x43, - 0xe8, 0x3f, 0x26, 0xbd, 0x0e, 0x71, 0xfe, 0x1d, 0xf9, 0x3c, 0x26, 0x05, 0x39, 0x39, 0xd7, 0xf6, - 0xc8, 0x97, 0x65, 0xe1, 0xd2, 0x22, 0xe4, 0x16, 0x8c, 0x49, 0x4c, 0x2e, 0x99, 0x08, 0xb8, 0x58, - 0xe1, 0x4e, 0xbe, 0x80, 0xeb, 0x6e, 0xa9, 0xa5, 0x45, 0x98, 0x56, 0x7e, 0x5a, 0xfa, 0xd4, 0x2a, - 0x31, 0xf5, 0xd6, 0x1a, 0x3e, 0x8f, 
0x5a, 0x84, 0x7c, 0x13, 0xe2, 0xbc, 0x7a, 0x72, 0xad, 0x17, - 0x02, 0xfe, 0x50, 0x51, 0x3d, 0xc5, 0x84, 0x34, 0x8c, 0xde, 0xa1, 0x1e, 0x67, 0x9f, 0x09, 0xd8, - 0x83, 0x5a, 0x9e, 0x94, 0x5c, 0xd1, 0x22, 0x64, 0x0b, 0x62, 0x3e, 0xbd, 0x4b, 0xb8, 0x7d, 0xdd, - 0x6e, 0x81, 0x64, 0xb2, 0xc3, 0xb6, 0x38, 0x0a, 0x5a, 0xe4, 0x35, 0x65, 0xf5, 0x67, 0x31, 0x18, - 0xe6, 0xd3, 0x37, 0x79, 0x07, 0x80, 0xff, 0x85, 0x95, 0x76, 0xb6, 0xe3, 0x4f, 0xab, 0xc9, 0xb9, - 0xce, 0x23, 0xbb, 0x36, 0xff, 0xa3, 0x3f, 0xfd, 0xfd, 0x17, 0x03, 0xd3, 0xda, 0xc4, 0xca, 0xf1, - 0xb5, 0x95, 0x43, 0xbb, 0x20, 0xfe, 0x71, 0xec, 0xa6, 0xb2, 0x4c, 0xde, 0x05, 0xe0, 0xd7, 0x66, - 0x58, 0x6e, 0xe8, 0x17, 0xbc, 0x24, 0x0f, 0x5b, 0xfb, 0xf5, 0xda, 0x2e, 0x98, 0xdf, 0x9d, 0x4c, - 0xf0, 0x77, 0x61, 0xac, 0x29, 0x78, 0x97, 0x7a, 0x44, 0x95, 0x7e, 0x94, 0x0b, 0x4b, 0xef, 0x16, - 0xfb, 0x4b, 0x28, 0x7c, 0x4e, 0x9b, 0x12, 0xc2, 0x5d, 0xea, 0x49, 0xf2, 0x2d, 0x48, 0xc8, 0x0f, - 0x45, 0x68, 0xfe, 0xc5, 0xce, 0x4f, 0x48, 0x5c, 0xcd, 0xa5, 0xd3, 0xde, 0x97, 0xb4, 0x14, 0x2a, - 0x9b, 0xd7, 0x66, 0x7c, 0x4f, 0xa4, 0xb7, 0x22, 0xca, 0xf4, 0xbd, 0x07, 0x71, 0xf1, 0xf3, 0x33, - 0xaa, 0x6a, 0x86, 0x3a, 0xfc, 0x9b, 0x74, 0x57, 0x67, 0x92, 0x28, 0x7f, 0x46, 0x9b, 0xf4, 0xe5, - 0x57, 0x39, 0x1f, 0x13, 0x7d, 0xa7, 0xff, 0x03, 0x39, 0x83, 0xe2, 0x26, 0xb4, 0x18, 0x13, 0x87, - 0x25, 0x90, 0x09, 0x2a, 0x3e, 0xdb, 0x21, 0x7d, 0x09, 0x85, 0x2e, 0x68, 0xf3, 0x4c, 0x68, 0x81, - 0x51, 0x51, 0x63, 0x85, 0xbf, 0xe2, 0x8b, 0x1b, 0x81, 0x29, 0xd9, 0xee, 0xff, 0x20, 0x5f, 0x44, - 0xc1, 0xb3, 0xc9, 0x44, 0xd3, 0xda, 0x95, 0x1f, 0xb0, 0x66, 0xe3, 0x43, 0x61, 0xf4, 0xb3, 0x9c, - 0x71, 0x61, 0x74, 0x32, 0x64, 0x74, 0x0d, 0x69, 0x24, 0xa3, 0xbf, 0xf5, 0x8c, 0x75, 0x40, 0x45, - 0x2d, 0x64, 0xb9, 0xcd, 0x03, 0x72, 0xbb, 0xaf, 0xfa, 0x20, 0xe4, 0x90, 0x76, 0x39, 0xc6, 0x73, - 0xaa, 0x1b, 0x22, 0xd1, 0x08, 0x91, 0xe3, 0xc1, 0x03, 0xf1, 0x9a, 0x42, 0x6e, 0xc2, 0xf0, 0x5d, - 0xfc, 0x07, 0x4a, 0xd2, 0xc5, 0xd3, 0x24, 0x3f, 0xa7, 0x9c, 0x68, 0xfd, 0x80, 0x16, 0x8f, 0x9a, - 0xb7, 0xfd, 0xfb, 0x9f, 0xff, 0x6d, 0x21, 0xf2, 0xc3, 0xc7, 0x0b, 0xca, 0xa7, 0x8f, 0x17, 0x94, - 0xcf, 0x1e, 0x2f, 0x28, 0x7f, 0x7d, 0xbc, 0xa0, 0x7c, 0xf4, 0x64, 0x21, 0xf2, 0xd9, 0x93, 0x85, - 0xc8, 0xe7, 0x4f, 0x16, 0x22, 0xdf, 0xfe, 0x3f, 0xe9, 0x7f, 0x3a, 0x75, 0xa7, 0xa2, 0x1b, 0x7a, - 0xd5, 0xb1, 0x0f, 0x69, 0xd1, 0x13, 0xab, 0x15, 0xf1, 0x4f, 0x9c, 0x9f, 0x0c, 0xcc, 0xac, 0x21, - 0xb0, 0xc3, 0xb7, 0xd3, 0x9b, 0x76, 0x7a, 0xad, 0x6a, 0x16, 0x86, 0xd1, 0x96, 0xd7, 0xff, 0x1b, - 0x00, 0x00, 0xff, 0xff, 0x52, 0xad, 0xfa, 0xb1, 0xe1, 0x2a, 0x00, 0x00, + 0xed, 0x93, 0x18, 0xcc, 0x6c, 0xd9, 0x85, 0x5d, 0x8c, 0x43, 0x8e, 0x7e, 0x50, 0xa3, 0xae, 0xb7, + 0xe9, 0xd1, 0x0a, 0x59, 0x85, 0xd1, 0xaa, 0x63, 0xda, 0x8e, 0xe9, 0x9d, 0xa8, 0xca, 0xa2, 0xb2, + 0xa4, 0x64, 0xe6, 0x1a, 0xf5, 0x14, 0xf1, 0xb1, 0x57, 0xec, 0x8a, 0xe9, 0x61, 0x68, 0x72, 0x4d, + 0x3a, 0xf2, 0x06, 0xc4, 0x2c, 0xbd, 0x42, 0xdd, 0xaa, 0x5e, 0xa4, 0x6a, 0x74, 0x51, 0x59, 0x8a, + 0x65, 0x2e, 0x34, 0xea, 0xa9, 0xe9, 0x26, 0x28, 0x71, 0x05, 0x94, 0xe4, 0x75, 0x88, 0x15, 0xcb, + 0x26, 0xb5, 0xbc, 0xbc, 0x69, 0xa8, 0xa3, 0xc8, 0x86, 0xba, 0x38, 0xb8, 0x69, 0xc8, 0xba, 0x7c, + 0x8c, 0xec, 0xc2, 0x70, 0x59, 0x2f, 0xd0, 0xb2, 0xab, 0x0e, 0x2e, 0x46, 0x97, 0xe2, 0xab, 0x2f, + 0xa7, 0xf5, 0xaa, 0x99, 0xee, 0xe4, 0x4a, 0xfa, 0x1e, 0xd2, 0x65, 0x2d, 0xcf, 0x39, 0xc9, 0xcc, + 0x34, 0xea, 0xa9, 0x04, 0x67, 0x94, 0xc4, 0x0a, 0x51, 0xa4, 0x04, 0x71, 0x29, 0xce, 0xea, 0x10, + 0x4a, 0x5e, 0xee, 0x2e, 0x79, 0x2d, 0x20, 0xe6, 0xe2, 0xe7, 0x1b, 0xf5, 0xd4, 0xac, 
0x24, 0x42, + 0xd2, 0x21, 0x4b, 0x26, 0x3f, 0x57, 0x60, 0xc6, 0xa1, 0x1f, 0xd4, 0x4c, 0x87, 0x1a, 0x79, 0xcb, + 0x36, 0x68, 0x5e, 0x38, 0x33, 0x8c, 0x2a, 0xaf, 0x75, 0x57, 0x99, 0x13, 0x5c, 0xdb, 0xb6, 0x41, + 0x65, 0xc7, 0xb4, 0x46, 0x3d, 0x75, 0xc9, 0x69, 0xdb, 0x0c, 0x0c, 0x50, 0x95, 0x1c, 0x69, 0xdf, + 0x27, 0x0f, 0x60, 0xb4, 0x6a, 0x1b, 0x79, 0xb7, 0x4a, 0x8b, 0xea, 0xc0, 0xa2, 0xb2, 0x14, 0x5f, + 0xbd, 0x98, 0xe6, 0x09, 0x8a, 0x36, 0xb0, 0x24, 0x4e, 0x1f, 0x5f, 0x4b, 0xef, 0xd8, 0xc6, 0x6e, + 0x95, 0x16, 0xf1, 0x7b, 0x4e, 0x55, 0xf9, 0x22, 0x24, 0x7b, 0x44, 0x80, 0x64, 0x07, 0x62, 0xbe, + 0x40, 0x57, 0x1d, 0x41, 0x77, 0x4e, 0x95, 0xc8, 0xd3, 0x8a, 0x2f, 0xdc, 0x50, 0x5a, 0x09, 0x8c, + 0xac, 0xc3, 0x88, 0x69, 0x95, 0x1c, 0xea, 0xba, 0x6a, 0x0c, 0xe5, 0x11, 0x14, 0xb4, 0xc9, 0xb1, + 0x75, 0xdb, 0xda, 0x37, 0x4b, 0x99, 0x59, 0x66, 0x98, 0x20, 0x93, 0xa4, 0xf8, 0x9c, 0xe4, 0x36, + 0x8c, 0xba, 0xd4, 0x39, 0x36, 0x8b, 0xd4, 0x55, 0x41, 0x92, 0xb2, 0xcb, 0x41, 0x21, 0x05, 0x8d, + 0xf1, 0xe9, 0x64, 0x63, 0x7c, 0x8c, 0xe5, 0xb8, 0x5b, 0x3c, 0xa0, 0x46, 0xad, 0x4c, 0x1d, 0x35, + 0x1e, 0xe4, 0x78, 0x13, 0x94, 0x73, 0xbc, 0x09, 0x26, 0x75, 0x88, 0x4b, 0x5f, 0x8b, 0xbc, 0x08, + 0xd1, 0x23, 0xca, 0x0f, 0x56, 0x2c, 0x33, 0xd5, 0xa8, 0xa7, 0xc6, 0x8f, 0xa8, 0x7c, 0xa6, 0xd8, + 0x2e, 0xb9, 0x0a, 0x43, 0xc7, 0x7a, 0xb9, 0x46, 0xf1, 0xbb, 0xc4, 0x32, 0xd3, 0x8d, 0x7a, 0x6a, + 0x12, 0x01, 0x89, 0x90, 0x53, 0xdc, 0x1c, 0xb8, 0xae, 0x24, 0xf7, 0x21, 0xd1, 0x9a, 0x8f, 0xe7, + 0xa2, 0xa7, 0x02, 0x17, 0xba, 0x24, 0xe1, 0x79, 0xa8, 0xd3, 0xfe, 0x1d, 0x85, 0xf1, 0xd0, 0xa7, + 0x26, 0x37, 0x61, 0xd0, 0x3b, 0xa9, 0x52, 0x54, 0x33, 0xb1, 0x9a, 0x90, 0x93, 0xe1, 0xe1, 0x49, + 0x95, 0xe2, 0x19, 0x9f, 0x60, 0x14, 0xa1, 0x04, 0x45, 0x1e, 0xa6, 0xbc, 0x6a, 0x3b, 0x9e, 0xab, + 0x0e, 0x2c, 0x46, 0x97, 0xc6, 0xb9, 0x72, 0x04, 0x64, 0xe5, 0x08, 0x90, 0xf7, 0xc3, 0xc5, 0x20, + 0x8a, 0x49, 0xf3, 0x62, 0x7b, 0xea, 0x3d, 0x7d, 0x15, 0xb8, 0x01, 0x71, 0xaf, 0xec, 0xe6, 0xa9, + 0xa5, 0x17, 0xca, 0xd4, 0x50, 0x07, 0x17, 0x95, 0xa5, 0xd1, 0x8c, 0xda, 0xa8, 0xa7, 0x66, 0x3c, + 0x16, 0x51, 0x44, 0x25, 0x5e, 0x08, 0x50, 0xac, 0x99, 0xd4, 0xf1, 0xf2, 0xac, 0x8a, 0xaa, 0x43, + 0x52, 0xcd, 0xa4, 0x8e, 0xb7, 0xad, 0x57, 0x68, 0xa8, 0x66, 0x0a, 0x8c, 0xdc, 0x82, 0xf1, 0x9a, + 0x4b, 0xf3, 0xc5, 0x72, 0xcd, 0xf5, 0xa8, 0xb3, 0xb9, 0xa3, 0x0e, 0xa3, 0xc6, 0x64, 0xa3, 0x9e, + 0x9a, 0xab, 0xb9, 0x74, 0xdd, 0xc7, 0x25, 0xe6, 0x31, 0x19, 0xff, 0xaa, 0x52, 0x4c, 0xf3, 0x60, + 0x3c, 0x74, 0x2e, 0xc9, 0xf5, 0x0e, 0x9f, 0x5c, 0x50, 0xe0, 0x27, 0x27, 0xed, 0x9f, 0xbc, 0xef, + 0x0f, 0xae, 0xfd, 0x45, 0x81, 0x44, 0x6b, 0xcd, 0x65, 0xfc, 0x1f, 0xd4, 0x68, 0x8d, 0x0a, 0x07, + 0x91, 0x1f, 0x01, 0x99, 0x1f, 0x01, 0xf2, 0xff, 0x00, 0x87, 0x76, 0x21, 0xef, 0x52, 0xbc, 0xc8, + 0x06, 0x82, 0x8f, 0x72, 0x68, 0x17, 0x76, 0x69, 0xcb, 0x45, 0xe6, 0x63, 0xc4, 0x80, 0x29, 0xc6, + 0xe5, 0x70, 0x7d, 0x79, 0x46, 0xe0, 0x27, 0xdb, 0x7c, 0xd7, 0x6b, 0x20, 0xf3, 0x42, 0xa3, 0x9e, + 0x9a, 0x3f, 0xb4, 0x0b, 0x12, 0x26, 0x7b, 0x34, 0xd9, 0xb2, 0xa5, 0xfd, 0x46, 0x81, 0xa9, 0x2d, + 0xbb, 0xb0, 0xe3, 0x50, 0x46, 0xf0, 0x95, 0x39, 0xf7, 0x2a, 0x8c, 0x30, 0x2e, 0xd3, 0xe0, 0x2e, + 0xc5, 0xf8, 0xfd, 0x7b, 0x68, 0x17, 0x36, 0x8d, 0xd0, 0xfd, 0xcb, 0x11, 0xed, 0x3f, 0xfc, 0x0b, + 0xac, 0xeb, 0x56, 0x91, 0x96, 0x7d, 0x23, 0x97, 0x61, 0x98, 0xcb, 0x90, 0xad, 0x44, 0x06, 0xd9, + 0x4a, 0x04, 0x9e, 0xd2, 0xca, 0x66, 0x18, 0xa2, 0x3d, 0xc3, 0x20, 0x39, 0x34, 0xd8, 0xdb, 0x21, + 0xf2, 0x0a, 0x0c, 0x3b, 0x54, 0x77, 0x6d, 0x4b, 0x9c, 0x51, 0xa4, 0xe6, 0x88, 0x4c, 0xcd, 0x11, + 0xed, 0x1f, 
0x0a, 0x4c, 0x6f, 0xa1, 0x51, 0xe1, 0x08, 0x84, 0xbd, 0x52, 0xfa, 0xf5, 0x6a, 0xa0, + 0xa7, 0x57, 0xb7, 0x60, 0x78, 0xdf, 0x2c, 0x7b, 0xd4, 0xc1, 0x08, 0xc4, 0x57, 0xa7, 0x9a, 0x89, + 0x47, 0xbd, 0xdb, 0xb8, 0xc1, 0x2d, 0xe7, 0x44, 0xb2, 0xe5, 0x1c, 0x91, 0xfc, 0x1c, 0x3c, 0x83, + 0x9f, 0x6f, 0xc1, 0x98, 0x2c, 0x9b, 0x7c, 0x13, 0x86, 0x5d, 0x4f, 0xf7, 0xa8, 0xab, 0x2a, 0x8b, + 0xd1, 0xa5, 0x89, 0xd5, 0xf1, 0xa6, 0x7a, 0x86, 0x72, 0x61, 0x9c, 0x40, 0x16, 0xc6, 0x11, 0xed, + 0xcb, 0x49, 0x88, 0x6e, 0xd9, 0x05, 0xb2, 0x08, 0x03, 0xcd, 0xe0, 0x24, 0x1a, 0xf5, 0xd4, 0x98, + 0x29, 0x87, 0x65, 0xc0, 0x34, 0xc2, 0x7d, 0xe6, 0xf8, 0x19, 0xfb, 0xcc, 0x73, 0xcf, 0xa8, 0x50, + 0xd3, 0x3c, 0x72, 0xe6, 0xa6, 0x39, 0xd3, 0xec, 0x7f, 0x79, 0x4f, 0x34, 0xe3, 0xc7, 0xac, 0x8f, + 0x76, 0xf7, 0x9d, 0xf0, 0x0d, 0x07, 0xe1, 0xa2, 0xf3, 0xf4, 0xf7, 0xda, 0x71, 0x97, 0xe6, 0x36, + 0x8e, 0x0a, 0x16, 0x9b, 0x0a, 0x9e, 0x77, 0x2f, 0x7b, 0x15, 0x86, 0xec, 0x47, 0x16, 0x75, 0xc4, + 0x10, 0x81, 0x51, 0x47, 0x40, 0x8e, 0x3a, 0x02, 0x84, 0xc2, 0x45, 0x0c, 0x7f, 0x1e, 0x97, 0xee, + 0x81, 0x59, 0xcd, 0xd7, 0x5c, 0xea, 0xe4, 0x4b, 0x8e, 0x5d, 0xab, 0xba, 0xea, 0x24, 0x9e, 0xed, + 0x2b, 0x8d, 0x7a, 0x4a, 0x43, 0xb2, 0x07, 0x3e, 0xd5, 0x9e, 0x4b, 0x9d, 0x3b, 0x48, 0x23, 0xc9, + 0x54, 0xbb, 0xd1, 0x90, 0x9f, 0x2a, 0x70, 0xa5, 0x68, 0x57, 0xaa, 0xac, 0x5b, 0xa0, 0x46, 0xfe, + 0x34, 0x95, 0xd3, 0x8b, 0xca, 0xd2, 0x58, 0xe6, 0xb5, 0x46, 0x3d, 0xf5, 0x4a, 0xc0, 0xf1, 0x76, + 0x6f, 0xe5, 0x5a, 0x6f, 0xea, 0xd0, 0x30, 0x37, 0x78, 0xc6, 0x61, 0x4e, 0x1e, 0x0c, 0x86, 0x9e, + 0xfb, 0x60, 0x30, 0xf6, 0x3c, 0x06, 0x83, 0x5f, 0x29, 0xb0, 0x28, 0x5a, 0x6c, 0xd3, 0x2a, 0xe5, + 0x1d, 0xea, 0xda, 0x35, 0xa7, 0x48, 0xf3, 0x22, 0x35, 0x2a, 0xd4, 0xf2, 0x5c, 0x75, 0x16, 0x6d, + 0x5f, 0xea, 0xa4, 0x29, 0x27, 0x18, 0x72, 0x12, 0x7d, 0xe6, 0xca, 0x67, 0xf5, 0x54, 0xa4, 0x51, + 0x4f, 0x2d, 0x04, 0x92, 0x3b, 0xd1, 0xe5, 0x7a, 0xec, 0x93, 0x4d, 0x18, 0x29, 0x3a, 0x94, 0x4d, + 0xf3, 0xd8, 0x66, 0xc5, 0x57, 0x93, 0x69, 0x3e, 0xce, 0xa7, 0xfd, 0x39, 0x3d, 0xfd, 0xd0, 0x7f, + 0x3d, 0xc8, 0x4c, 0x0b, 0xa5, 0x3e, 0xcb, 0x47, 0x5f, 0xa6, 0x94, 0x9c, 0xbf, 0x90, 0x07, 0xa0, + 0x89, 0xe7, 0x32, 0x00, 0x25, 0x9e, 0x61, 0x00, 0xfa, 0x1e, 0xc4, 0x8f, 0xae, 0xbb, 0x79, 0xdf, + 0xa0, 0x29, 0x14, 0x75, 0x59, 0x0e, 0x6f, 0xf0, 0xa8, 0xc1, 0x82, 0x2c, 0xac, 0xe4, 0x7d, 0xed, + 0xd1, 0x75, 0x77, 0xb3, 0xcd, 0x44, 0x08, 0x50, 0x56, 0x92, 0x98, 0x74, 0xa1, 0x4d, 0x25, 0xdd, + 0xd3, 0x44, 0xd8, 0xdd, 0x94, 0x2b, 0xd6, 0x2d, 0x72, 0x05, 0x1a, 0x1e, 0xdb, 0x66, 0xce, 0x3a, + 0xb6, 0x91, 0x4d, 0x98, 0xe2, 0x67, 0xd6, 0xf3, 0xca, 0x79, 0x97, 0x16, 0x6d, 0xcb, 0x70, 0xd5, + 0xb9, 0x45, 0x65, 0x29, 0xca, 0x3b, 0x30, 0xdc, 0x7c, 0xe8, 0x95, 0x77, 0xf9, 0x96, 0xdc, 0x81, + 0xb5, 0x6c, 0xfd, 0x6f, 0x02, 0x7c, 0xea, 0x69, 0xe0, 0x9f, 0x0a, 0xcc, 0x6d, 0xb1, 0x7e, 0x56, + 0xd4, 0x26, 0xf3, 0x87, 0xd4, 0xef, 0x8c, 0xa4, 0x76, 0x4c, 0x39, 0x43, 0x3b, 0x76, 0xee, 0x97, + 0xf9, 0x9b, 0x30, 0x66, 0xd1, 0x47, 0xf9, 0x96, 0x62, 0x8b, 0xf7, 0xa6, 0x45, 0x1f, 0xed, 0xb4, + 0xd7, 0xdb, 0xb8, 0x04, 0x6b, 0xbf, 0x1d, 0x80, 0x0b, 0x6d, 0x8e, 0xba, 0x55, 0xdb, 0x72, 0x29, + 0xf9, 0xb5, 0x02, 0xaa, 0x13, 0x6c, 0xe0, 0x27, 0x66, 0x15, 0xaf, 0x56, 0xf6, 0xb8, 0xef, 0xf1, + 0xd5, 0x1b, 0xfe, 0xc5, 0xda, 0x49, 0x40, 0x3a, 0xd7, 0xc2, 0x9c, 0xe3, 0xbc, 0xfc, 0xc6, 0x7d, + 0xb9, 0x51, 0x4f, 0x5d, 0x76, 0x3a, 0x53, 0x48, 0xd6, 0x5e, 0xe8, 0x42, 0x92, 0x74, 0xe0, 0xd2, + 0x69, 0xf2, 0xcf, 0x25, 0x2d, 0x2c, 0x98, 0x95, 0x46, 0x23, 0xee, 0x25, 0x3e, 0x5d, 0xf6, 0x33, + 0x30, 0x5c, 0x85, 0x21, 0xea, 0x38, 
0xb6, 0x23, 0xeb, 0x44, 0x40, 0x26, 0x45, 0x40, 0xfb, 0x10, + 0x27, 0xa8, 0xb0, 0x3e, 0x72, 0x00, 0x84, 0x4f, 0x6f, 0x7c, 0x2d, 0xc6, 0x37, 0xfe, 0x3d, 0x92, + 0xad, 0xe3, 0x5b, 0x60, 0x63, 0x66, 0xa1, 0x51, 0x4f, 0x25, 0x71, 0x48, 0x0b, 0x40, 0x39, 0xd2, + 0x89, 0xd6, 0x3d, 0xed, 0xcf, 0xa3, 0x30, 0x84, 0x17, 0x3c, 0xb9, 0x02, 0x83, 0x38, 0xf6, 0x73, + 0xef, 0x70, 0xf4, 0xb5, 0xc2, 0x23, 0x3f, 0xee, 0x93, 0x2c, 0x4c, 0xfa, 0x89, 0x98, 0xdf, 0xd7, + 0x8b, 0x9e, 0xf0, 0x52, 0xc9, 0x5c, 0x6a, 0xd4, 0x53, 0xaa, 0xbf, 0x75, 0x1b, 0x77, 0x24, 0xe6, + 0x89, 0xf0, 0x0e, 0xb9, 0x01, 0x71, 0xec, 0x53, 0x78, 0xdb, 0x22, 0xe6, 0x38, 0xac, 0xba, 0x0c, + 0xe6, 0xed, 0x86, 0x5c, 0x75, 0x03, 0x94, 0x1d, 0x07, 0xec, 0x6e, 0x7c, 0x5e, 0x3e, 0x32, 0xe1, + 0x71, 0x40, 0xbc, 0x8d, 0x39, 0x2e, 0xc1, 0xa4, 0x04, 0x93, 0xcd, 0x2b, 0xbd, 0x6c, 0x56, 0x4c, + 0xcf, 0x7f, 0x91, 0x5d, 0xc0, 0xc0, 0x62, 0x30, 0x9a, 0x77, 0xf8, 0x3d, 0x24, 0xe0, 0xd9, 0xcc, + 0x82, 0xab, 0x3a, 0xa1, 0x8d, 0x50, 0x4b, 0x32, 0x11, 0xde, 0x23, 0xbf, 0x53, 0xe0, 0x4a, 0x8b, + 0xa6, 0x7c, 0xe1, 0xa4, 0x79, 0x8a, 0xf3, 0xc5, 0xb2, 0xee, 0xba, 0xfc, 0xa9, 0x65, 0x44, 0x7a, + 0x9f, 0xed, 0x64, 0x40, 0xe6, 0xc4, 0x3f, 0xcd, 0xeb, 0x8c, 0x69, 0x5b, 0xaf, 0x50, 0x6e, 0xd3, + 0x55, 0x71, 0xc3, 0x5f, 0x76, 0x7a, 0xd1, 0xe7, 0x7a, 0x93, 0x90, 0x5d, 0x88, 0x57, 0xa9, 0x53, + 0x31, 0x5d, 0x17, 0x3b, 0x77, 0xfe, 0x6a, 0x3c, 0x27, 0x59, 0xb5, 0x13, 0xec, 0xf2, 0x78, 0x4b, + 0xe4, 0x72, 0xbc, 0x25, 0x38, 0xf9, 0x2f, 0x05, 0xe2, 0x12, 0x1f, 0xc9, 0xc1, 0xa8, 0x5b, 0x2b, + 0x1c, 0xd2, 0x62, 0xb3, 0xc2, 0x2c, 0x74, 0xd6, 0x90, 0xde, 0xe5, 0x64, 0xa2, 0x7b, 0x10, 0x3c, + 0xa1, 0xee, 0x41, 0x60, 0x78, 0xc6, 0xa9, 0x53, 0xe0, 0xcf, 0x31, 0xfe, 0x19, 0x67, 0x40, 0xe8, + 0x8c, 0x33, 0x20, 0xf9, 0x1e, 0x8c, 0x08, 0xb9, 0x2c, 0xe3, 0x8f, 0x4c, 0xcb, 0x90, 0x33, 0x9e, + 0xad, 0xe5, 0x8c, 0x67, 0xeb, 0xe6, 0xc9, 0x18, 0x38, 0xfd, 0x64, 0x24, 0x4d, 0x98, 0xee, 0x90, + 0x37, 0x4f, 0x51, 0xa5, 0x94, 0x9e, 0x77, 0xe5, 0xc7, 0x0a, 0x5c, 0x39, 0x5b, 0x8a, 0x9c, 0x4d, + 0xfd, 0x5b, 0xb2, 0x7a, 0x7f, 0x98, 0x0a, 0x09, 0x6c, 0xd1, 0xd6, 0xab, 0x8c, 0xfe, 0x6c, 0x08, + 0x2e, 0x9e, 0xc2, 0xcf, 0x9a, 0xec, 0xf9, 0x8a, 0xfe, 0x03, 0xb3, 0x52, 0xab, 0x04, 0x1d, 0xf6, + 0xbe, 0xa3, 0x17, 0x59, 0x91, 0x17, 0x79, 0xf1, 0xad, 0x5e, 0x56, 0xa4, 0xef, 0x73, 0x09, 0x3e, + 0x7a, 0x5b, 0xf0, 0xf3, 0xb3, 0x91, 0x12, 0x67, 0xe3, 0x42, 0xa5, 0x33, 0x55, 0xae, 0xdb, 0x06, + 0xf9, 0xbd, 0x02, 0x97, 0xbb, 0x1a, 0x87, 0x67, 0xd8, 0xb6, 0xcb, 0x98, 0x6b, 0xf1, 0xd5, 0xf5, + 0xa7, 0x35, 0x32, 0x73, 0xb2, 0x63, 0xdb, 0x65, 0x71, 0x51, 0x0a, 0x53, 0x5f, 0xa8, 0x9c, 0x46, + 0x9b, 0x3b, 0x7d, 0x9b, 0x5d, 0x97, 0xa7, 0x05, 0xe4, 0xbc, 0x12, 0x51, 0xeb, 0xed, 0xe0, 0xd9, + 0x54, 0x3f, 0x08, 0x27, 0xe1, 0x4b, 0xed, 0x91, 0xc5, 0x28, 0xf4, 0x97, 0x88, 0x7f, 0x18, 0x80, + 0x54, 0x0f, 0x19, 0xe4, 0xe3, 0x33, 0x24, 0xe3, 0xda, 0x59, 0xac, 0x39, 0xa7, 0x84, 0xfc, 0x3a, + 0xbe, 0xac, 0x96, 0x85, 0x18, 0x96, 0xe4, 0x7b, 0xa6, 0xeb, 0x91, 0xeb, 0x30, 0x8c, 0xad, 0xa8, + 0x5f, 0xb2, 0x21, 0x28, 0xd9, 0xbc, 0x39, 0xe6, 0xbb, 0x72, 0x73, 0xcc, 0x11, 0x6d, 0x0f, 0x08, + 0x7f, 0x76, 0x2c, 0x4b, 0xfd, 0x1b, 0xb9, 0x05, 0xe3, 0x45, 0x8e, 0x52, 0x43, 0xea, 0xb3, 0xf1, + 0x37, 0x83, 0xe6, 0x46, 0xb8, 0xdb, 0x1e, 0x93, 0x71, 0xed, 0x06, 0x4c, 0xa2, 0xf6, 0x3b, 0xb4, + 0xf9, 0xec, 0x7c, 0xc6, 0x06, 0x46, 0xbb, 0x05, 0xea, 0xae, 0xe7, 0x50, 0xbd, 0x62, 0x5a, 0xa5, + 0x56, 0x19, 0x2f, 0x42, 0xd4, 0xaa, 0x55, 0x50, 0xc4, 0x38, 0x0f, 0xa4, 0x55, 0xab, 0xc8, 0x81, + 0xb4, 0x6a, 0x15, 0xed, 0x4d, 0x20, 0xc8, 0xb7, 0x41, 0xcb, 
0xd4, 0xa3, 0xfd, 0xaa, 0xff, 0x54, + 0x01, 0xe0, 0xef, 0x94, 0x9b, 0xd6, 0xbe, 0x7d, 0xe6, 0xb6, 0xeb, 0x06, 0xc4, 0x31, 0xa2, 0x46, + 0xfe, 0xd0, 0xc6, 0x8b, 0x4e, 0x59, 0x1a, 0xe2, 0xfd, 0x12, 0x87, 0xb7, 0xec, 0xd0, 0x6d, 0x07, + 0x01, 0xca, 0x58, 0xcb, 0x54, 0x77, 0x7d, 0xd6, 0x68, 0xc0, 0xca, 0xe1, 0x56, 0xd6, 0x00, 0xd5, + 0x1e, 0xc1, 0x34, 0xba, 0xba, 0x57, 0x35, 0x74, 0x2f, 0x18, 0x1b, 0xde, 0x90, 0x5f, 0xf8, 0xc3, + 0xd9, 0x70, 0xda, 0x1c, 0xd3, 0x47, 0x5b, 0x5c, 0x03, 0x35, 0xa3, 0x7b, 0xc5, 0x83, 0x4e, 0xda, + 0xdf, 0x83, 0xf1, 0x7d, 0xdd, 0x2c, 0xfb, 0x2f, 0x5f, 0x7e, 0x4e, 0xaa, 0x81, 0x15, 0x61, 0x06, + 0x9e, 0x56, 0x9c, 0xe5, 0xed, 0xd6, 0x3c, 0x1d, 0x93, 0xf1, 0xa6, 0xbf, 0xeb, 0xf8, 0x46, 0xf2, + 0x75, 0xf9, 0xdb, 0xa2, 0xbd, 0xb7, 0xbf, 0x61, 0x86, 0x3e, 0xfc, 0x8d, 0x43, 0x2c, 0x6b, 0x19, + 0xf7, 0x75, 0xe7, 0x88, 0x3a, 0xda, 0x47, 0x0a, 0xcc, 0x86, 0x4f, 0xc6, 0x7d, 0xea, 0xba, 0x7a, + 0x89, 0x92, 0x6f, 0xf4, 0xe7, 0xff, 0xdd, 0x48, 0xf0, 0x0c, 0x1d, 0xa5, 0x96, 0x21, 0x0a, 0xfa, + 0x04, 0xb2, 0x35, 0xf5, 0xf1, 0xf3, 0x45, 0xe5, 0x86, 0xeb, 0x6e, 0x24, 0xc7, 0xe8, 0x33, 0x23, + 0x30, 0x44, 0x8f, 0xa9, 0xe5, 0x2d, 0x27, 0x21, 0x2e, 0xfd, 0x06, 0x4b, 0xe2, 0x30, 0x22, 0x96, + 0x89, 0xc8, 0xf2, 0x55, 0x88, 0x4b, 0x3f, 0xd6, 0x91, 0x31, 0x18, 0xdd, 0xb6, 0x0d, 0xba, 0x63, + 0x3b, 0x5e, 0x22, 0xc2, 0x56, 0x77, 0xa9, 0x6e, 0x94, 0x19, 0xa9, 0xb2, 0xfc, 0x89, 0x02, 0xa3, + 0xfe, 0xc3, 0x3f, 0x01, 0x18, 0x7e, 0x7b, 0x2f, 0xbb, 0x97, 0xdd, 0x48, 0x44, 0x98, 0xc0, 0x9d, + 0xec, 0xf6, 0xc6, 0xe6, 0xf6, 0x9d, 0x84, 0xc2, 0x16, 0xb9, 0xbd, 0xed, 0x6d, 0xb6, 0x18, 0x20, + 0xe3, 0x10, 0xdb, 0xdd, 0x5b, 0x5f, 0xcf, 0x66, 0x37, 0xb2, 0x1b, 0x89, 0x28, 0x63, 0xba, 0xbd, + 0xb6, 0x79, 0x2f, 0xbb, 0x91, 0x18, 0x64, 0x74, 0x7b, 0xdb, 0x6f, 0x6d, 0x3f, 0x78, 0x77, 0x3b, + 0x31, 0xc4, 0xe9, 0x32, 0xf7, 0x37, 0x1f, 0x3e, 0xcc, 0x6e, 0x24, 0x86, 0x19, 0xdd, 0xbd, 0xec, + 0xda, 0x6e, 0x76, 0x23, 0x31, 0xc2, 0xb6, 0x76, 0x72, 0xd9, 0xec, 0xfd, 0x1d, 0xb6, 0x35, 0xca, + 0x96, 0xeb, 0x6b, 0xdb, 0xeb, 0xd9, 0x7b, 0x4c, 0x4a, 0x8c, 0x59, 0x98, 0xcb, 0x6e, 0x65, 0xd7, + 0xd9, 0x26, 0xac, 0xfe, 0x31, 0x0a, 0x63, 0x18, 0x50, 0xff, 0x69, 0xe9, 0x75, 0x88, 0xf3, 0xaf, + 0xca, 0xa7, 0x33, 0x29, 0xe4, 0xc9, 0xb9, 0xb6, 0x27, 0xbf, 0x2c, 0x0b, 0x9e, 0x16, 0x21, 0xb7, + 0x60, 0x4c, 0x62, 0x72, 0xc9, 0x44, 0xc0, 0xc5, 0xca, 0x78, 0xf2, 0x05, 0x5c, 0x77, 0x4b, 0x34, + 0x2d, 0xc2, 0xb4, 0xf2, 0xb3, 0xd3, 0xa7, 0x56, 0x89, 0xa9, 0xb7, 0xd6, 0xf0, 0xe9, 0xd4, 0x22, + 0xe4, 0xdb, 0x10, 0xe7, 0xb5, 0x94, 0x6b, 0xbd, 0x10, 0xf0, 0x87, 0x4a, 0xec, 0x29, 0x26, 0xa4, + 0x61, 0xf4, 0x0e, 0xf5, 0x38, 0xfb, 0x4c, 0xc0, 0x1e, 0x54, 0xf6, 0xa4, 0xe4, 0x8a, 0x16, 0x21, + 0x5b, 0x10, 0xf3, 0xe9, 0x5d, 0xc2, 0xed, 0xeb, 0x76, 0x27, 0x24, 0x93, 0x1d, 0xb6, 0xc5, 0xc1, + 0xd0, 0x22, 0xaf, 0x29, 0xab, 0xbf, 0x88, 0xc1, 0x30, 0x9f, 0xc5, 0xc9, 0x3b, 0x00, 0xfc, 0x2f, + 0xac, 0xbb, 0xb3, 0x1d, 0x7f, 0x68, 0x4d, 0xce, 0x75, 0x1e, 0xe0, 0xb5, 0xf9, 0x9f, 0xfc, 0xe9, + 0xef, 0xbf, 0x1c, 0x98, 0xd6, 0x26, 0x56, 0x8e, 0xaf, 0xad, 0x1c, 0xda, 0x05, 0xf1, 0x6f, 0x64, + 0x37, 0x95, 0x65, 0xf2, 0x2e, 0x00, 0xbf, 0x44, 0xc3, 0x72, 0x43, 0xbf, 0xe7, 0x25, 0x79, 0xd8, + 0xda, 0x2f, 0xdb, 0x76, 0xc1, 0xfc, 0x26, 0x65, 0x82, 0xbf, 0x0f, 0x63, 0x4d, 0xc1, 0xbb, 0xd4, + 0x23, 0xaa, 0xf4, 0x13, 0x5d, 0x58, 0x7a, 0xb7, 0xd8, 0x5f, 0x42, 0xe1, 0x73, 0xda, 0x94, 0x10, + 0xee, 0x52, 0x4f, 0x92, 0x6f, 0x41, 0x42, 0x7e, 0x36, 0x42, 0xf3, 0x2f, 0x76, 0x7e, 0x50, 0xe2, + 0x6a, 0x2e, 0x9d, 0xf6, 0xda, 0xa4, 0xa5, 0x50, 0xd9, 0xbc, 0x36, 0xe3, 0x7b, 0x22, 
0xbd, 0x1c, + 0x51, 0xa6, 0xef, 0x3d, 0x88, 0x8b, 0x1f, 0xa3, 0x51, 0x55, 0x33, 0xd4, 0xe1, 0x5f, 0xa8, 0xbb, + 0x3a, 0x93, 0x44, 0xf9, 0x33, 0xda, 0xa4, 0x2f, 0xbf, 0xca, 0xf9, 0x98, 0xe8, 0x3b, 0xfd, 0x1f, + 0xc8, 0x19, 0x14, 0x37, 0xa1, 0xc5, 0x98, 0x38, 0x2c, 0x88, 0x4c, 0x50, 0xf1, 0xd9, 0x0e, 0xe9, + 0x4b, 0x28, 0x74, 0x41, 0x9b, 0x67, 0x42, 0x0b, 0x8c, 0x8a, 0x1a, 0x2b, 0xfc, 0x4d, 0x5f, 0xdc, + 0x0f, 0x4c, 0xc9, 0x76, 0xff, 0x07, 0xf9, 0x22, 0x0a, 0x9e, 0x4d, 0x26, 0x9a, 0xd6, 0xae, 0xfc, + 0x88, 0xb5, 0x1e, 0x1f, 0x0a, 0xa3, 0x9f, 0xe5, 0x8c, 0x0b, 0xa3, 0x93, 0x21, 0xa3, 0x6b, 0x48, + 0x23, 0x19, 0xfd, 0x9d, 0x67, 0xac, 0x03, 0x2a, 0x6a, 0x21, 0xcb, 0x6d, 0x1e, 0x90, 0xdb, 0x7d, + 0xd5, 0x07, 0x21, 0x87, 0xb4, 0xcb, 0x31, 0x9e, 0x53, 0xdd, 0x10, 0x89, 0x46, 0x88, 0x1c, 0x0f, + 0x1e, 0x88, 0xd7, 0x14, 0x72, 0x13, 0x86, 0xef, 0xe2, 0xbf, 0x53, 0x92, 0x2e, 0x9e, 0x26, 0xf9, + 0x39, 0xe5, 0x44, 0xeb, 0x07, 0xb4, 0x78, 0xd4, 0xbc, 0xfb, 0xdf, 0xff, 0xe2, 0x6f, 0x0b, 0x91, + 0x1f, 0x3f, 0x5e, 0x50, 0x3e, 0x7b, 0xbc, 0xa0, 0x7c, 0xfe, 0x78, 0x41, 0xf9, 0xeb, 0xe3, 0x05, + 0xe5, 0xa3, 0x27, 0x0b, 0x91, 0xcf, 0x9f, 0x2c, 0x44, 0xbe, 0x78, 0xb2, 0x10, 0xf9, 0xee, 0xff, + 0x49, 0xff, 0xe1, 0xa9, 0x3b, 0x15, 0xdd, 0xd0, 0xab, 0x8e, 0x7d, 0x48, 0x8b, 0x9e, 0x58, 0xad, + 0x88, 0x7f, 0xe9, 0xfc, 0x74, 0x60, 0x66, 0x0d, 0x81, 0x1d, 0xbe, 0x9d, 0xde, 0xb4, 0xd3, 0x6b, + 0x55, 0xb3, 0x30, 0x8c, 0xb6, 0xbc, 0xfe, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x4c, 0x6f, 0x9b, + 0x69, 0xef, 0x2a, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. diff --git a/pkg/api/submit.proto b/pkg/api/submit.proto index 3fcd4b068be..48abb3385d8 100644 --- a/pkg/api/submit.proto +++ b/pkg/api/submit.proto @@ -104,6 +104,7 @@ enum JobState { LEASED = 7; PREEMPTED = 8; CANCELLED = 9; + REJECTED = 10; } message Job { From b38bd6f65293bfd298b7ae1b80dda3e1b0e0afc7 Mon Sep 17 00:00:00 2001 From: Chris Martin Date: Mon, 24 Jun 2024 11:11:13 +0100 Subject: [PATCH 3/9] Pre-emption Based On Adjusted Fair Share (#3714) * wip Signed-off-by: Chris Martin * wip Signed-off-by: Chris Martin * fix some tests Signed-off-by: Chris Martin * fix some tests Signed-off-by: Chris Martin * wip Signed-off-by: Chris Martin * extra test Signed-off-by: Chris Martin * allow ability to disable adjusted fair share protection. 
Signed-off-by: Chris Martin * lint Signed-off-by: Chris Martin * extra test Signed-off-by: Chris Martin --------- Signed-off-by: Chris Martin Co-authored-by: Chris Martin --- config/scheduler/config.yaml | 1 + .../scheduler/configuration/configuration.go | 2 + internal/scheduler/context/context.go | 76 +++++++++ internal/scheduler/context/context_test.go | 156 +++++++++++++++++- internal/scheduler/gang_scheduler_test.go | 1 + .../scheduler/preempting_queue_scheduler.go | 35 ++-- .../preempting_queue_scheduler_test.go | 59 ++++++- internal/scheduler/queue_scheduler_test.go | 1 + internal/scheduler/scheduler_metrics.go | 17 +- internal/scheduler/scheduling_algo.go | 40 +++-- internal/scheduler/scheduling_algo_test.go | 8 + internal/scheduler/simulator/simulator.go | 25 +++ .../scheduler/testfixtures/testfixtures.go | 1 + 13 files changed, 385 insertions(+), 37 deletions(-) diff --git a/config/scheduler/config.yaml b/config/scheduler/config.yaml index 5b7400edbb9..cd5d312dbdb 100644 --- a/config/scheduler/config.yaml +++ b/config/scheduler/config.yaml @@ -87,6 +87,7 @@ scheduling: disableScheduling: false enableAssertions: false protectedFractionOfFairShare: 1.0 + useAdjustedFairShareProtection: true nodeIdLabel: "kubernetes.io/hostname" priorityClasses: armada-default: diff --git a/internal/scheduler/configuration/configuration.go b/internal/scheduler/configuration/configuration.go index 9e3cf36c04e..54800d0e7d9 100644 --- a/internal/scheduler/configuration/configuration.go +++ b/internal/scheduler/configuration/configuration.go @@ -153,6 +153,8 @@ type SchedulingConfig struct { EnableAssertions bool // Only queues allocated more than this fraction of their fair share are considered for preemption. ProtectedFractionOfFairShare float64 `validate:"gte=0"` + // Use Max(AdjustedFairShare, FairShare) for fair share protection. If false then FairShare will be used. + UseAdjustedFairShareProtection bool // Armada adds a node selector term to every scheduled pod using this label with the node name as value. // This to force kube-scheduler to schedule pods on the node chosen by Armada. // For example, if NodeIdLabel is "kubernetes.io/hostname" and armada schedules a pod on node "myNode", diff --git a/internal/scheduler/context/context.go b/internal/scheduler/context/context.go index 136599190dc..b4bc129a8ce 100644 --- a/internal/scheduler/context/context.go +++ b/internal/scheduler/context/context.go @@ -110,6 +110,7 @@ func (sctx *SchedulingContext) ClearUnfeasibleSchedulingKeys() { func (sctx *SchedulingContext) AddQueueSchedulingContext( queue string, weight float64, initialAllocatedByPriorityClass schedulerobjects.QuantityByTAndResourceType[string], + demand schedulerobjects.ResourceList, limiter *rate.Limiter, ) error { if _, ok := sctx.QueueSchedulingContexts[queue]; ok { @@ -137,6 +138,7 @@ func (sctx *SchedulingContext) AddQueueSchedulingContext( Weight: weight, Limiter: limiter, Allocated: allocated, + Demand: demand, AllocatedByPriorityClass: initialAllocatedByPriorityClass, ScheduledResourcesByPriorityClass: make(schedulerobjects.QuantityByTAndResourceType[string]), EvictedResourcesByPriorityClass: make(schedulerobjects.QuantityByTAndResourceType[string]), @@ -167,6 +169,73 @@ func (sctx *SchedulingContext) TotalCost() float64 { return rv } +// UpdateFairShares updates FairShare and AdjustedFairShare for every QueueSchedulingContext associated with the +// SchedulingContext. 
This works by calculating a fair share as queue_weight/sum_of_all_queue_weights and an
+// AdjustedFairShare by resharing any unused capacity (as determined by a queue's demand)
+func (sctx *SchedulingContext) UpdateFairShares() {
+	const maxIterations = 5
+
+	type queueInfo struct {
+		queueName     string
+		adjustedShare float64
+		fairShare     float64
+		weight        float64
+		cappedShare   float64
+	}
+
+	queueInfos := make([]*queueInfo, 0, len(sctx.QueueSchedulingContexts))
+	for queueName, qctx := range sctx.QueueSchedulingContexts {
+		cappedShare := 1.0
+		if !sctx.TotalResources.IsZero() {
+			cappedShare = sctx.FairnessCostProvider.CostFromAllocationAndWeight(qctx.Demand, qctx.Weight) * qctx.Weight
+		}
+		queueInfos = append(queueInfos, &queueInfo{
+			queueName:     queueName,
+			adjustedShare: 0,
+			fairShare:     qctx.Weight / sctx.WeightSum,
+			weight:        qctx.Weight,
+			cappedShare:   cappedShare,
+		})
+	}
+
+	// We do this so that we get deterministic output
+	slices.SortFunc(queueInfos, func(a, b *queueInfo) int {
+		return strings.Compare(a.queueName, b.queueName)
+	})
+
+	unallocated := 1.0 // this is the proportion of the cluster that we can share each time
+
+	// We will reshare unused capacity until we've reshared 99% of all capacity or we've completed 5 iterations
+	for i := 0; i < maxIterations && unallocated > 0.01; i++ {
+		totalWeight := 0.0
+		for _, q := range queueInfos {
+			totalWeight += q.weight
+		}
+
+		for _, q := range queueInfos {
+			if q.weight > 0 {
+				share := (q.weight / totalWeight) * unallocated
+				q.adjustedShare += share
+			}
+		}
+		unallocated = 0.0
+		for _, q := range queueInfos {
+			excessShare := q.adjustedShare - q.cappedShare
+			if excessShare > 0 {
+				q.adjustedShare = q.cappedShare
+				q.weight = 0.0
+				unallocated += excessShare
+			}
+		}
+	}
+
+	for _, q := range queueInfos {
+		qtx := sctx.QueueSchedulingContexts[q.queueName]
+		qtx.FairShare = q.fairShare
+		qtx.AdjustedFairShare = q.adjustedShare
+	}
+}
+
 func (sctx *SchedulingContext) ReportString(verbosity int32) string {
 	var sb strings.Builder
 	w := tabwriter.NewWriter(&sb, 1, 1, 1, ' ', 0)
@@ -343,6 +412,13 @@ type QueueSchedulingContext struct {
 	// Total resources assigned to the queue across all clusters by priority class priority.
 	// Includes jobs scheduled during this invocation of the scheduler.
 	Allocated schedulerobjects.ResourceList
+	// Total demand from this queue. This is essentially the cumulative resources of all non-terminal jobs at the
+	// start of the scheduling cycle.
+	Demand schedulerobjects.ResourceList
+	// Fair share is the weight of this queue over the sum of the weights of all queues
+	FairShare float64
+	// AdjustedFairShare modifies fair share such that queues whose demand costs less than their fair share have the unused portion reallocated to other queues.
+	AdjustedFairShare float64
 	// Total resources assigned to the queue across all clusters by priority class.
 	// Includes jobs scheduled during this invocation of the scheduler.
AllocatedByPriorityClass schedulerobjects.QuantityByTAndResourceType[string] diff --git a/internal/scheduler/context/context_test.go b/internal/scheduler/context/context_test.go index 69dfd575bc9..dcc9aeedcbc 100644 --- a/internal/scheduler/context/context_test.go +++ b/internal/scheduler/context/context_test.go @@ -54,7 +54,7 @@ func TestSchedulingContextAccounting(t *testing.T) { }, } for _, queue := range []string{"A", "B"} { - err := sctx.AddQueueSchedulingContext(queue, priorityFactorByQueue[queue], allocatedByQueueAndPriorityClass[queue], nil) + err := sctx.AddQueueSchedulingContext(queue, priorityFactorByQueue[queue], allocatedByQueueAndPriorityClass[queue], schedulerobjects.ResourceList{}, nil) require.NoError(t, err) } @@ -114,3 +114,157 @@ func TestJobSchedulingContext_SetAssignedNodeId(t *testing.T) { assert.Len(t, jctx.AdditionalNodeSelectors, 1) assert.Equal(t, map[string]string{configuration.NodeIdLabel: "node1"}, jctx.AdditionalNodeSelectors) } + +func TestCalculateFairShares(t *testing.T) { + zeroCpu := schedulerobjects.ResourceList{ + Resources: map[string]resource.Quantity{"cpu": resource.MustParse("0")}, + } + oneCpu := schedulerobjects.ResourceList{ + Resources: map[string]resource.Quantity{"cpu": resource.MustParse("1")}, + } + fortyCpu := schedulerobjects.ResourceList{ + Resources: map[string]resource.Quantity{"cpu": resource.MustParse("40")}, + } + oneHundredCpu := schedulerobjects.ResourceList{ + Resources: map[string]resource.Quantity{"cpu": resource.MustParse("100")}, + } + oneThousandCpu := schedulerobjects.ResourceList{ + Resources: map[string]resource.Quantity{"cpu": resource.MustParse("1000")}, + } + tests := map[string]struct { + availableResources schedulerobjects.ResourceList + queueCtxs map[string]*QueueSchedulingContext + expectedFairShares map[string]float64 + expectedAdjustedFairShares map[string]float64 + }{ + "one queue, demand exceeds capacity": { + availableResources: oneHundredCpu, + queueCtxs: map[string]*QueueSchedulingContext{ + "queueA": {Weight: 1.0, Demand: oneThousandCpu}, + }, + expectedFairShares: map[string]float64{"queueA": 1.0}, + expectedAdjustedFairShares: map[string]float64{"queueA": 1.0}, + }, + "one queue, demand less than capacity": { + availableResources: oneHundredCpu, + queueCtxs: map[string]*QueueSchedulingContext{ + "queueA": {Weight: 1.0, Demand: oneCpu}, + }, + expectedFairShares: map[string]float64{"queueA": 1.0}, + expectedAdjustedFairShares: map[string]float64{"queueA": 0.01}, + }, + "two queues, equal weights, demand exceeds capacity": { + availableResources: oneHundredCpu, + queueCtxs: map[string]*QueueSchedulingContext{ + "queueA": {Weight: 1.0, Demand: oneThousandCpu}, + "queueB": {Weight: 1.0, Demand: oneThousandCpu}, + }, + expectedFairShares: map[string]float64{"queueA": 0.5, "queueB": 0.5}, + expectedAdjustedFairShares: map[string]float64{"queueA": 0.5, "queueB": 0.5}, + }, + "two queues, equal weights, demand less than capacity for both queues": { + availableResources: oneHundredCpu, + queueCtxs: map[string]*QueueSchedulingContext{ + "queueA": {Weight: 1.0, Demand: oneCpu}, + "queueB": {Weight: 1.0, Demand: oneCpu}, + }, + expectedFairShares: map[string]float64{"queueA": 0.5, "queueB": 0.5}, + expectedAdjustedFairShares: map[string]float64{"queueA": 0.01, "queueB": 0.01}, + }, + "two queues, equal weights, demand less than capacity for one queue": { + availableResources: oneHundredCpu, + queueCtxs: map[string]*QueueSchedulingContext{ + "queueA": {Weight: 1.0, Demand: oneCpu}, + "queueB": {Weight: 1.0, Demand: 
oneThousandCpu}, + }, + expectedFairShares: map[string]float64{"queueA": 0.5, "queueB": 0.5}, + expectedAdjustedFairShares: map[string]float64{"queueA": 0.01, "queueB": 0.99}, + }, + "two queues, non equal weights, demand exceeds capacity for both queues": { + availableResources: oneHundredCpu, + queueCtxs: map[string]*QueueSchedulingContext{ + "queueA": {Weight: 1.0, Demand: oneThousandCpu}, + "queueB": {Weight: 3.0, Demand: oneThousandCpu}, + }, + expectedFairShares: map[string]float64{"queueA": 0.25, "queueB": 0.75}, + expectedAdjustedFairShares: map[string]float64{"queueA": 0.25, "queueB": 0.75}, + }, + "two queues, non equal weights, demand exceeds capacity for higher priority queue only": { + availableResources: oneHundredCpu, + queueCtxs: map[string]*QueueSchedulingContext{ + "queueA": {Weight: 1.0, Demand: oneCpu}, + "queueB": {Weight: 3.0, Demand: oneThousandCpu}, + }, + expectedFairShares: map[string]float64{"queueA": 0.25, "queueB": 0.75}, + expectedAdjustedFairShares: map[string]float64{"queueA": 0.01, "queueB": 0.99}, + }, + "two queues, non equal weights, demand exceeds capacity for lower priority queue only": { + availableResources: oneHundredCpu, + queueCtxs: map[string]*QueueSchedulingContext{ + "queueA": {Weight: 1.0, Demand: oneThousandCpu}, + "queueB": {Weight: 3.0, Demand: oneCpu}, + }, + expectedFairShares: map[string]float64{"queueA": 0.25, "queueB": 0.75}, + expectedAdjustedFairShares: map[string]float64{"queueA": 0.99, "queueB": 0.01}, + }, + "three queues, equal weights. Adjusted fair share requires multiple iterations": { + availableResources: oneHundredCpu, + queueCtxs: map[string]*QueueSchedulingContext{ + "queueA": {Weight: 1.0, Demand: oneCpu}, + "queueB": {Weight: 1.0, Demand: fortyCpu}, + "queueC": {Weight: 1.0, Demand: oneThousandCpu}, + }, + expectedFairShares: map[string]float64{"queueA": 1.0 / 3, "queueB": 1.0 / 3, "queueC": 1.0 / 3}, + expectedAdjustedFairShares: map[string]float64{"queueA": 0.01, "queueB": 0.4, "queueC": 0.59}, + }, + "No demand": { + availableResources: oneHundredCpu, + queueCtxs: map[string]*QueueSchedulingContext{ + "queueA": {Weight: 1.0, Demand: zeroCpu}, + "queueB": {Weight: 1.0, Demand: zeroCpu}, + "queueC": {Weight: 1.0, Demand: zeroCpu}, + }, + expectedFairShares: map[string]float64{"queueA": 1.0 / 3, "queueB": 1.0 / 3, "queueC": 1.0 / 3}, + expectedAdjustedFairShares: map[string]float64{"queueA": 0.0, "queueB": 0.0, "queueC": 0.0}, + }, + "No capacity": { + availableResources: zeroCpu, + queueCtxs: map[string]*QueueSchedulingContext{ + "queueA": {Weight: 1.0, Demand: oneCpu}, + "queueB": {Weight: 1.0, Demand: oneCpu}, + "queueC": {Weight: 1.0, Demand: oneCpu}, + }, + expectedFairShares: map[string]float64{"queueA": 1.0 / 3, "queueB": 1.0 / 3, "queueC": 1.0 / 3}, + expectedAdjustedFairShares: map[string]float64{"queueA": 1.0 / 3, "queueB": 1.0 / 3, "queueC": 1.0 / 3}, + }, + } + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + fairnessCostProvider, err := fairness.NewDominantResourceFairness(tc.availableResources, []string{"cpu"}) + require.NoError(t, err) + sctx := NewSchedulingContext( + "executor", + "pool", + testfixtures.TestPriorityClasses, + testfixtures.TestDefaultPriorityClass, + fairnessCostProvider, + nil, + tc.availableResources, + ) + for qName, q := range tc.queueCtxs { + err = sctx.AddQueueSchedulingContext( + qName, q.Weight, schedulerobjects.QuantityByTAndResourceType[string]{}, q.Demand, nil) + require.NoError(t, err) + } + sctx.UpdateFairShares() + for qName, qctx := range 
sctx.QueueSchedulingContexts { + expectedFairShare, ok := tc.expectedFairShares[qName] + require.True(t, ok, "Expected fair share for queue %s not found", qName) + expectedAdjustedFairShare, ok := tc.expectedAdjustedFairShares[qName] + require.True(t, ok, "Expected adjusted fair share for queue %s not found", qName) + assert.Equal(t, expectedFairShare, qctx.FairShare, "Fair share for queue %s", qName) + assert.Equal(t, expectedAdjustedFairShare, qctx.AdjustedFairShare, "Adjusted Fair share for queue %s", qName) + } + }) + } +} diff --git a/internal/scheduler/gang_scheduler_test.go b/internal/scheduler/gang_scheduler_test.go index 67c2086baae..09ce9fc04dd 100644 --- a/internal/scheduler/gang_scheduler_test.go +++ b/internal/scheduler/gang_scheduler_test.go @@ -556,6 +556,7 @@ func TestGangScheduler(t *testing.T) { queue, priorityFactor, nil, + schedulerobjects.NewResourceList(0), rate.NewLimiter( rate.Limit(tc.SchedulingConfig.MaximumPerQueueSchedulingRate), tc.SchedulingConfig.MaximumPerQueueSchedulingBurst, diff --git a/internal/scheduler/preempting_queue_scheduler.go b/internal/scheduler/preempting_queue_scheduler.go index 028dd71a57f..dc6c8c069df 100644 --- a/internal/scheduler/preempting_queue_scheduler.go +++ b/internal/scheduler/preempting_queue_scheduler.go @@ -2,6 +2,7 @@ package scheduler import ( "fmt" + "math" "reflect" "time" @@ -26,11 +27,12 @@ import ( // PreemptingQueueScheduler is a scheduler that makes a unified decisions on which jobs to preempt and schedule. // Uses QueueScheduler as a building block. type PreemptingQueueScheduler struct { - schedulingContext *schedulercontext.SchedulingContext - constraints schedulerconstraints.SchedulingConstraints - protectedFractionOfFairShare float64 - jobRepo JobRepository - nodeDb *nodedb.NodeDb + schedulingContext *schedulercontext.SchedulingContext + constraints schedulerconstraints.SchedulingConstraints + protectedFractionOfFairShare float64 + useAdjustedFairShareProtection bool + jobRepo JobRepository + nodeDb *nodedb.NodeDb // Maps job ids to the id of the node the job is associated with. // For scheduled or running jobs, that is the node the job is assigned to. // For preempted jobs, that is the node the job was preempted from. 
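The protection check in the Schedule hunk below compares a queue's actual share against max(FairShare, AdjustedFairShare) when useAdjustedFairShareProtection is set. The adjusted value is produced by the UpdateFairShares resharing loop added to context.go earlier in this patch. The following is a minimal, self-contained sketch of that loop, using plain float fractions of the pool instead of schedulerobjects resource lists; the helper name adjustedFairShares and the float representation are illustrative only and not part of the patch. The example numbers mirror the "three queues, equal weights" test case above (demands of 1, 40 and 1000 CPU on a 100 CPU pool).

package main

import "fmt"

// adjustedFairShares is a stripped-down version of the resharing loop in
// UpdateFairShares: capacity left unused by low-demand queues is handed back
// and redistributed to the remaining queues in proportion to their weights.
// cappedShares[i] is queue i's demand expressed as a fraction of total capacity.
func adjustedFairShares(weights, cappedShares []float64) []float64 {
	const maxIterations = 5
	adjusted := make([]float64, len(weights))
	w := append([]float64(nil), weights...)
	unallocated := 1.0 // fraction of the pool still to be shared out
	for i := 0; i < maxIterations && unallocated > 0.01; i++ {
		totalWeight := 0.0
		for _, wi := range w {
			totalWeight += wi
		}
		// Hand out the remaining capacity in proportion to the remaining weights.
		for j, wi := range w {
			if wi > 0 {
				adjusted[j] += (wi / totalWeight) * unallocated
			}
		}
		// Queues given more than their demand can use return the excess.
		unallocated = 0.0
		for j := range adjusted {
			if excess := adjusted[j] - cappedShares[j]; excess > 0 {
				adjusted[j] = cappedShares[j]
				w[j] = 0
				unallocated += excess
			}
		}
	}
	return adjusted
}

func main() {
	// Demands of 1, 40 and 1000 CPU on a 100 CPU pool give capped shares of
	// 0.01, 0.4 and 10.0; with equal weights the adjusted fair shares come out
	// at roughly 0.01, 0.4 and 0.59, matching the expectations in the test above.
	fmt.Println(adjustedFairShares([]float64{1, 1, 1}, []float64{0.01, 0.4, 10.0}))
}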
@@ -49,6 +51,7 @@ func NewPreemptingQueueScheduler( sctx *schedulercontext.SchedulingContext, constraints schedulerconstraints.SchedulingConstraints, protectedFractionOfFairShare float64, + useAdjustedFairShareProtection bool, jobRepo JobRepository, nodeDb *nodedb.NodeDb, initialNodeIdByJobId map[string]string, @@ -69,14 +72,15 @@ func NewPreemptingQueueScheduler( initialJobIdsByGangId[gangId] = maps.Clone(jobIds) } return &PreemptingQueueScheduler{ - schedulingContext: sctx, - constraints: constraints, - protectedFractionOfFairShare: protectedFractionOfFairShare, - jobRepo: jobRepo, - nodeDb: nodeDb, - nodeIdByJobId: maps.Clone(initialNodeIdByJobId), - jobIdsByGangId: initialJobIdsByGangId, - gangIdByJobId: maps.Clone(initialGangIdByJobId), + schedulingContext: sctx, + constraints: constraints, + protectedFractionOfFairShare: protectedFractionOfFairShare, + useAdjustedFairShareProtection: useAdjustedFairShareProtection, + jobRepo: jobRepo, + nodeDb: nodeDb, + nodeIdByJobId: maps.Clone(initialNodeIdByJobId), + jobIdsByGangId: initialJobIdsByGangId, + gangIdByJobId: maps.Clone(initialGangIdByJobId), } } @@ -127,8 +131,11 @@ func (sch *PreemptingQueueScheduler) Schedule(ctx *armadacontext.Context) (*Sche return false } if qctx, ok := sch.schedulingContext.QueueSchedulingContexts[job.Queue()]; ok { - fairShare := qctx.Weight / sch.schedulingContext.WeightSum actualShare := sch.schedulingContext.FairnessCostProvider.CostFromQueue(qctx) / totalCost + fairShare := qctx.FairShare + if sch.useAdjustedFairShareProtection { + fairShare = math.Max(qctx.AdjustedFairShare, fairShare) + } fractionOfFairShare := actualShare / fairShare if fractionOfFairShare <= sch.protectedFractionOfFairShare { return false diff --git a/internal/scheduler/preempting_queue_scheduler_test.go b/internal/scheduler/preempting_queue_scheduler_test.go index 93000154e43..e726c4b4a88 100644 --- a/internal/scheduler/preempting_queue_scheduler_test.go +++ b/internal/scheduler/preempting_queue_scheduler_test.go @@ -1276,6 +1276,42 @@ func TestPreemptingQueueScheduler(t *testing.T) { "C": 1, }, }, + "ProtectedFractionOfFairShare reshared": { + SchedulingConfig: testfixtures.WithProtectedFractionOfFairShareConfig( + 1.0, + testfixtures.TestSchedulingConfig(), + ), + Nodes: testfixtures.N32CpuNodes(1, testfixtures.TestPriorities), + Rounds: []SchedulingRound{ + { + JobsByQueue: map[string][]*jobdb.Job{ + "A": testfixtures.N1Cpu4GiJobs("A", testfixtures.PriorityClass2NonPreemptible, 16), // not preemptible + "B": testfixtures.N1Cpu4GiJobs("B", testfixtures.PriorityClass0, 11), + "C": testfixtures.N1Cpu4GiJobs("C", testfixtures.PriorityClass0, 3), + "D": testfixtures.N1Cpu4GiJobs("D", testfixtures.PriorityClass0, 2), + }, + ExpectedScheduledIndices: map[string][]int{ + "A": testfixtures.IntRange(0, 15), + "B": testfixtures.IntRange(0, 10), + "C": testfixtures.IntRange(0, 2), + "D": testfixtures.IntRange(0, 1), + }, + }, + { + // D submits one more job. No preemption occurs because B is below adjusted fair share + JobsByQueue: map[string][]*jobdb.Job{ + "D": testfixtures.N1Cpu4GiJobs("D", testfixtures.PriorityClass0, 1), + }, + }, + {}, // Empty round to make sure nothing changes. 
+ }, + PriorityFactorByQueue: map[string]float64{ + "A": 1, + "B": 1, + "C": 1, + "D": 1, + }, + }, "DominantResourceFairness": { SchedulingConfig: testfixtures.TestSchedulingConfig(), Nodes: testfixtures.N32CpuNodes(1, testfixtures.TestPriorities), @@ -1697,6 +1733,8 @@ func TestPreemptingQueueScheduler(t *testing.T) { ) } + demandByQueue := map[string]schedulerobjects.ResourceList{} + // Run the scheduler. ctx := armadacontext.Background() for i, round := range tc.Rounds { @@ -1712,6 +1750,12 @@ func TestPreemptingQueueScheduler(t *testing.T) { queuedJobs = append(queuedJobs, job.WithQueued(true)) roundByJobId[job.Id()] = i indexByJobId[job.Id()] = j + r, ok := demandByQueue[job.Queue()] + if !ok { + r = schedulerobjects.NewResourceList(len(job.PodRequirements().ResourceRequirements.Requests)) + demandByQueue[job.Queue()] = r + } + r.AddV1ResourceList(job.PodRequirements().ResourceRequirements.Requests) } } err = jobDbTxn.Upsert(queuedJobs) @@ -1733,6 +1777,12 @@ func TestPreemptingQueueScheduler(t *testing.T) { delete(gangIdByJobId, job.Id()) delete(jobIdsByGangId[gangId], job.Id()) } + r, ok := demandByQueue[job.Queue()] + if !ok { + r = schedulerobjects.NewResourceList(len(job.PodRequirements().ResourceRequirements.Requests)) + demandByQueue[job.Queue()] = r + } + r.SubV1ResourceList(job.PodRequirements().ResourceRequirements.Requests) } } } @@ -1774,6 +1824,7 @@ func TestPreemptingQueueScheduler(t *testing.T) { queue, weight, allocatedByQueueAndPriorityClass[queue], + demandByQueue[queue], limiterByQueue[queue], ) require.NoError(t, err) @@ -1785,10 +1836,12 @@ func TestPreemptingQueueScheduler(t *testing.T) { tc.SchedulingConfig, nil, ) + sctx.UpdateFairShares() sch := NewPreemptingQueueScheduler( sctx, constraints, tc.SchedulingConfig.ProtectedFractionOfFairShare, + tc.SchedulingConfig.UseAdjustedFairShareProtection, NewSchedulerJobRepositoryAdapter(jobDbTxn), nodeDb, nodeIdByJobId, @@ -2130,7 +2183,7 @@ func BenchmarkPreemptingQueueScheduler(b *testing.B) { ) for queue, priorityFactor := range priorityFactorByQueue { weight := 1 / priorityFactor - err := sctx.AddQueueSchedulingContext(queue, weight, make(schedulerobjects.QuantityByTAndResourceType[string]), limiterByQueue[queue]) + err := sctx.AddQueueSchedulingContext(queue, weight, make(schedulerobjects.QuantityByTAndResourceType[string]), schedulerobjects.NewResourceList(0), limiterByQueue[queue]) require.NoError(b, err) } constraints := schedulerconstraints.NewSchedulingConstraints( @@ -2144,6 +2197,7 @@ func BenchmarkPreemptingQueueScheduler(b *testing.B) { sctx, constraints, tc.SchedulingConfig.ProtectedFractionOfFairShare, + tc.SchedulingConfig.UseAdjustedFairShareProtection, NewSchedulerJobRepositoryAdapter(jobDbTxn), nodeDb, nil, @@ -2197,13 +2251,14 @@ func BenchmarkPreemptingQueueScheduler(b *testing.B) { ) for queue, priorityFactor := range priorityFactorByQueue { weight := 1 / priorityFactor - err := sctx.AddQueueSchedulingContext(queue, weight, allocatedByQueueAndPriorityClass[queue], limiterByQueue[queue]) + err := sctx.AddQueueSchedulingContext(queue, weight, allocatedByQueueAndPriorityClass[queue], schedulerobjects.NewResourceList(0), limiterByQueue[queue]) require.NoError(b, err) } sch := NewPreemptingQueueScheduler( sctx, constraints, tc.SchedulingConfig.ProtectedFractionOfFairShare, + tc.SchedulingConfig.UseAdjustedFairShareProtection, NewSchedulerJobRepositoryAdapter(jobDbTxn), nodeDb, nil, diff --git a/internal/scheduler/queue_scheduler_test.go b/internal/scheduler/queue_scheduler_test.go index 
5f9493e041b..510b586f5de 100644 --- a/internal/scheduler/queue_scheduler_test.go +++ b/internal/scheduler/queue_scheduler_test.go @@ -569,6 +569,7 @@ func TestQueueScheduler(t *testing.T) { err := sctx.AddQueueSchedulingContext( q.Name, weight, tc.InitialAllocatedByQueueAndPriorityClass[q.Name], + schedulerobjects.NewResourceList(0), rate.NewLimiter( rate.Limit(tc.SchedulingConfig.MaximumPerQueueSchedulingRate), tc.SchedulingConfig.MaximumPerQueueSchedulingBurst, diff --git a/internal/scheduler/scheduler_metrics.go b/internal/scheduler/scheduler_metrics.go index bc81d4c92c2..04464bb7ac4 100644 --- a/internal/scheduler/scheduler_metrics.go +++ b/internal/scheduler/scheduler_metrics.go @@ -60,6 +60,15 @@ var fairSharePerQueueDesc = prometheus.NewDesc( }, nil, ) +var adjustedFairSharePerQueueDesc = prometheus.NewDesc( + fmt.Sprintf("%s_%s_%s", NAMESPACE, SUBSYSTEM, "adjusted_fair_share"), + "Adjusted Fair share of each queue and pool.", + []string{ + "queue", + "pool", + }, nil, +) + var actualSharePerQueueDesc = prometheus.NewDesc( fmt.Sprintf("%s_%s_%s", NAMESPACE, SUBSYSTEM, "actual_share"), "Actual share of each queue and pool.", @@ -147,6 +156,7 @@ func generateSchedulerMetrics(schedulingRoundData schedulingRoundData) []prometh for key, value := range schedulingRoundData.queuePoolData { result = append(result, prometheus.MustNewConstMetric(consideredJobsDesc, prometheus.GaugeValue, float64(value.numberOfJobsConsidered), key.queue, key.pool)) result = append(result, prometheus.MustNewConstMetric(fairSharePerQueueDesc, prometheus.GaugeValue, float64(value.fairShare), key.queue, key.pool)) + result = append(result, prometheus.MustNewConstMetric(adjustedFairSharePerQueueDesc, prometheus.GaugeValue, float64(value.adjustedFairShare), key.queue, key.pool)) result = append(result, prometheus.MustNewConstMetric(actualSharePerQueueDesc, prometheus.GaugeValue, float64(value.actualShare), key.queue, key.pool)) } for key, value := range schedulingRoundData.scheduledJobData { @@ -185,17 +195,15 @@ func (metrics *SchedulerMetrics) calculateQueuePoolMetrics(schedulingContexts [] result := make(map[queuePoolKey]queuePoolData) for _, schedContext := range schedulingContexts { totalCost := schedContext.TotalCost() - totalWeight := schedContext.WeightSum pool := schedContext.Pool for queue, queueContext := range schedContext.QueueSchedulingContexts { key := queuePoolKey{queue: queue, pool: pool} - fairShare := queueContext.Weight / totalWeight actualShare := schedContext.FairnessCostProvider.CostFromQueue(queueContext) / totalCost - result[key] = queuePoolData{ numberOfJobsConsidered: len(queueContext.UnsuccessfulJobSchedulingContexts) + len(queueContext.SuccessfulJobSchedulingContexts), - fairShare: fairShare, + fairShare: queueContext.FairShare, + adjustedFairShare: queueContext.AdjustedFairShare, actualShare: actualShare, } } @@ -224,4 +232,5 @@ type queuePoolData struct { numberOfJobsConsidered int actualShare float64 fairShare float64 + adjustedFairShare float64 } diff --git a/internal/scheduler/scheduling_algo.go b/internal/scheduler/scheduling_algo.go index 39f6d649404..0af6e58c435 100644 --- a/internal/scheduler/scheduling_algo.go +++ b/internal/scheduler/scheduling_algo.go @@ -2,7 +2,6 @@ package scheduler import ( "context" - "math/rand" "sort" "strings" "time" @@ -19,7 +18,6 @@ import ( "github.com/armadaproject/armada/internal/common/logging" armadaslices "github.com/armadaproject/armada/internal/common/slices" "github.com/armadaproject/armada/internal/common/stringinterner" - 
"github.com/armadaproject/armada/internal/common/util" "github.com/armadaproject/armada/internal/scheduler/configuration" schedulerconstraints "github.com/armadaproject/armada/internal/scheduler/constraints" schedulercontext "github.com/armadaproject/armada/internal/scheduler/context" @@ -64,8 +62,6 @@ type FairSchedulingAlgo struct { queueQuarantiner *quarantine.QueueQuarantiner // Function that is called every time an executor is scheduled. Useful for testing. onExecutorScheduled func(executor *schedulerobjects.Executor) - // rand and clock injected here for repeatable testing. - rand *rand.Rand clock clock.Clock stringInterner *stringinterner.StringInterner resourceListFactory *internaltypes.ResourceListFactory @@ -99,7 +95,6 @@ func NewFairSchedulingAlgo( nodeQuarantiner: nodeQuarantiner, queueQuarantiner: queueQuarantiner, onExecutorScheduled: func(executor *schedulerobjects.Executor) {}, - rand: util.NewThreadsafeRand(time.Now().UnixNano()), clock: clock.RealClock{}, stringInterner: stringInterner, resourceListFactory: resourceListFactory, @@ -253,7 +248,7 @@ func (it *JobQueueIteratorAdapter) Next() (*jobdb.Job, error) { type fairSchedulingAlgoContext struct { queues []*api.Queue priorityFactorByQueue map[string]float64 - isActiveByPoolByQueue map[string]map[string]bool + demandByPoolByQueue map[string]map[string]schedulerobjects.ResourceList totalCapacityByPool schedulerobjects.QuantityByTAndResourceType[string] jobsByExecutorId map[string][]*jobdb.Job nodeIdByJobId map[string]string @@ -297,13 +292,18 @@ func (l *FairSchedulingAlgo) newFairSchedulingAlgoContext(ctx *armadacontext.Con } // Create a map of jobs associated with each executor. - isActiveByPoolByQueue := make(map[string]map[string]bool, len(queues)) jobsByExecutorId := make(map[string][]*jobdb.Job) nodeIdByJobId := make(map[string]string) jobIdsByGangId := make(map[string]map[string]bool) gangIdByJobId := make(map[string]string) + demandByPoolByQueue := make(map[string]map[string]schedulerobjects.ResourceList) + for _, job := range txn.GetAll() { + if job.InTerminalState() { + continue + } + // Mark a queue being active for a given pool. 
A queue is defined as being active if it has a job running // on a pool or if a queued job is eligible for that pool pools := job.Pools() @@ -318,12 +318,17 @@ func (l *FairSchedulingAlgo) newFairSchedulingAlgoContext(ctx *armadacontext.Con } for _, pool := range pools { - isActiveByQueue, ok := isActiveByPoolByQueue[pool] + poolQueueResources, ok := demandByPoolByQueue[pool] + if !ok { + poolQueueResources = make(map[string]schedulerobjects.ResourceList, len(queues)) + demandByPoolByQueue[pool] = poolQueueResources + } + queueResources, ok := poolQueueResources[job.Queue()] if !ok { - isActiveByQueue = make(map[string]bool, len(queues)) + queueResources = schedulerobjects.NewResourceList(len(job.PodRequirements().ResourceRequirements.Requests)) + poolQueueResources[job.Queue()] = queueResources } - isActiveByQueue[job.Queue()] = true - isActiveByPoolByQueue[pool] = isActiveByQueue + queueResources.AddV1ResourceList(job.PodRequirements().ResourceRequirements.Requests) } if job.Queued() { @@ -371,7 +376,7 @@ func (l *FairSchedulingAlgo) newFairSchedulingAlgoContext(ctx *armadacontext.Con return &fairSchedulingAlgoContext{ queues: queues, priorityFactorByQueue: priorityFactorByQueue, - isActiveByPoolByQueue: isActiveByPoolByQueue, + demandByPoolByQueue: demandByPoolByQueue, totalCapacityByPool: totalCapacityByPool, jobsByExecutorId: jobsByExecutorId, nodeIdByJobId: nodeIdByJobId, @@ -437,14 +442,15 @@ func (l *FairSchedulingAlgo) scheduleOnExecutors( totalResources, ) - activeByQueue, ok := fsctx.isActiveByPoolByQueue[pool] + demandByQueue, ok := fsctx.demandByPoolByQueue[pool] if !ok { - activeByQueue = map[string]bool{} + demandByQueue = map[string]schedulerobjects.ResourceList{} } now := time.Now() for queue, priorityFactor := range fsctx.priorityFactorByQueue { - if !activeByQueue[queue] { + demand, hasDemand := demandByQueue[queue] + if !hasDemand { // To ensure fair share is computed only from active queues, i.e., queues with jobs queued or running. 
continue } @@ -474,10 +480,11 @@ func (l *FairSchedulingAlgo) scheduleOnExecutors( } queueLimiter.SetLimitAt(now, rate.Limit(l.schedulingConfig.MaximumPerQueueSchedulingRate*(1-quarantineFactor))) - if err := sctx.AddQueueSchedulingContext(queue, weight, allocatedByPriorityClass, queueLimiter); err != nil { + if err := sctx.AddQueueSchedulingContext(queue, weight, allocatedByPriorityClass, demand, queueLimiter); err != nil { return nil, nil, err } } + sctx.UpdateFairShares() constraints := schedulerconstraints.NewSchedulingConstraints( pool, fsctx.totalCapacityByPool[pool], @@ -489,6 +496,7 @@ func (l *FairSchedulingAlgo) scheduleOnExecutors( sctx, constraints, l.schedulingConfig.ProtectedFractionOfFairShare, + l.schedulingConfig.UseAdjustedFairShareProtection, NewSchedulerJobRepositoryAdapter(fsctx.txn), nodeDb, fsctx.nodeIdByJobId, diff --git a/internal/scheduler/scheduling_algo_test.go b/internal/scheduler/scheduling_algo_test.go index c0b2e07cd3d..94825c2a287 100644 --- a/internal/scheduler/scheduling_algo_test.go +++ b/internal/scheduler/scheduling_algo_test.go @@ -493,6 +493,14 @@ func TestSchedule(t *testing.T) { dbJob := txn.GetById(job.Id()) assert.True(t, job.Equal(dbJob), "expected %v but got %v", job, dbJob) } + + // Check that we calculated fair share and adjusted fair share + for _, schCtx := range schedulerResult.SchedulingContexts { + for _, qtx := range schCtx.QueueSchedulingContexts { + assert.NotEqual(t, 0, qtx.AdjustedFairShare) + assert.NotEqual(t, 0, qtx.FairShare) + } + } }) } } diff --git a/internal/scheduler/simulator/simulator.go b/internal/scheduler/simulator/simulator.go index 592d71240fc..059e4ee0deb 100644 --- a/internal/scheduler/simulator/simulator.go +++ b/internal/scheduler/simulator/simulator.go @@ -443,6 +443,7 @@ func (s *Simulator) handleScheduleEvent(ctx *armadacontext.Context) error { var eventSequences []*armadaevents.EventSequence txn := s.jobDb.WriteTxn() defer txn.Abort() + demandByQueue := calculateDemandByQueue(txn.GetAll()) for _, pool := range s.ClusterSpec.Pools { for i := range pool.ClusterGroups { nodeDb := s.nodeDbByPoolAndExecutorGroup[pool.Name][i] @@ -469,6 +470,11 @@ func (s *Simulator) handleScheduleEvent(ctx *armadacontext.Context) error { sctx.Started = s.time for _, queue := range s.WorkloadSpec.Queues { + demand, hasDemand := demandByQueue[queue.Name] + if !hasDemand { + // To ensure fair share is computed only from active queues, i.e., queues with jobs queued or running. + continue + } limiter, ok := s.limiterByQueue[queue.Name] if !ok { limiter = rate.NewLimiter( @@ -482,6 +488,7 @@ func (s *Simulator) handleScheduleEvent(ctx *armadacontext.Context) error { queue.Name, queue.Weight, s.allocationByPoolAndQueueAndPriorityClass[pool.Name][queue.Name], + demand, limiter, ) if err != nil { @@ -500,6 +507,7 @@ func (s *Simulator) handleScheduleEvent(ctx *armadacontext.Context) error { sctx, constraints, s.schedulingConfig.ProtectedFractionOfFairShare, + s.schedulingConfig.UseAdjustedFairShareProtection, scheduler.NewSchedulerJobRepositoryAdapter(txn), nodeDb, // TODO: Necessary to support partial eviction. 
@@ -880,3 +888,20 @@ func maxTime(a, b time.Time) time.Time { func pointer[T any](t T) *T { return &t } + +func calculateDemandByQueue(jobs []*jobdb.Job) map[string]schedulerobjects.ResourceList { + queueResources := make(map[string]schedulerobjects.ResourceList) + + for _, job := range jobs { + if job.InTerminalState() { + continue + } + r, ok := queueResources[job.Queue()] + if !ok { + r = schedulerobjects.NewResourceList(len(job.PodRequirements().ResourceRequirements.Requests)) + queueResources[job.Queue()] = r + } + r.AddV1ResourceList(job.PodRequirements().ResourceRequirements.Requests) + } + return queueResources +} diff --git a/internal/scheduler/testfixtures/testfixtures.go b/internal/scheduler/testfixtures/testfixtures.go index dac90abbf39..36bac3dfb58 100644 --- a/internal/scheduler/testfixtures/testfixtures.go +++ b/internal/scheduler/testfixtures/testfixtures.go @@ -173,6 +173,7 @@ func TestSchedulingConfig() schedulerconfiguration.SchedulingConfig { ExecutorTimeout: 15 * time.Minute, MaxUnacknowledgedJobsPerExecutor: math.MaxInt, SupportedResourceTypes: GetTestSupportedResourceTypes(), + UseAdjustedFairShareProtection: true, } } From 3a9207ff341709d2087af57abf883b846f94301c Mon Sep 17 00:00:00 2001 From: JamesMurkin Date: Mon, 24 Jun 2024 12:31:53 +0100 Subject: [PATCH 4/9] Improved ingester logging (#3711) * Make ingesters limit how many events they process at once Currently we limit how many messages are processed at once. However, a single message may contain thousands of events, so a limit on messages is a bit flawed - A limit of 1000 could mean you're processing 1000 or 1000000+ events at once This should make the ingesters less prone to long pauses if some large messages come along. Also adjusted event ingester config to be more standard with other config naming Signed-off-by: JamesMurkin * Comment improvement Signed-off-by: JamesMurkin * Comment improvement Signed-off-by: JamesMurkin * gofumpt Signed-off-by: JamesMurkin * Improve config descriptions Signed-off-by: JamesMurkin * Limit Pulsar messages to have a configurable max number of events per message Currently we can publish very large messages (100k+ events per message). This can make the time to process messages quite unpredictable, as they can be anywhere between 1 event and 100000+ events. Now we restrict how many events we put into each message (via `maxAllowedEventsPerMessage`), which should make how many events a given message may contain somewhat more predictable Signed-off-by: JamesMurkin * Revert "Limit Pulsar messages to have a configurable max number of events per message" This reverts commit 11a8a2a4294c5905a3dcea25ce71e01b83d5f96d. 
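As an illustrative sketch (not code from this patch), the batching-by-event-count idea in the first bullet above might look roughly like this; maxEventsPerBatch is a hypothetical setting and EventSequencesWithIds is the batch type this pipeline already uses:

    // Sketch only: flush a batch once the accumulated number of events,
    // rather than the number of messages, crosses a threshold.
    func batchByEventCount(in <-chan *EventSequencesWithIds, maxEventsPerBatch int) <-chan []*EventSequencesWithIds {
    	out := make(chan []*EventSequencesWithIds)
    	go func() {
    		defer close(out)
    		var batch []*EventSequencesWithIds
    		eventCount := 0
    		for msg := range in {
    			batch = append(batch, msg)
    			for _, es := range msg.EventSequences {
    				eventCount += len(es.Events)
    			}
    			if eventCount >= maxEventsPerBatch {
    				out <- batch
    				batch, eventCount = nil, 0
    			}
    		}
    		if len(batch) > 0 {
    			out <- batch // flush any trailing partial batch
    		}
    	}()
    	return out
    }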
* Improve logging in ingester pipeline This should help us understand what is happening in our ingestion pipelines - Should log if we are no longer receiving pulsar messages for 2 minutes - Will log a summary of how many messages and events in each "batch" - Will log a summary of the types of events in each batch - Will log a summary of how long Convert took for each batch. This is admittedly quite a "quick" fix and better long-term steps would be: - Metrics or spans - Some of these could be at the ingester pipeline level (generic) - Some would need to be done in each ingester to expose more detailed information such as which query the time is being spent on Signed-off-by: JamesMurkin --------- Signed-off-by: JamesMurkin --- internal/common/ingest/ingestion_pipeline.go | 58 +++++++++++++++++++- internal/scheduleringester/instructions.go | 1 + pkg/armadaevents/events_util.go | 52 ++++++++++++++++++ 3 files changed, 108 insertions(+), 3 deletions(-) diff --git a/internal/common/ingest/ingestion_pipeline.go b/internal/common/ingest/ingestion_pipeline.go index 46c3096bb83..404372ef859 100644 --- a/internal/common/ingest/ingestion_pipeline.go +++ b/internal/common/ingest/ingestion_pipeline.go @@ -109,12 +109,38 @@ func (i *IngestionPipeline[T]) Run(ctx *armadacontext.Context) error { i.consumer = consumer defer closePulsar() } - pulsarMsgs := i.consumer.Chan() + pulsarMessageChannel := i.consumer.Chan() + pulsarMessages := make(chan pulsar.ConsumerMessage) + + // Consume pulsar messages + // Used to track if we are no longer receiving pulsar messages + go func() { + timeout := time.Minute * 2 + timer := time.NewTimer(timeout) + loop: + for { + if !timer.Stop() { + <-timer.C + } + timer.Reset(timeout) + select { + case msg, ok := <-pulsarMessageChannel: + if !ok { + // Channel closed + break loop + } + pulsarMessages <- msg + case <-timer.C: + log.Infof("No pulsar message received in %s", timeout) + } + } + close(pulsarMessages) + }() // Convert to event sequences eventSequences := make(chan *EventSequencesWithIds) go func() { - for msg := range pulsarMsgs { + for msg := range pulsarMessages { converted := unmarshalEventSequences(msg, i.metrics) eventSequences <- converted } @@ -131,11 +157,24 @@ func (i *IngestionPipeline[T]) Run(ctx *armadacontext.Context) error { close(batchedEventSequences) }() + // Log summary of batch + preprocessedBatchEventSequences := make(chan *EventSequencesWithIds) + go func() { + for msg := range batchedEventSequences { + logSummaryOfEventSequences(msg) + preprocessedBatchEventSequences <- msg + } + close(preprocessedBatchEventSequences) + }() + // Convert to instructions instructions := make(chan T) go func() { + for msg := range preprocessedBatchEventSequences { - for msg := range batchedEventSequences { + start := time.Now() converted := i.converter.Convert(ctx, msg) + taken := time.Now().Sub(start) + log.Infof("Processed %d pulsar messages in %dms", len(msg.MessageIds), taken.Milliseconds()) instructions <- converted } close(instructions) @@ -244,3 +283,16 @@ func combineEventSequences(sequences []*EventSequencesWithIds) *EventSequencesWi EventSequences: combinedSequences, MessageIds: messageIds, } } + +func logSummaryOfEventSequences(sequence *EventSequencesWithIds) { + numberOfEvents := 0 + countOfEventsByType := map[string]int{} + for _, eventSequence := range sequence.EventSequences { + numberOfEvents += len(eventSequence.Events) + for _, e := range eventSequence.Events { + typeString := e.GetEventName() + countOfEventsByType[typeString] = 
countOfEventsByType[typeString] + 1 + } + } + log.Infof("Batch being processed contains %d event messages and %d events of type %v", len(sequence.MessageIds), numberOfEvents, countOfEventsByType) +} diff --git a/internal/scheduleringester/instructions.go b/internal/scheduleringester/instructions.go index 9b45a3db3d7..509a00f2d94 100644 --- a/internal/scheduleringester/instructions.go +++ b/internal/scheduleringester/instructions.go @@ -57,6 +57,7 @@ func (c *InstructionConverter) Convert(_ *armadacontext.Context, sequencesWithId operations = AppendDbOperation(operations, op) } } + log.Infof("Converted sequences into %d db operations", len(operations)) return &DbOperationsWithMessageIds{ Ops: operations, MessageIds: sequencesWithIds.MessageIds, diff --git a/pkg/armadaevents/events_util.go b/pkg/armadaevents/events_util.go index 0a142522e48..22e3cbf35ba 100644 --- a/pkg/armadaevents/events_util.go +++ b/pkg/armadaevents/events_util.go @@ -167,6 +167,58 @@ func (ev *EventSequence_Event) UnmarshalJSON(data []byte) error { return nil } +func (ev *EventSequence_Event) GetEventName() string { + switch ev.GetEvent().(type) { + case *EventSequence_Event_SubmitJob: + return "SubmitJob" + case *EventSequence_Event_JobRunLeased: + return "JobRunLeased" + case *EventSequence_Event_JobRunRunning: + return "JobRunRunning" + case *EventSequence_Event_JobRunSucceeded: + return "JobRunSucceeded" + case *EventSequence_Event_JobRunErrors: + return "JobRunErrors" + case *EventSequence_Event_JobSucceeded: + return "JobSucceeded" + case *EventSequence_Event_JobErrors: + return "JobErrors" + case *EventSequence_Event_JobPreemptionRequested: + return "JobPreemptionRequested" + case *EventSequence_Event_JobRunPreemptionRequested: + return "JobRunPreemptionRequested" + case *EventSequence_Event_ReprioritiseJob: + return "ReprioritiseJob" + case *EventSequence_Event_ReprioritiseJobSet: + return "ReprioritiseJobSet" + case *EventSequence_Event_CancelJob: + return "CancelJob" + case *EventSequence_Event_CancelJobSet: + return "CancelJobSet" + case *EventSequence_Event_CancelledJob: + return "CancelledJob" + case *EventSequence_Event_JobRunCancelled: + return "JobRunCancelled" + case *EventSequence_Event_JobRequeued: + return "JobRequeued" + case *EventSequence_Event_PartitionMarker: + return "PartitionMarker" + case *EventSequence_Event_JobRunPreempted: + return "JobRunPreempted" + case *EventSequence_Event_JobRunAssigned: + return "JobRunAssigned" + case *EventSequence_Event_JobValidated: + return "JobValidated" + case *EventSequence_Event_ReprioritisedJob: + return "ReprioritisedJob" + case *EventSequence_Event_ResourceUtilisation: + return "ResourceUtilisation" + case *EventSequence_Event_StandaloneIngressInfo: + return "StandaloneIngressInfo" + } + return "Unknown" +} + func (kmo *KubernetesMainObject) UnmarshalJSON(data []byte) error { if string(data) == "null" || string(data) == `""` { return nil From efa679c8d637d3b74bb71b787bb22e39f2bd0209 Mon Sep 17 00:00:00 2001 From: robertdavidsmith <34475852+robertdavidsmith@users.noreply.github.com> Date: Mon, 24 Jun 2024 13:51:28 +0100 Subject: [PATCH 5/9] Lookout: Support state rejected and job errors (#3706) * Support state rejected and job errors Signed-off-by: Robert Smith * fix test Signed-off-by: Robert Smith --------- Signed-off-by: Robert Smith Signed-off-by: Chris Martin Co-authored-by: Chris Martin --- internal/lookout/ui/src/App.tsx | 4 +- .../lookoutV2/sidebar/ContainerDetails.tsx | 4 +- .../lookoutV2/sidebar/Sidebar.test.tsx | 10 +--
.../components/lookoutV2/sidebar/Sidebar.tsx | 19 +++-- .../sidebar/SidebarTabJobDetails.tsx | 4 +- .../lookoutV2/sidebar/SidebarTabJobLogs.tsx | 4 +- ...ule.css => SidebarTabJobResult.module.css} | 0 ...TabJobRuns.tsx => SidebarTabJobResult.tsx} | 74 ++++++++++++++++--- .../lookoutV2/sidebar/SidebarTabJobYaml.tsx | 4 +- .../lookoutV2/JobsTableContainer.test.tsx | 8 +- .../lookoutV2/JobsTableContainer.tsx | 4 +- internal/lookout/ui/src/hooks/useJobSpec.ts | 4 +- internal/lookout/ui/src/index.tsx | 6 +- .../lookout/ui/src/models/lookoutV2Models.ts | 10 ++- .../services/lookoutV2/GetJobInfoService.ts | 33 +++++++++ .../services/lookoutV2/GetJobSpecService.ts | 19 ----- ...pecService.ts => FakeGetJobInfoService.ts} | 14 +++- .../ui/src/utils/jobsTableFormatters.ts | 2 + tools.yaml | 1 + 19 files changed, 159 insertions(+), 65 deletions(-) rename internal/lookout/ui/src/components/lookoutV2/sidebar/{SidebarTabJobRuns.module.css => SidebarTabJobResult.module.css} (100%) rename internal/lookout/ui/src/components/lookoutV2/sidebar/{SidebarTabJobRuns.tsx => SidebarTabJobResult.tsx} (84%) create mode 100644 internal/lookout/ui/src/services/lookoutV2/GetJobInfoService.ts delete mode 100644 internal/lookout/ui/src/services/lookoutV2/GetJobSpecService.ts rename internal/lookout/ui/src/services/lookoutV2/mocks/{FakeGetJobSpecService.ts => FakeGetJobInfoService.ts} (72%) diff --git a/internal/lookout/ui/src/App.tsx b/internal/lookout/ui/src/App.tsx index 8615a369c24..bd6fedba20b 100644 --- a/internal/lookout/ui/src/App.tsx +++ b/internal/lookout/ui/src/App.tsx @@ -17,7 +17,7 @@ import NavBar from "./components/NavBar" import JobSetsContainer from "./containers/JobSetsContainer" import { UserManagerContext, useUserManager } from "./oidc" import { ICordonService } from "./services/lookoutV2/CordonService" -import { IGetJobSpecService } from "./services/lookoutV2/GetJobSpecService" +import { IGetJobInfoService } from "./services/lookoutV2/GetJobInfoService" import { IGetRunInfoService } from "./services/lookoutV2/GetRunInfoService" import { ILogService } from "./services/lookoutV2/LogService" import { CommandSpec } from "./utils" @@ -69,7 +69,7 @@ type AppProps = { v2GetJobsService: IGetJobsService v2GroupJobsService: IGroupJobsService v2RunInfoService: IGetRunInfoService - v2JobSpecService: IGetJobSpecService + v2JobSpecService: IGetJobInfoService v2LogService: ILogService v2UpdateJobsService: UpdateJobsService v2UpdateJobSetsService: UpdateJobSetsService diff --git a/internal/lookout/ui/src/components/lookoutV2/sidebar/ContainerDetails.tsx b/internal/lookout/ui/src/components/lookoutV2/sidebar/ContainerDetails.tsx index 0fbc0b6079c..1121d130ef6 100644 --- a/internal/lookout/ui/src/components/lookoutV2/sidebar/ContainerDetails.tsx +++ b/internal/lookout/ui/src/components/lookoutV2/sidebar/ContainerDetails.tsx @@ -7,7 +7,7 @@ import { KeyValuePairTable } from "./KeyValuePairTable" import { useCustomSnackbar } from "../../../hooks/useCustomSnackbar" import { useJobSpec } from "../../../hooks/useJobSpec" import { Job } from "../../../models/lookoutV2Models" -import { IGetJobSpecService } from "../../../services/lookoutV2/GetJobSpecService" +import { IGetJobInfoService } from "../../../services/lookoutV2/GetJobInfoService" export interface ContainerData { name: string @@ -19,7 +19,7 @@ export interface ContainerData { interface ContainerDetailsProps { job: Job - jobSpecService: IGetJobSpecService + jobSpecService: IGetJobInfoService } const getContainerData = (container: any): ContainerData => { diff --git 
a/internal/lookout/ui/src/components/lookoutV2/sidebar/Sidebar.test.tsx b/internal/lookout/ui/src/components/lookoutV2/sidebar/Sidebar.test.tsx index 4f88ee85fcc..aabbcf5a05a 100644 --- a/internal/lookout/ui/src/components/lookoutV2/sidebar/Sidebar.test.tsx +++ b/internal/lookout/ui/src/components/lookoutV2/sidebar/Sidebar.test.tsx @@ -6,7 +6,7 @@ import { makeTestJob } from "utils/fakeJobsUtils" import { Sidebar } from "./Sidebar" import { FakeCordonService } from "../../../services/lookoutV2/mocks/FakeCordonService" -import FakeGetJobSpecService from "../../../services/lookoutV2/mocks/FakeGetJobSpecService" +import FakeGetJobInfoService from "../../../services/lookoutV2/mocks/FakeGetJobInfoService" import { FakeGetRunInfoService } from "../../../services/lookoutV2/mocks/FakeGetRunInfoService" import { FakeLogService } from "../../../services/lookoutV2/mocks/FakeLogService" @@ -44,7 +44,7 @@ describe("Sidebar", () => { { const run = job.runs[0] // Switch to runs tab - await userEvent.click(getByRole("tab", { name: /Runs/ })) + await userEvent.click(getByRole("tab", { name: /Result/ })) // First run should already be expanded within(getByRole("row", { name: /Run ID/ })).getByText(run.runId) @@ -84,7 +84,7 @@ describe("Sidebar", () => { run.exitCode = 137 // Switch to runs tab - await userEvent.click(getByRole("tab", { name: /Runs/ })) + await userEvent.click(getByRole("tab", { name: /Result/ })) // First run should already be expanded within(getByRole("row", { name: /Run ID/ })).getByText(run.runId) @@ -96,7 +96,7 @@ describe("Sidebar", () => { const { getByRole, getByText } = renderComponent() // Switch to runs tab - await userEvent.click(getByRole("tab", { name: /Runs/ })) + await userEvent.click(getByRole("tab", { name: /Result/ })) getByText("This job has not run.") }) diff --git a/internal/lookout/ui/src/components/lookoutV2/sidebar/Sidebar.tsx b/internal/lookout/ui/src/components/lookoutV2/sidebar/Sidebar.tsx index f883f3fc82e..8d6c555631b 100644 --- a/internal/lookout/ui/src/components/lookoutV2/sidebar/Sidebar.tsx +++ b/internal/lookout/ui/src/components/lookoutV2/sidebar/Sidebar.tsx @@ -9,17 +9,17 @@ import { SidebarHeader } from "./SidebarHeader" import { SidebarTabJobCommands } from "./SidebarTabJobCommands" import { SidebarTabJobDetails } from "./SidebarTabJobDetails" import { SidebarTabJobLogs } from "./SidebarTabJobLogs" -import { SidebarTabJobRuns } from "./SidebarTabJobRuns" +import { SidebarTabJobResult } from "./SidebarTabJobResult" import { SidebarTabJobYaml } from "./SidebarTabJobYaml" import { ICordonService } from "../../../services/lookoutV2/CordonService" -import { IGetJobSpecService } from "../../../services/lookoutV2/GetJobSpecService" +import { IGetJobInfoService } from "../../../services/lookoutV2/GetJobInfoService" import { IGetRunInfoService } from "../../../services/lookoutV2/GetRunInfoService" import { ILogService } from "../../../services/lookoutV2/LogService" import { CommandSpec } from "../../../utils" enum SidebarTab { JobDetails = "JobDetails", - JobRuns = "JobRuns", + JobResult = "JobResult", Yaml = "Yaml", Logs = "Logs", Commands = "Commands", @@ -34,7 +34,7 @@ type ResizeState = { export interface SidebarProps { job: Job runInfoService: IGetRunInfoService - jobSpecService: IGetJobSpecService + jobSpecService: IGetJobInfoService logService: ILogService cordonService: ICordonService sidebarWidth: number @@ -165,7 +165,7 @@ export const Sidebar = memo( - + - - + + diff --git a/internal/lookout/ui/src/components/lookoutV2/sidebar/SidebarTabJobDetails.tsx 
b/internal/lookout/ui/src/components/lookoutV2/sidebar/SidebarTabJobDetails.tsx index e7eaacd4164..ab94c8b87a0 100644 --- a/internal/lookout/ui/src/components/lookoutV2/sidebar/SidebarTabJobDetails.tsx +++ b/internal/lookout/ui/src/components/lookoutV2/sidebar/SidebarTabJobDetails.tsx @@ -3,12 +3,12 @@ import { Job } from "models/lookoutV2Models" import { ContainerDetails } from "./ContainerDetails" import { KeyValuePairTable } from "./KeyValuePairTable" -import { IGetJobSpecService } from "../../../services/lookoutV2/GetJobSpecService" +import { IGetJobInfoService } from "../../../services/lookoutV2/GetJobInfoService" import { formatBytes, formatCpu } from "../../../utils/resourceUtils" export interface SidebarTabJobDetailsProps { job: Job - jobSpecService: IGetJobSpecService + jobSpecService: IGetJobInfoService } export const SidebarTabJobDetails = ({ job, jobSpecService }: SidebarTabJobDetailsProps) => { diff --git a/internal/lookout/ui/src/components/lookoutV2/sidebar/SidebarTabJobLogs.tsx b/internal/lookout/ui/src/components/lookoutV2/sidebar/SidebarTabJobLogs.tsx index 5c0a2589ffe..78ebfebe9a3 100644 --- a/internal/lookout/ui/src/components/lookoutV2/sidebar/SidebarTabJobLogs.tsx +++ b/internal/lookout/ui/src/components/lookoutV2/sidebar/SidebarTabJobLogs.tsx @@ -18,13 +18,13 @@ import styles from "./SidebarTabJobLogs.module.css" import { useCustomSnackbar } from "../../../hooks/useCustomSnackbar" import { useJobSpec } from "../../../hooks/useJobSpec" import { getAccessToken, useUserManager } from "../../../oidc" -import { IGetJobSpecService } from "../../../services/lookoutV2/GetJobSpecService" +import { IGetJobInfoService } from "../../../services/lookoutV2/GetJobInfoService" import { ILogService, LogLine } from "../../../services/lookoutV2/LogService" import { getErrorMessage, RequestStatus } from "../../../utils" export interface SidebarTabJobLogsProps { job: Job - jobSpecService: IGetJobSpecService + jobSpecService: IGetJobInfoService logService: ILogService } diff --git a/internal/lookout/ui/src/components/lookoutV2/sidebar/SidebarTabJobRuns.module.css b/internal/lookout/ui/src/components/lookoutV2/sidebar/SidebarTabJobResult.module.css similarity index 100% rename from internal/lookout/ui/src/components/lookoutV2/sidebar/SidebarTabJobRuns.module.css rename to internal/lookout/ui/src/components/lookoutV2/sidebar/SidebarTabJobResult.module.css diff --git a/internal/lookout/ui/src/components/lookoutV2/sidebar/SidebarTabJobRuns.tsx b/internal/lookout/ui/src/components/lookoutV2/sidebar/SidebarTabJobResult.tsx similarity index 84% rename from internal/lookout/ui/src/components/lookoutV2/sidebar/SidebarTabJobRuns.tsx rename to internal/lookout/ui/src/components/lookoutV2/sidebar/SidebarTabJobResult.tsx index 8e2c7d470b1..19f285ce651 100644 --- a/internal/lookout/ui/src/components/lookoutV2/sidebar/SidebarTabJobRuns.tsx +++ b/internal/lookout/ui/src/components/lookoutV2/sidebar/SidebarTabJobResult.tsx @@ -3,40 +3,49 @@ import React, { useCallback, useEffect, useMemo, useRef, useState } from "react" import { ExpandMore } from "@mui/icons-material" import { Accordion, - AccordionSummary, - Typography, AccordionDetails, + AccordionSummary, + Button, CircularProgress, Dialog, - DialogTitle, - DialogContent, DialogActions, + DialogContent, + DialogTitle, + Tooltip, + Typography, } from "@mui/material" -import { Button, Tooltip } from "@mui/material" -import { Job, JobRun } from "models/lookoutV2Models" +import { Job, JobRun, JobState } from "models/lookoutV2Models" import { 
formatJobRunState, formatTimeSince, formatUtcDate } from "utils/jobsTableFormatters" import { CodeBlock } from "./CodeBlock" import { KeyValuePairTable } from "./KeyValuePairTable" -import styles from "./SidebarTabJobRuns.module.css" +import styles from "./SidebarTabJobResult.module.css" import { useCustomSnackbar } from "../../../hooks/useCustomSnackbar" import { getAccessToken, useUserManager } from "../../../oidc" import { ICordonService } from "../../../services/lookoutV2/CordonService" +import { IGetJobInfoService } from "../../../services/lookoutV2/GetJobInfoService" import { IGetRunInfoService } from "../../../services/lookoutV2/GetRunInfoService" import { getErrorMessage } from "../../../utils" -export interface SidebarTabJobRunsProps { +export interface SidebarTabJobResultProps { job: Job runInfoService: IGetRunInfoService + jobInfoService: IGetJobInfoService cordonService: ICordonService } type LoadState = "Idle" | "Loading" -export const SidebarTabJobRuns = ({ job, runInfoService, cordonService }: SidebarTabJobRunsProps) => { +export const SidebarTabJobResult = ({ + job, + jobInfoService, + runInfoService, + cordonService, +}: SidebarTabJobResultProps) => { const mounted = useRef(false) const openSnackbar = useCustomSnackbar() const runsNewestFirst = useMemo(() => [...job.runs].reverse(), [job]) + const [jobError, setJobError] = useState("") const [runErrorMap, setRunErrorMap] = useState>(new Map()) const [runErrorLoadingMap, setRunErrorLoadingMap] = useState>(new Map()) const [runDebugMessageMap, setRunDebugMessageMap] = useState>(new Map()) @@ -45,6 +54,29 @@ export const SidebarTabJobRuns = ({ job, runInfoService, cordonService }: Sideba ) const [open, setOpen] = useState(false) + const fetchJobError = useCallback(async () => { + if (job.state != JobState.Failed && job.state != JobState.Rejected) { + setJobError("") + return + } + const getJobErrorResultPromise = jobInfoService.getJobError(job.jobId) + getJobErrorResultPromise + .then((errorString) => { + if (!mounted.current) { + return + } + setJobError(errorString) + }) + .catch(async (e) => { + const errMsg = await getErrorMessage(e) + console.error(errMsg) + if (!mounted.current) { + return + } + openSnackbar("Failed to retrieve Job error for Job with ID: " + job.jobId + ": " + errMsg, "error") + }) + }, [job]) + const fetchRunErrors = useCallback(async () => { const newRunErrorLoadingMap = new Map() for (const run of job.runs) { @@ -134,9 +166,25 @@ export const SidebarTabJobRuns = ({ job, runInfoService, cordonService }: Sideba } }, [job]) + let topLevelError = "" + let topLevelErrorTitle = "" + if (jobError != "") { + topLevelError = jobError + topLevelErrorTitle = "Job Error" + } else { + for (const run of job.runs) { + const runErr = runErrorMap.get(run.runId) ?? "" + if (runErr != "") { + topLevelError = runErr + topLevelErrorTitle = "Last Job Run Error" + } + } + } + useEffect(() => { mounted.current = true fetchRunErrors() + fetchJobError() fetchRunDebugMessages() return () => { mounted.current = false @@ -167,9 +215,15 @@ export const SidebarTabJobRuns = ({ job, runInfoService, cordonService }: Sideba openSnackbar("Failed to cordon node " + node + ": " + errMsg, "error") } } - return (
+ {topLevelError !== "" ? ( + <> + {topLevelErrorTitle}: + + + ) : null} + Runs: {runsNewestFirst.map((run, i) => { return ( diff --git a/internal/lookout/ui/src/components/lookoutV2/sidebar/SidebarTabJobYaml.tsx b/internal/lookout/ui/src/components/lookoutV2/sidebar/SidebarTabJobYaml.tsx index 18346c312d4..6657a108b9b 100644 --- a/internal/lookout/ui/src/components/lookoutV2/sidebar/SidebarTabJobYaml.tsx +++ b/internal/lookout/ui/src/components/lookoutV2/sidebar/SidebarTabJobYaml.tsx @@ -9,11 +9,11 @@ import { Job } from "models/lookoutV2Models" import styles from "./SidebarTabJobYaml.module.css" import { useCustomSnackbar } from "../../../hooks/useCustomSnackbar" import { useJobSpec } from "../../../hooks/useJobSpec" -import { IGetJobSpecService } from "../../../services/lookoutV2/GetJobSpecService" +import { IGetJobInfoService } from "../../../services/lookoutV2/GetJobInfoService" export interface SidebarTabJobYamlProps { job: Job - jobSpecService: IGetJobSpecService + jobSpecService: IGetJobInfoService } function toJobSubmissionYaml(jobSpec: Record): string { diff --git a/internal/lookout/ui/src/containers/lookoutV2/JobsTableContainer.test.tsx b/internal/lookout/ui/src/containers/lookoutV2/JobsTableContainer.test.tsx index 6f484316451..86ad1053f89 100644 --- a/internal/lookout/ui/src/containers/lookoutV2/JobsTableContainer.test.tsx +++ b/internal/lookout/ui/src/containers/lookoutV2/JobsTableContainer.test.tsx @@ -11,11 +11,11 @@ import FakeGroupJobsService from "services/lookoutV2/mocks/FakeGroupJobsService" import { v4 as uuidv4 } from "uuid" import { JobsTableContainer } from "./JobsTableContainer" -import { IGetJobSpecService } from "../../services/lookoutV2/GetJobSpecService" +import { IGetJobInfoService } from "../../services/lookoutV2/GetJobInfoService" import { IGetRunInfoService } from "../../services/lookoutV2/GetRunInfoService" import { ILogService } from "../../services/lookoutV2/LogService" import { FakeCordonService } from "../../services/lookoutV2/mocks/FakeCordonService" -import FakeGetJobSpecService from "../../services/lookoutV2/mocks/FakeGetJobSpecService" +import FakeGetJobInfoService from "../../services/lookoutV2/mocks/FakeGetJobInfoService" import { FakeGetRunInfoService } from "../../services/lookoutV2/mocks/FakeGetRunInfoService" import { FakeLogService } from "../../services/lookoutV2/mocks/FakeLogService" @@ -57,7 +57,7 @@ describe("JobsTableContainer", () => { let getJobsService: IGetJobsService, groupJobsService: IGroupJobsService, runErrorService: IGetRunInfoService, - jobSpecService: IGetJobSpecService, + jobSpecService: IGetJobInfoService, logService: ILogService, updateJobsService: UpdateJobsService @@ -69,7 +69,7 @@ describe("JobsTableContainer", () => { beforeEach(() => { setUp([]) runErrorService = new FakeGetRunInfoService(false) - jobSpecService = new FakeGetJobSpecService(false) + jobSpecService = new FakeGetJobInfoService(false) logService = new FakeLogService() localStorage.clear() diff --git a/internal/lookout/ui/src/containers/lookoutV2/JobsTableContainer.tsx b/internal/lookout/ui/src/containers/lookoutV2/JobsTableContainer.tsx index 86e1c7e21c2..0528697b588 100644 --- a/internal/lookout/ui/src/containers/lookoutV2/JobsTableContainer.tsx +++ b/internal/lookout/ui/src/containers/lookoutV2/JobsTableContainer.tsx @@ -71,7 +71,7 @@ import styles from "./JobsTableContainer.module.css" import { useCustomSnackbar } from "../../hooks/useCustomSnackbar" import { ICordonService } from "../../services/lookoutV2/CordonService" import { CustomViewsService } 
from "../../services/lookoutV2/CustomViewsService" -import { IGetJobSpecService } from "../../services/lookoutV2/GetJobSpecService" +import { IGetJobInfoService } from "../../services/lookoutV2/GetJobInfoService" import { ILogService } from "../../services/lookoutV2/LogService" import { getErrorMessage, waitMillis, CommandSpec } from "../../utils" import { EmptyInputError, ParseError } from "../../utils/resourceUtils" @@ -83,7 +83,7 @@ interface JobsTableContainerProps { groupJobsService: IGroupJobsService updateJobsService: UpdateJobsService runInfoService: IGetRunInfoService - jobSpecService: IGetJobSpecService + jobSpecService: IGetJobInfoService logService: ILogService cordonService: ICordonService debug: boolean diff --git a/internal/lookout/ui/src/hooks/useJobSpec.ts b/internal/lookout/ui/src/hooks/useJobSpec.ts index 0b1cc654401..68b4c72b681 100644 --- a/internal/lookout/ui/src/hooks/useJobSpec.ts +++ b/internal/lookout/ui/src/hooks/useJobSpec.ts @@ -2,7 +2,7 @@ import { useEffect, useState } from "react" import { OpenSnackbarFn } from "./useCustomSnackbar" import { Job } from "../models/lookoutV2Models" -import { IGetJobSpecService } from "../services/lookoutV2/GetJobSpecService" +import { IGetJobInfoService } from "../services/lookoutV2/GetJobInfoService" import { getErrorMessage, RequestStatus } from "../utils" export type JobSpecState = { @@ -12,7 +12,7 @@ export type JobSpecState = { export const useJobSpec = ( job: Job, - jobSpecService: IGetJobSpecService, + jobSpecService: IGetJobInfoService, openSnackbar: OpenSnackbarFn, ): JobSpecState => { const [jobSpecState, setJobSpecState] = useState({ diff --git a/internal/lookout/ui/src/index.tsx b/internal/lookout/ui/src/index.tsx index 3c014e03d96..da99728a2a9 100644 --- a/internal/lookout/ui/src/index.tsx +++ b/internal/lookout/ui/src/index.tsx @@ -11,11 +11,11 @@ import { App } from "./App" import { SubmitApi, Configuration as SubmitConfiguration } from "./openapi/armada" import reportWebVitals from "./reportWebVitals" import { CordonService } from "./services/lookoutV2/CordonService" -import { GetJobSpecService } from "./services/lookoutV2/GetJobSpecService" +import { GetJobInfoService } from "./services/lookoutV2/GetJobInfoService" import { GetRunInfoService } from "./services/lookoutV2/GetRunInfoService" import { LogService as V2LogService } from "./services/lookoutV2/LogService" import { FakeCordonService } from "./services/lookoutV2/mocks/FakeCordonService" -import FakeGetJobSpecService from "./services/lookoutV2/mocks/FakeGetJobSpecService" +import FakeGetJobInfoService from "./services/lookoutV2/mocks/FakeGetJobInfoService" import { FakeGetRunInfoService } from "./services/lookoutV2/mocks/FakeGetRunInfoService" import { FakeLogService } from "./services/lookoutV2/mocks/FakeLogService" import { getUIConfig } from "./utils" @@ -43,7 +43,7 @@ import "./index.css" const v2LogService = fakeDataEnabled ? new FakeLogService() : new V2LogService({ credentials: "include" }, uiConfig.binocularsBaseUrlPattern) - const v2JobSpecService = fakeDataEnabled ? new FakeGetJobSpecService() : new GetJobSpecService() + const v2JobSpecService = fakeDataEnabled ? 
new FakeGetJobInfoService() : new GetJobInfoService() const v2UpdateJobsService = new UpdateJobsService(submitApi) const v2UpdateJobSetsService = new UpdateJobSetsService(submitApi) const v2CordonService = fakeDataEnabled diff --git a/internal/lookout/ui/src/models/lookoutV2Models.ts b/internal/lookout/ui/src/models/lookoutV2Models.ts index 05782b37a5a..6cb92d8839d 100644 --- a/internal/lookout/ui/src/models/lookoutV2Models.ts +++ b/internal/lookout/ui/src/models/lookoutV2Models.ts @@ -8,6 +8,7 @@ export enum JobState { Failed = "FAILED", Cancelled = "CANCELLED", Preempted = "PREEMPTED", + Rejected = "REJECTED", } export const jobStateDisplayInfo: Record = { @@ -19,9 +20,16 @@ export const jobStateDisplayInfo: Record = { [JobState.Failed]: { displayName: "Failed", color: "#ff0000" }, [JobState.Cancelled]: { displayName: "Cancelled", color: "#999999" }, [JobState.Preempted]: { displayName: "Preempted", color: "#f8bbd0" }, + [JobState.Rejected]: { displayName: "Rejected", color: "#ef5350" }, } -const terminatedJobStates = new Set([JobState.Succeeded, JobState.Failed, JobState.Cancelled, JobState.Preempted]) +const terminatedJobStates = new Set([ + JobState.Succeeded, + JobState.Failed, + JobState.Cancelled, + JobState.Preempted, + JobState.Rejected, +]) export const isTerminatedJobState = (state: JobState) => terminatedJobStates.has(state) export enum JobRunState { diff --git a/internal/lookout/ui/src/services/lookoutV2/GetJobInfoService.ts b/internal/lookout/ui/src/services/lookoutV2/GetJobInfoService.ts new file mode 100644 index 00000000000..c20c4aa1be7 --- /dev/null +++ b/internal/lookout/ui/src/services/lookoutV2/GetJobInfoService.ts @@ -0,0 +1,33 @@ +export interface IGetJobInfoService { + getJobSpec(jobId: string, abortSignal?: AbortSignal): Promise> + getJobError(jobId: string, abortSignal?: AbortSignal): Promise +} + +export class GetJobInfoService implements IGetJobInfoService { + async getJobSpec(jobId: string, abortSignal?: AbortSignal): Promise> { + const response = await fetch("/api/v1/jobSpec", { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ + jobId, + }), + signal: abortSignal, + }) + + const json = await response.json() + return json.job ?? {} + } + async getJobError(jobId: string, abortSignal?: AbortSignal): Promise { + const response = await fetch("/api/v1/jobError", { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify({ + jobId, + }), + signal: abortSignal, + }) + + const json = await response.json() + return json.errorString ?? "" + } +} diff --git a/internal/lookout/ui/src/services/lookoutV2/GetJobSpecService.ts b/internal/lookout/ui/src/services/lookoutV2/GetJobSpecService.ts deleted file mode 100644 index 53eddb97002..00000000000 --- a/internal/lookout/ui/src/services/lookoutV2/GetJobSpecService.ts +++ /dev/null @@ -1,19 +0,0 @@ -export interface IGetJobSpecService { - getJobSpec(jobId: string, abortSignal?: AbortSignal): Promise> -} - -export class GetJobSpecService implements IGetJobSpecService { - async getJobSpec(jobId: string, abortSignal?: AbortSignal): Promise> { - const response = await fetch("/api/v1/jobSpec", { - method: "POST", - headers: { "Content-Type": "application/json" }, - body: JSON.stringify({ - jobId, - }), - signal: abortSignal, - }) - - const json = await response.json() - return json.job ?? 
{} - } -} diff --git a/internal/lookout/ui/src/services/lookoutV2/mocks/FakeGetJobSpecService.ts b/internal/lookout/ui/src/services/lookoutV2/mocks/FakeGetJobInfoService.ts similarity index 72% rename from internal/lookout/ui/src/services/lookoutV2/mocks/FakeGetJobSpecService.ts rename to internal/lookout/ui/src/services/lookoutV2/mocks/FakeGetJobInfoService.ts index 19ad8286ba3..21e6807bff8 100644 --- a/internal/lookout/ui/src/services/lookoutV2/mocks/FakeGetJobSpecService.ts +++ b/internal/lookout/ui/src/services/lookoutV2/mocks/FakeGetJobInfoService.ts @@ -1,7 +1,7 @@ import { simulateApiWait } from "../../../utils/fakeJobsUtils" -import { IGetJobSpecService } from "../GetJobSpecService" +import { IGetJobInfoService } from "../GetJobInfoService" -export default class FakeGetJobSpecService implements IGetJobSpecService { +export default class FakeGetJobInfoService implements IGetJobInfoService { constructor(private simulateApiWait = true) {} async getJobSpec(jobId: string, signal?: AbortSignal): Promise> { @@ -12,4 +12,14 @@ export default class FakeGetJobSpecService implements IGetJobSpecService { '{"id":"01gvgjbr1a8nvh5saz51j2nf8b","clientId":"01gvgjbr0jrzvschp2f8jhk6n5","jobSetId":"alices-project-0","queue":"alice","namespace":"default","owner":"anonymous","podSpec":{"containers":[{"name":"cpu-burner","image":"containerstack/alpine-stress:latest","command":["sh"],"args":["-c","echo FAILED && echo hello world > /dev/termination-log && exit 137"],"resources":{"limits":{"cpu":"200m","ephemeral-storage":"8Gi","memory":"128Mi","nvidia.com/gpu":"8"},"requests":{"cpu":"200m","ephemeral-storage":"8Gi","memory":"128Mi","nvidia.com/gpu":"8"}},"imagePullPolicy":"IfNotPresent"}],"restartPolicy":"Never","terminationGracePeriodSeconds":1,"tolerations":[{"key":"armadaproject.io/armada","operator":"Equal","value":"true","effect":"NoSchedule"},{"key":"armadaproject.io/pc-armada-default","operator":"Equal","value":"true","effect":"NoSchedule"}],"priorityClassName":"armada-default"},"created":"2023-03-14T17:23:21.29874Z"}', ) } + + async getJobError(jobId: string, abortSignal?: AbortSignal): Promise { + if (this.simulateApiWait) { + await simulateApiWait(abortSignal) + } + if (jobId === "doesnotexist") { + throw new Error("Failed to retrieve job because of reasons") + } + return Promise.resolve("something has gone wrong with this job") + } } diff --git a/internal/lookout/ui/src/utils/jobsTableFormatters.ts b/internal/lookout/ui/src/utils/jobsTableFormatters.ts index 8948ac5fea8..4a1d1df4abf 100644 --- a/internal/lookout/ui/src/utils/jobsTableFormatters.ts +++ b/internal/lookout/ui/src/utils/jobsTableFormatters.ts @@ -28,6 +28,8 @@ export const colorForJobState = (state?: JobState): string | undefined => { return pink[100] case JobState.Leased: return cyan[100] + case JobState.Rejected: + return red["400"] default: return purple["A100"] } diff --git a/tools.yaml b/tools.yaml index 9335b7e03b3..b8f058550eb 100644 --- a/tools.yaml +++ b/tools.yaml @@ -2,6 +2,7 @@ # TODO: Use latest goreleaser. After upgrading k8s.io packages. 
tools: - github.com/go-swagger/go-swagger/cmd/swagger@v0.29.0 +- github.com/golang/mock/mockgen@v1.6.0 - github.com/gordonklaus/ineffassign@v0.0.0-20210914165742-4cc7213b9bc8 - github.com/goreleaser/goreleaser@v1.20.0 - github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway@v1.16.0 From 3e7714ddf5f06b3cd263908a060a52e6cdf2a0d8 Mon Sep 17 00:00:00 2001 From: Dejan Zele Pejchev Date: Tue, 25 Jun 2024 10:12:14 +0200 Subject: [PATCH 6/9] hardcode armadactl version to fix integration tests issue during release (#3753) --- scripts/get-armadactl.sh | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/scripts/get-armadactl.sh b/scripts/get-armadactl.sh index 52a881f79ff..daa2e9b762f 100755 --- a/scripts/get-armadactl.sh +++ b/scripts/get-armadactl.sh @@ -35,7 +35,9 @@ get_latest_release() { sed -E 's/.*"([^"]+)".*/\1/' } -VERSION=$(get_latest_release) +# TODO: This is commented out due to an issue with the release process. Until we fix the integration tests run on release, we will use the hardcoded version v0.8.2 +#VERSION=$(get_latest_release) +VERSION=v0.8.2 ARMADACTL_URL="https://github.com/armadaproject/armada/releases/download/$VERSION/armadactl_${VERSION#v}_${SYSTEM}_${ARCH}.${ARCHIVE_TYPE}" From d6568162e5f71a932a330e3747a820fdf7e0f249 Mon Sep 17 00:00:00 2001 From: JamesMurkin Date: Tue, 25 Jun 2024 10:07:55 +0100 Subject: [PATCH 7/9] [Bug] Fix ingestion pipeline (#3752) The API for working out if a timer is stopped / needs draining is more complicated than I understood. We could fix what we have to use it properly, but instead we use a simpler approach that is less error-prone. It also means we can print how long it has been since we last received an event Signed-off-by: JamesMurkin Co-authored-by: Chris Martin --- internal/common/ingest/ingestion_pipeline.go | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/internal/common/ingest/ingestion_pipeline.go b/internal/common/ingest/ingestion_pipeline.go index 404372ef859..98d3775a58e 100644 --- a/internal/common/ingest/ingestion_pipeline.go +++ b/internal/common/ingest/ingestion_pipeline.go @@ -116,13 +116,10 @@ func (i *IngestionPipeline[T]) Run(ctx *armadacontext.Context) error { // Used to track if we are no longer receiving pulsar messages go func() { timeout := time.Minute * 2 - timer := time.NewTimer(timeout) + ticker := time.NewTicker(timeout) + lastReceivedTime := time.Now() loop: for { - if !timer.Stop() { - <-timer.C - } - timer.Reset(timeout) select { case msg, ok := <-pulsarMessageChannel: if !ok { @@ -130,8 +127,12 @@ func (i *IngestionPipeline[T]) Run(ctx *armadacontext.Context) error { break loop } pulsarMessages <- msg - case <-timer.C: - log.Infof("No pulsar message received in %s", timeout) + lastReceivedTime = time.Now() + case <-ticker.C: + timeSinceLastReceived := time.Now().Sub(lastReceivedTime) + if timeSinceLastReceived > timeout { + log.Infof("Last pulsar message received %s ago", timeSinceLastReceived) + } } } close(pulsarMessages) From 2b3e7f364a039ef6c26ac3d9ce4f33a3736f3b1f Mon Sep 17 00:00:00 2001 From: Chris Martin Date: Tue, 25 Jun 2024 10:25:42 +0100 Subject: [PATCH 8/9] Allow Limits To Be Greater Than Requests (#3748) * wip Signed-off-by: Chris Martin * add tests Signed-off-by: Chris Martin * lint Signed-off-by: Chris Martin * lint Signed-off-by: Chris Martin * lint Signed-off-by: Chris Martin * code review Signed-off-by: Chris Martin --------- Signed-off-by: Chris Martin Co-authored-by: Chris Martin --- internal/armada/configuration/types.go | 3 + 
.../submit/validation/submit_request.go | 40 ++++++----- .../submit/validation/submit_request_test.go | 69 +++++++++++++++++-- internal/scheduler/jobdb/reconciliation.go | 13 ---- 4 files changed, 90 insertions(+), 35 deletions(-) diff --git a/internal/armada/configuration/types.go b/internal/armada/configuration/types.go index b1a3c5c9d5f..4cacfc986d8 100644 --- a/internal/armada/configuration/types.go +++ b/internal/armada/configuration/types.go @@ -125,6 +125,9 @@ type SubmissionConfig struct { // will have activeDeadlineSeconds set to 1. // Trumps DefaultActiveDeadline. DefaultActiveDeadlineByResourceRequest map[string]time.Duration + // Maximum ratio of limits:requests per resource. Jobs who have a higher limits:resource ratio than this will be rejected. + // Any resource type missing from this map will default to 1.0. + MaxOversubscriptionByResourceRequest map[string]float64 } // TODO: we can probably just typedef this to map[string]string diff --git a/internal/armada/submit/validation/submit_request.go b/internal/armada/submit/validation/submit_request.go index 04591aa0981..0d30c871952 100644 --- a/internal/armada/submit/validation/submit_request.go +++ b/internal/armada/submit/validation/submit_request.go @@ -3,8 +3,6 @@ package validation import ( "fmt" - v1 "k8s.io/api/core/v1" - "github.com/pkg/errors" "k8s.io/component-helpers/scheduling/corev1/nodeaffinity" @@ -210,28 +208,36 @@ func validatePriorityClasses(j *api.JobSubmitRequestItem, config configuration.S // Ensures that the JobSubmitRequestItem's limits and requests are equal. // Also checks that any resources defined are above minimum values set in config func validateResources(j *api.JobSubmitRequestItem, config configuration.SubmissionConfig) error { - // Function which tells us if two k8s resource lists contain exactly the same elements - resourceListEquals := func(a v1.ResourceList, b v1.ResourceList) bool { - if len(a) != len(b) { - return false - } - for k, v := range a { - if v != b[k] { - return false - } - } - return true - } - spec := j.GetMainPodSpec() + maxOversubscriptionByResource := config.MaxOversubscriptionByResourceRequest + if maxOversubscriptionByResource == nil { + maxOversubscriptionByResource = map[string]float64{} + } for _, container := range spec.Containers { if len(container.Resources.Requests) == 0 && len(container.Resources.Requests) == 0 { return fmt.Errorf("container %v has no resources specified", container.Name) } - if !resourceListEquals(container.Resources.Requests, container.Resources.Limits) { - return fmt.Errorf("container %v does not have resource request and limit equal (this is currently not supported)", container.Name) + if len(container.Resources.Requests) != len(container.Resources.Limits) { + return fmt.Errorf("container %v defines different resources for requests and limits", container.Name) + } + + for resource, request := range container.Resources.Requests { + limit, ok := container.Resources.Limits[resource] + if !ok { + return fmt.Errorf("container %v defines %s for requests but not limits", container.Name, resource) + } + if limit.MilliValue() < request.MilliValue() { + return fmt.Errorf("container %v defines %s with limits smaller than requests", container.Name, resource) + } + maxOversubscription, ok := maxOversubscriptionByResource[resource.String()] + if !ok { + maxOversubscription = 1.0 + } + if float64(limit.MilliValue()) > maxOversubscription*float64(request.MilliValue()) { + return fmt.Errorf("container %v defines %s with limits great than %.2f*requests", 
container.Name, resource, maxOversubscription) + } } for rc, containerRsc := range container.Resources.Requests { diff --git a/internal/armada/submit/validation/submit_request_test.go b/internal/armada/submit/validation/submit_request_test.go index 396cdbfe41a..94a243559ca 100644 --- a/internal/armada/submit/validation/submit_request_test.go +++ b/internal/armada/submit/validation/submit_request_test.go @@ -788,9 +788,10 @@ func TestValidateResources(t *testing.T) { } tests := map[string]struct { - req *api.JobSubmitRequestItem - minJobResources v1.ResourceList - expectSuccess bool + req *api.JobSubmitRequestItem + minJobResources v1.ResourceList + maxOversubscriptionByResourceRequest map[string]float64 + expectSuccess bool }{ "Requests Missing": { req: reqFromContainer(v1.Container{ @@ -808,13 +809,67 @@ func TestValidateResources(t *testing.T) { }), expectSuccess: false, }, - "Requests and limits different": { + "Limits Less Than Request": { + req: reqFromContainer(v1.Container{ + Resources: v1.ResourceRequirements{ + Requests: twoCpu, + Limits: oneCpu, + }, + }), + expectSuccess: false, + maxOversubscriptionByResourceRequest: map[string]float64{ + "cpu": 2.0, + }, + }, + "Limits And Requests specify different resources": { + req: reqFromContainer(v1.Container{ + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("1"), + }, + Limits: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("1"), + v1.ResourceMemory: resource.MustParse("2Gi"), + }, + }, + }), + expectSuccess: false, + maxOversubscriptionByResourceRequest: map[string]float64{ + "cpu": 2.0, + "memory": 2.0, + }, + }, + "Requests and limits different with MaxResourceOversubscriptionByResourceRequest undefined": { + req: reqFromContainer(v1.Container{ + Resources: v1.ResourceRequirements{ + Requests: oneCpu, + Limits: twoCpu, + }, + }), + expectSuccess: false, + }, + "Requests and limits different, passes MaxResourceOversubscriptionByResourceRequest": { req: reqFromContainer(v1.Container{ Resources: v1.ResourceRequirements{ Requests: oneCpu, Limits: twoCpu, }, }), + maxOversubscriptionByResourceRequest: map[string]float64{ + "cpu": 2.0, + }, + expectSuccess: true, + }, + "Requests and limits different, fails MaxResourceOversubscriptionByResourceRequest": { + req: reqFromContainer(v1.Container{ + Resources: v1.ResourceRequirements{ + Requests: oneCpu, + Limits: twoCpu, + }, + }), + maxOversubscriptionByResourceRequest: map[string]float64{ + "cpu": 1.9, + }, expectSuccess: false, }, "Request and limits the same": { @@ -846,7 +901,11 @@ func TestValidateResources(t *testing.T) { } for name, tc := range tests { t.Run(name, func(t *testing.T) { - err := validateResources(tc.req, configuration.SubmissionConfig{}) + submitConfg := configuration.SubmissionConfig{} + if tc.maxOversubscriptionByResourceRequest != nil { + submitConfg.MaxOversubscriptionByResourceRequest = tc.maxOversubscriptionByResourceRequest + } + err := validateResources(tc.req, submitConfg) if tc.expectSuccess { assert.NoError(t, err) } else { diff --git a/internal/scheduler/jobdb/reconciliation.go b/internal/scheduler/jobdb/reconciliation.go index 07e8cd464e6..c8f0dd21a56 100644 --- a/internal/scheduler/jobdb/reconciliation.go +++ b/internal/scheduler/jobdb/reconciliation.go @@ -3,7 +3,6 @@ package jobdb import ( "github.com/gogo/protobuf/proto" "github.com/pkg/errors" - v1 "k8s.io/api/core/v1" armadamath "github.com/armadaproject/armada/internal/common/math" armadaslices 
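
Taken together, the config and validator changes above mean a container's limits may now exceed its requests up to a per-resource ratio, while any resource missing from MaxOversubscriptionByResourceRequest is still held to the old limits-equal-requests rule (ratio 1.0). The following standalone sketch shows the shape of that check using the same resource.Quantity arithmetic; the helper name checkOversubscription is illustrative and not part of the Armada API.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

// checkOversubscription rejects a resource whose limit is below its request or
// more than maxRatio times its request (e.g. maxRatio=2.0 allows limits up to
// twice the requested amount).
func checkOversubscription(resourceName string, request, limit resource.Quantity, maxRatio float64) error {
	if limit.MilliValue() < request.MilliValue() {
		return fmt.Errorf("%s limit is smaller than its request", resourceName)
	}
	if float64(limit.MilliValue()) > maxRatio*float64(request.MilliValue()) {
		return fmt.Errorf("%s limit is more than %.2f times its request", resourceName, maxRatio)
	}
	return nil
}

func main() {
	req := resource.MustParse("1")     // 1 CPU requested
	lim := resource.MustParse("1500m") // 1.5 CPU limit
	fmt.Println(checkOversubscription("cpu", req, lim, 2.0)) // <nil>: within the 2.0 ratio
	fmt.Println(checkOversubscription("cpu", req, lim, 1.0)) // error: ratio 1.0 requires limits == requests
}

Defaulting the ratio to 1.0 keeps previously valid submissions valid while letting operators opt individual resources into oversubscription, which is why the reconciliation shortcut that aliased requests to limits also had to be removed in this patch.
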
"github.com/armadaproject/armada/internal/common/slices" @@ -250,18 +249,6 @@ func (jobDb *JobDb) schedulerJobFromDatabaseJob(dbJob *database.Job) (*Job, erro return nil, errors.Wrapf(err, "error unmarshalling scheduling info for job %s", dbJob.JobID) } - // Modify the resource requirements so that limits and requests point to the same object. This saves memory because - // we no longer have to store both objects, while it is safe because at the api we assert that limits and requests - // must be equal. Long term this is undesirable as if we ever want to have limits != requests this trick will not work. - // instead we should find a more efficient mechanism for representing this data - if schedulingInfo.GetPodRequirements() != nil { - resourceRequirements := schedulingInfo.GetPodRequirements().GetResourceRequirements() - schedulingInfo.GetPodRequirements().ResourceRequirements = v1.ResourceRequirements{ - Limits: resourceRequirements.Limits, - Requests: resourceRequirements.Limits, - } - } - job, err := jobDb.NewJob( dbJob.JobID, dbJob.JobSet, From 09e1a124a405ca92c530e7225e8221f763a4d6c5 Mon Sep 17 00:00:00 2001 From: JamesMurkin Date: Tue, 25 Jun 2024 13:29:14 +0100 Subject: [PATCH 9/9] [Bug] Fix ingestion pipeline event counting (#3754) The function that counts the number of events per event sequence is wrong as it is actually counting the number of event sequences (each of which can contain many events) This just makes us actually count all the events in all event sequences Signed-off-by: JamesMurkin --- internal/common/ingest/ingestion_pipeline.go | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/internal/common/ingest/ingestion_pipeline.go b/internal/common/ingest/ingestion_pipeline.go index 98d3775a58e..9b590d1a7f4 100644 --- a/internal/common/ingest/ingestion_pipeline.go +++ b/internal/common/ingest/ingestion_pipeline.go @@ -150,7 +150,13 @@ func (i *IngestionPipeline[T]) Run(ctx *armadacontext.Context) error { // Batch up messages batchedEventSequences := make(chan *EventSequencesWithIds) - eventCounterFunc := func(seq *EventSequencesWithIds) int { return len(seq.EventSequences) } + eventCounterFunc := func(seq *EventSequencesWithIds) int { + totalEvents := 0 + for _, seq := range seq.EventSequences { + totalEvents += len(seq.Events) + } + return totalEvents + } eventPublisherFunc := func(b []*EventSequencesWithIds) { batchedEventSequences <- combineEventSequences(b) } batcher := NewBatcher[*EventSequencesWithIds](eventSequences, i.pulsarBatchSize, i.pulsarBatchDuration, eventCounterFunc, eventPublisherFunc) go func() {