diff --git a/.buildkite/pipelines/intake.yml b/.buildkite/pipelines/intake.yml index 7a50745a933ae..fd0684d666d64 100644 --- a/.buildkite/pipelines/intake.yml +++ b/.buildkite/pipelines/intake.yml @@ -40,7 +40,7 @@ steps: timeout_in_minutes: 300 matrix: setup: - BWC_VERSION: ["7.17.15", "8.10.5", "8.11.0", "8.12.0"] + BWC_VERSION: ["7.17.15", "8.11.1", "8.12.0"] agents: provider: gcp image: family/elasticsearch-ubuntu-2004 diff --git a/.buildkite/pipelines/periodic-packaging.yml b/.buildkite/pipelines/periodic-packaging.yml index 476a59eca58e5..3043872845779 100644 --- a/.buildkite/pipelines/periodic-packaging.yml +++ b/.buildkite/pipelines/periodic-packaging.yml @@ -1681,8 +1681,8 @@ steps: env: BWC_VERSION: 8.10.4 - - label: "{{matrix.image}} / 8.10.5 / packaging-tests-upgrade" - command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.10.5 + - label: "{{matrix.image}} / 8.11.0 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.11.0 timeout_in_minutes: 300 matrix: setup: @@ -1695,10 +1695,10 @@ steps: machineType: custom-16-32768 buildDirectory: /dev/shm/bk env: - BWC_VERSION: 8.10.5 + BWC_VERSION: 8.11.0 - - label: "{{matrix.image}} / 8.11.0 / packaging-tests-upgrade" - command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.11.0 + - label: "{{matrix.image}} / 8.11.1 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.11.1 timeout_in_minutes: 300 matrix: setup: @@ -1711,7 +1711,7 @@ steps: machineType: custom-16-32768 buildDirectory: /dev/shm/bk env: - BWC_VERSION: 8.11.0 + BWC_VERSION: 8.11.1 - label: "{{matrix.image}} / 8.12.0 / packaging-tests-upgrade" command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.12.0 diff --git a/.buildkite/pipelines/periodic.yml 
b/.buildkite/pipelines/periodic.yml index 174a8a3b8c3ec..e1ea27c2468e3 100644 --- a/.buildkite/pipelines/periodic.yml +++ b/.buildkite/pipelines/periodic.yml @@ -1032,8 +1032,8 @@ steps: buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.10.4 - - label: 8.10.5 / bwc - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.10.5#bwcTest + - label: 8.11.0 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.11.0#bwcTest timeout_in_minutes: 300 agents: provider: gcp @@ -1041,9 +1041,9 @@ steps: machineType: custom-32-98304 buildDirectory: /dev/shm/bk env: - BWC_VERSION: 8.10.5 - - label: 8.11.0 / bwc - command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.11.0#bwcTest + BWC_VERSION: 8.11.0 + - label: 8.11.1 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.11.1#bwcTest timeout_in_minutes: 300 agents: provider: gcp @@ -1051,7 +1051,7 @@ steps: machineType: custom-32-98304 buildDirectory: /dev/shm/bk env: - BWC_VERSION: 8.11.0 + BWC_VERSION: 8.11.1 - label: 8.12.0 / bwc command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.12.0#bwcTest timeout_in_minutes: 300 diff --git a/.buildkite/pull-requests.json b/.buildkite/pull-requests.json index 456fce6aba519..b59bdc79ad293 100644 --- a/.buildkite/pull-requests.json +++ b/.buildkite/pull-requests.json @@ -12,9 +12,6 @@ "build_on_commit": true, "build_on_comment": true, "trigger_comment_regex": "run\\W+elasticsearch-ci.+", - "labels": [ - "buildkite-opt-in" - ], "cancel_intermediate_builds": true, "cancel_intermediate_builds_on_comment": false }, diff --git a/.ci/bwcVersions b/.ci/bwcVersions index 988e7d1e0b453..688d84e1c49c8 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -102,6 +102,6 @@ BWC_VERSION: - "8.10.2" - "8.10.3" - "8.10.4" - - "8.10.5" - "8.11.0" + - "8.11.1" - "8.12.0" diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+build-benchmark-part1.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+build-benchmark-part1.yml index 
a3f1345a07f13..173c8dbf805c0 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+build-benchmark-part1.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+build-benchmark-part1.yml @@ -23,7 +23,8 @@ org-list: - elastic allow-whitelist-orgs-as-admins: true - trigger-phrase: '.*run\W+elasticsearch-ci/build-bench.*' + only-trigger-phrase: true + trigger-phrase: '.*run\W+jenkins\W+elasticsearch-ci/build-bench.*' github-hooks: true status-context: elasticsearch-ci/build-benchmark-part1 cancel-builds-on-update: true @@ -32,21 +33,17 @@ excluded-regions: - ^docs/.* - ^x-pack/docs/.* - white-list-labels: - - 'build-benchmark' - black-list-labels: - - 'buildkite-opt-in' builders: - inject: - properties-file: '.ci/java-versions.properties' + properties-file: ".ci/java-versions.properties" properties-content: | JAVA_HOME=$HOME/.java/$ES_BUILD_JAVA JAVA8_HOME=$HOME/.java/java8 JAVA11_HOME=$HOME/.java/java11 - shell: | - #!/usr/local/bin/runbld --redirect-stderr - $WORKSPACE/.ci/scripts/run-gradle.sh :build-tools-internal:bootstrapPerformanceTests - $WORKSPACE/.ci/scripts/install-gradle-profiler.sh - $WORKSPACE/.ci/scripts/run-gradle-profiler.sh --benchmark --scenario-file build-tools-internal/build/performanceTests/elasticsearch-build-benchmark-part1.scenarios --project-dir . --output-dir profile-out - mkdir $WORKSPACE/build - tar -czf $WORKSPACE/build/${BUILD_NUMBER}.tar.bz2 profile-out + #!/usr/local/bin/runbld --redirect-stderr + $WORKSPACE/.ci/scripts/run-gradle.sh :build-tools-internal:bootstrapPerformanceTests + $WORKSPACE/.ci/scripts/install-gradle-profiler.sh + $WORKSPACE/.ci/scripts/run-gradle-profiler.sh --benchmark --scenario-file build-tools-internal/build/performanceTests/elasticsearch-build-benchmark-part1.scenarios --project-dir . 
--output-dir profile-out + mkdir $WORKSPACE/build + tar -czf $WORKSPACE/build/${BUILD_NUMBER}.tar.bz2 profile-out diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+build-benchmark-part2.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+build-benchmark-part2.yml index f1b11ab1ec75a..5f25c9153040e 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+build-benchmark-part2.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+build-benchmark-part2.yml @@ -23,7 +23,8 @@ org-list: - elastic allow-whitelist-orgs-as-admins: true - trigger-phrase: '.*run\W+elasticsearch-ci/build-bench.*' + only-trigger-phrase: true + trigger-phrase: '.*run\W+jenkins\W+elasticsearch-ci/build-bench.*' github-hooks: true status-context: elasticsearch-ci/build-benchmark-part2 cancel-builds-on-update: true @@ -32,21 +33,17 @@ excluded-regions: - ^docs/.* - ^x-pack/docs/.* - white-list-labels: - - 'build-benchmark' - black-list-labels: - - 'buildkite-opt-in' builders: - inject: - properties-file: '.ci/java-versions.properties' + properties-file: ".ci/java-versions.properties" properties-content: | JAVA_HOME=$HOME/.java/$ES_BUILD_JAVA JAVA8_HOME=$HOME/.java/java8 JAVA11_HOME=$HOME/.java/java11 - shell: | - #!/usr/local/bin/runbld --redirect-stderr - $WORKSPACE/.ci/scripts/run-gradle.sh :build-tools-internal:bootstrapPerformanceTests - $WORKSPACE/.ci/scripts/install-gradle-profiler.sh - $WORKSPACE/.ci/scripts/run-gradle-profiler.sh --benchmark --scenario-file build-tools-internal/build/performanceTests/elasticsearch-build-benchmark-part2.scenarios --project-dir . 
--output-dir profile-out - mkdir $WORKSPACE/build - tar -czf $WORKSPACE/build/${BUILD_NUMBER}.tar.bz2 profile-out + #!/usr/local/bin/runbld --redirect-stderr + $WORKSPACE/.ci/scripts/run-gradle.sh :build-tools-internal:bootstrapPerformanceTests + $WORKSPACE/.ci/scripts/install-gradle-profiler.sh + $WORKSPACE/.ci/scripts/run-gradle-profiler.sh --benchmark --scenario-file build-tools-internal/build/performanceTests/elasticsearch-build-benchmark-part2.scenarios --project-dir . --output-dir profile-out + mkdir $WORKSPACE/build + tar -czf $WORKSPACE/build/${BUILD_NUMBER}.tar.bz2 profile-out diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+bwc-snapshots-windows.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+bwc-snapshots-windows.yml index c0ed9bf998159..1a0652204b2f2 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+bwc-snapshots-windows.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+bwc-snapshots-windows.yml @@ -16,7 +16,8 @@ org-list: - elastic allow-whitelist-orgs-as-admins: true - trigger-phrase: '.*run\W+elasticsearch-ci/bwc-snapshots-windows.*' + only-trigger-phrase: true + trigger-phrase: '.*run\W+jenkins\W+elasticsearch-ci/bwc-snapshots-windows.*' github-hooks: true status-context: elasticsearch-ci/bwc-snapshots-windows cancel-builds-on-update: true @@ -25,11 +26,6 @@ excluded-regions: - ^docs/.* - ^x-pack/docs/.* - white-list-labels: - - 'test-windows' - black-list-labels: - - '>test-mute' - - 'buildkite-opt-in' axes: - axis: type: slave @@ -42,7 +38,7 @@ name: "BWC_VERSION" builders: - inject: - properties-file: '.ci/java-versions.properties' + properties-file: ".ci/java-versions.properties" properties-content: | JAVA_HOME=$USERPROFILE\\.java\\$ES_BUILD_JAVA JAVA11_HOME=$USERPROFILE\\.java\\java11 diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+bwc-snapshots.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+bwc-snapshots.yml index 676f5f6f629b7..9a20115a72f1c 100644 --- 
a/.ci/jobs.t/elastic+elasticsearch+pull-request+bwc-snapshots.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+bwc-snapshots.yml @@ -16,17 +16,14 @@ org-list: - elastic allow-whitelist-orgs-as-admins: true - trigger-phrase: '.*run\W+elasticsearch-ci/bwc.*' + only-trigger-phrase: true + trigger-phrase: '.*run\W+jenkins\W+elasticsearch-ci/bwc.*' github-hooks: true status-context: elasticsearch-ci/bwc cancel-builds-on-update: true excluded-regions: - ^docs/.* - ^x-pack/docs/.* - black-list-labels: - - '>test-mute' - - 'test-full-bwc' - - 'buildkite-opt-in' axes: - axis: type: slave @@ -39,7 +36,7 @@ name: "BWC_VERSION" builders: - inject: - properties-file: '.ci/java-versions.properties' + properties-file: ".ci/java-versions.properties" properties-content: | JAVA_HOME=$HOME/.java/$ES_BUILD_JAVA JAVA8_HOME=$HOME/.java/java8 diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+cloud-deploy.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+cloud-deploy.yml index 24548954d8a10..a6f42c147dbeb 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+cloud-deploy.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+cloud-deploy.yml @@ -15,7 +15,8 @@ org-list: - elastic allow-whitelist-orgs-as-admins: true - trigger-phrase: '.*run\W+elasticsearch-ci/cloud-deploy.*' + only-trigger-phrase: true + trigger-phrase: '.*run\W+jenkins\W+elasticsearch-ci/cloud-deploy.*' github-hooks: true status-context: elasticsearch-ci/cloud-deploy cancel-builds-on-update: true @@ -24,13 +25,9 @@ excluded-regions: - ^docs/.* - ^x-pack/docs/.* - white-list-labels: - - 'cloud-deploy' - black-list-labels: - - 'buildkite-opt-in' builders: - inject: - properties-file: '.ci/java-versions.properties' + properties-file: ".ci/java-versions.properties" properties-content: | JAVA_HOME=$HOME/.java/$ES_BUILD_JAVA - shell: | diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+docs-check.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+docs-check.yml index c766b4379a1f6..58b273de2beb9 
100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+docs-check.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+docs-check.yml @@ -14,19 +14,17 @@ org-list: - elastic allow-whitelist-orgs-as-admins: true - trigger-phrase: '.*run\W+elasticsearch-ci/docs-check.*' + only-trigger-phrase: true + trigger-phrase: '.*run\W+jenkins\W+elasticsearch-ci/docs-check.*' github-hooks: true status-context: elasticsearch-ci/docs-check cancel-builds-on-update: true included-regions: - ^docs/.* - ^x-pack/docs/.* - black-list-labels: - - '>test-mute' - - 'buildkite-opt-in' builders: - inject: - properties-file: '.ci/java-versions.properties' + properties-file: ".ci/java-versions.properties" properties-content: | JAVA_HOME=$HOME/.java/$ES_BUILD_JAVA JAVA8_HOME=$HOME/.java/java8 diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+eql-correctness.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+eql-correctness.yml index 0b9eea62ad9bf..c1789e3b8595a 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+eql-correctness.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+eql-correctness.yml @@ -14,7 +14,8 @@ org-list: - elastic allow-whitelist-orgs-as-admins: true - trigger-phrase: '.*run\W+elasticsearch-ci/eql-correctness.*' + only-trigger-phrase: true + trigger-phrase: '.*run\W+jenkins\W+elasticsearch-ci/eql-correctness.*' github-hooks: true status-context: elasticsearch-ci/eql-correctness cancel-builds-on-update: true @@ -23,12 +24,9 @@ excluded-regions: - ^docs/.* - ^x-pack/docs/.* - black-list-labels: - - '>test-mute' - - 'buildkite-opt-in' builders: - inject: - properties-file: '.ci/java-versions.properties' + properties-file: ".ci/java-versions.properties" properties-content: | JAVA_HOME=$HOME/.java/$ES_BUILD_JAVA - shell: | diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+example-plugins.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+example-plugins.yml index 320a9c6176d5f..339fcd17ec77c 100644 --- 
a/.ci/jobs.t/elastic+elasticsearch+pull-request+example-plugins.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+example-plugins.yml @@ -14,7 +14,8 @@ org-list: - elastic allow-whitelist-orgs-as-admins: true - trigger-phrase: '.*run\W+elasticsearch-ci/example-plugins.*' + only-trigger-phrase: true + trigger-phrase: '.*run\W+jenkins\W+elasticsearch-ci/example-plugins.*' github-hooks: true status-context: elasticsearch-ci/example-plugins cancel-builds-on-update: true @@ -23,11 +24,9 @@ - build-tools/.* - build-tools-internal/.* - plugins/examples/.* - black-list-labels: - - 'buildkite-opt-in' builders: - inject: - properties-file: '.ci/java-versions.properties' + properties-file: ".ci/java-versions.properties" properties-content: | JAVA_HOME=$HOME/.java/$ES_BUILD_JAVA JAVA8_HOME=$HOME/.java/java8 diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+full-bwc.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+full-bwc.yml index 2a7920e4bae89..4bb38a810e8f1 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+full-bwc.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+full-bwc.yml @@ -16,18 +16,14 @@ org-list: - elastic allow-whitelist-orgs-as-admins: true - trigger-phrase: '.*run\W+elasticsearch-ci/full-bwc.*' + only-trigger-phrase: true + trigger-phrase: '.*run\W+jenkins\W+elasticsearch-ci/full-bwc.*' github-hooks: true status-context: elasticsearch-ci/full-bwc cancel-builds-on-update: true excluded-regions: - ^docs/.* - ^x-pack/docs/.* - white-list-labels: - - 'test-full-bwc' - black-list-labels: - - '>test-mute' - - 'buildkite-opt-in' axes: - axis: type: slave @@ -40,7 +36,7 @@ name: "BWC_VERSION" builders: - inject: - properties-file: '.ci/java-versions.properties' + properties-file: ".ci/java-versions.properties" properties-content: | JAVA_HOME=$HOME/.java/$ES_BUILD_JAVA JAVA8_HOME=$HOME/.java/java8 diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-unix-sample.yml 
b/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-unix-sample.yml index 2d4f372142512..23d94e665f8a3 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-unix-sample.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-unix-sample.yml @@ -15,7 +15,8 @@ org-list: - elastic allow-whitelist-orgs-as-admins: true - trigger-phrase: '.*run\W+elasticsearch-ci/packaging-tests-unix-sample.*' + only-trigger-phrase: true + trigger-phrase: '.*run\W+jenkins\W+elasticsearch-ci/packaging-tests-unix-sample.*' github-hooks: true status-context: elasticsearch-ci/packaging-tests-unix-sample cancel-builds-on-update: true @@ -24,10 +25,6 @@ excluded-regions: - ^docs/.* - ^x-pack/docs/.* - black-list-labels: - - ">test-mute" - - ":Delivery/Packaging" - - "buildkite-opt-in" axes: - axis: type: label-expression diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-unix.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-unix.yml index af1d3f493eeb0..901f7bcac3caa 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-unix.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-unix.yml @@ -15,7 +15,8 @@ org-list: - elastic allow-whitelist-orgs-as-admins: true - trigger-phrase: '.*run\W+elasticsearch-ci/packaging-tests-unix.*' + only-trigger-phrase: true + trigger-phrase: '.*run\W+jenkins\W+elasticsearch-ci/packaging-tests-unix.*' github-hooks: true status-context: elasticsearch-ci/packaging-tests-unix cancel-builds-on-update: true @@ -24,11 +25,6 @@ excluded-regions: - ^docs/.* - ^x-pack/docs/.* - white-list-labels: - - ":Delivery/Packaging" - black-list-labels: - - ">test-mute" - - "buildkite-opt-in" axes: - axis: type: label-expression diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-windows-nojdk.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-windows-nojdk.yml index ea4097b1a0b93..c39326380fdaf 100644 --- 
a/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-windows-nojdk.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-windows-nojdk.yml @@ -17,7 +17,8 @@ org-list: - elastic allow-whitelist-orgs-as-admins: true - trigger-phrase: '.*run\W+elasticsearch-ci/packaging-tests-windows.*' + only-trigger-phrase: true + trigger-phrase: '.*run\W+jenkins\W+elasticsearch-ci/packaging-tests-windows.*' github-hooks: true status-context: elasticsearch-ci/packaging-tests-windows cancel-builds-on-update: true @@ -28,11 +29,6 @@ excluded-regions: - ^docs/.* - ^x-pack/docs/.* - white-list-labels: - - ':Delivery/Packaging' - black-list-labels: - - '>test-mute' - - 'buildkite-opt-in' axes: - axis: type: label-expression @@ -46,11 +42,11 @@ type: user-defined name: PACKAGING_TASK values: - - 'default-windows-archive' - - 'default-windows-archive-no-jdk' + - "default-windows-archive" + - "default-windows-archive-no-jdk" builders: - inject: - properties-file: '.ci/java-versions.properties' + properties-file: ".ci/java-versions.properties" properties-content: | JAVA_HOME=$USERPROFILE\\.java\\$ES_BUILD_JAVA - batch: | diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-windows-sample-nojdk.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-windows-sample-nojdk.yml index ec644445ef8de..35705f7e759b1 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-windows-sample-nojdk.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-windows-sample-nojdk.yml @@ -17,7 +17,8 @@ org-list: - elastic allow-whitelist-orgs-as-admins: true - trigger-phrase: '.*run\W+elasticsearch-ci/packaging-tests-windows-sample.*' + only-trigger-phrase: true + trigger-phrase: '.*run\W+jenkins\W+elasticsearch-ci/packaging-tests-windows-sample.*' github-hooks: true status-context: elasticsearch-ci/packaging-tests-windows-sample cancel-builds-on-update: true @@ -28,10 +29,6 @@ excluded-regions: - ^docs/.* - 
^x-pack/docs/.* - black-list-labels: - - '>test-mute' - - ':Delivery/Packaging' - - 'buildkite-opt-in' axes: - axis: type: label-expression @@ -42,11 +39,11 @@ type: user-defined name: PACKAGING_TASK values: - - 'default-windows-archive' - - 'default-windows-archive-no-jdk' + - "default-windows-archive" + - "default-windows-archive-no-jdk" builders: - inject: - properties-file: '.ci/java-versions.properties' + properties-file: ".ci/java-versions.properties" properties-content: | JAVA_HOME=$USERPROFILE\\.java\\$ES_BUILD_JAVA - batch: | diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-windows-sample.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-windows-sample.yml index 242e137cb1d83..8a4eff2d30822 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-windows-sample.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-windows-sample.yml @@ -17,7 +17,8 @@ org-list: - elastic allow-whitelist-orgs-as-admins: true - trigger-phrase: '.*run\W+elasticsearch-ci/packaging-tests-windows-sample.*' + only-trigger-phrase: true + trigger-phrase: '.*run\W+jenkins\W+elasticsearch-ci/packaging-tests-windows-sample.*' github-hooks: true status-context: elasticsearch-ci/packaging-tests-windows-sample cancel-builds-on-update: true @@ -27,10 +28,6 @@ excluded-regions: - ^docs/.* - ^x-pack/docs/.* - black-list-labels: - - '>test-mute' - - ':Delivery/Packaging' - - 'buildkite-opt-in' axes: - axis: type: label-expression @@ -41,10 +38,10 @@ type: user-defined name: PACKAGING_TASK values: - - 'default-windows-archive' + - "default-windows-archive" builders: - inject: - properties-file: '.ci/java-versions.properties' + properties-file: ".ci/java-versions.properties" properties-content: | JAVA_HOME=$USERPROFILE\\.java\\$ES_BUILD_JAVA - batch: | diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-windows.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-windows.yml 
index a2ffc7b4050ec..d109477620386 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-windows.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-windows.yml @@ -17,7 +17,8 @@ org-list: - elastic allow-whitelist-orgs-as-admins: true - trigger-phrase: '.*run\W+elasticsearch-ci/packaging-tests-windows.*' + only-trigger-phrase: true + trigger-phrase: '.*run\W+jenkins\W+elasticsearch-ci/packaging-tests-windows.*' github-hooks: true status-context: elasticsearch-ci/packaging-tests-windows cancel-builds-on-update: true @@ -28,11 +29,6 @@ excluded-regions: - ^docs/.* - ^x-pack/docs/.* - white-list-labels: - - ':Delivery/Packaging' - black-list-labels: - - '>test-mute' - - 'buildkite-opt-in' axes: - axis: type: label-expression @@ -46,10 +42,10 @@ type: user-defined name: PACKAGING_TASK values: - - 'default-windows-archive' + - "default-windows-archive" builders: - inject: - properties-file: '.ci/java-versions.properties' + properties-file: ".ci/java-versions.properties" properties-content: | JAVA_HOME=$USERPROFILE\\.java\\$ES_BUILD_JAVA - batch: | diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-upgrade-tests.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-upgrade-tests.yml index 19ed5398e3e1d..0cc14224375fb 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-upgrade-tests.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-upgrade-tests.yml @@ -16,7 +16,8 @@ org-list: - elastic allow-whitelist-orgs-as-admins: true - trigger-phrase: '.*run\W+elasticsearch-ci/packaging-upgrade-tests.*' + only-trigger-phrase: true + trigger-phrase: '.*run\W+jenkins\W+elasticsearch-ci/packaging-upgrade-tests.*' github-hooks: true status-context: elasticsearch-ci/packaging-upgrade-tests cancel-builds-on-update: true @@ -25,11 +26,6 @@ excluded-regions: - ^docs/.* - ^x-pack/docs/.* - white-list-labels: - - ":Delivery/Packaging" - black-list-labels: - - ">test-mute" - - 
"buildkite-opt-in" axes: - axis: type: label-expression diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+part-1-fips.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+part-1-fips.yml index a661230d3b93b..aaeeed2f0d52b 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+part-1-fips.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+part-1-fips.yml @@ -14,7 +14,8 @@ org-list: - elastic allow-whitelist-orgs-as-admins: true - trigger-phrase: '.*run\W+elasticsearch-ci/part-1-fips.*' + only-trigger-phrase: true + trigger-phrase: '.*run\W+jenkins\W+elasticsearch-ci/part-1-fips.*' github-hooks: true status-context: elasticsearch-ci/part-1-fips cancel-builds-on-update: true @@ -23,15 +24,10 @@ excluded-regions: - ^docs/.* - ^x-pack/docs/.* - white-list-labels: - - 'Team:Security' - black-list-labels: - - '>test-mute' - - 'buildkite-opt-in' builders: - inject: # Use FIPS-specific Java versions - properties-file: '.ci/java-versions-fips.properties' + properties-file: ".ci/java-versions-fips.properties" properties-content: | JAVA_HOME=$HOME/.java/$ES_BUILD_JAVA JAVA16_HOME=$HOME/.java/openjdk16 diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+part-1-windows.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+part-1-windows.yml index d7afdd0ac3277..8b348f94026e0 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+part-1-windows.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+part-1-windows.yml @@ -15,7 +15,8 @@ org-list: - elastic allow-whitelist-orgs-as-admins: true - trigger-phrase: '.*run\W+elasticsearch-ci/part-1-windows.*' + only-trigger-phrase: true + trigger-phrase: '.*run\W+jenkins\W+elasticsearch-ci/part-1-windows.*' github-hooks: true status-context: elasticsearch-ci/part-1-windows cancel-builds-on-update: true @@ -24,14 +25,9 @@ excluded-regions: - ^docs/.* - ^x-pack/docs/.* - white-list-labels: - - 'test-windows' - black-list-labels: - - '>test-mute' - - 'buildkite-opt-in' builders: - inject: - properties-file: 
'.ci/java-versions.properties' + properties-file: ".ci/java-versions.properties" properties-content: | JAVA_HOME=$USERPROFILE\\.java\\$ES_BUILD_JAVA JAVA11_HOME=$USERPROFILE\\.java\\java11 diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+part-2-fips.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+part-2-fips.yml index 913820709dabc..11d168d7567d9 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+part-2-fips.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+part-2-fips.yml @@ -14,7 +14,8 @@ org-list: - elastic allow-whitelist-orgs-as-admins: true - trigger-phrase: '.*run\W+elasticsearch-ci/part-2-fips.*' + only-trigger-phrase: true + trigger-phrase: '.*run\W+jenkins\W+elasticsearch-ci/part-2-fips.*' github-hooks: true status-context: elasticsearch-ci/part-2-fips cancel-builds-on-update: true @@ -23,15 +24,10 @@ excluded-regions: - ^docs/.* - ^x-pack/docs/.* - white-list-labels: - - 'Team:Security' - black-list-labels: - - '>test-mute' - - 'buildkite-opt-in' builders: - inject: # Use FIPS-specific Java versions - properties-file: '.ci/java-versions-fips.properties' + properties-file: ".ci/java-versions-fips.properties" properties-content: | JAVA_HOME=$HOME/.java/$ES_BUILD_JAVA JAVA16_HOME=$HOME/.java/openjdk16 diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+part-2-windows.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+part-2-windows.yml index ae590872be16e..927117cc3bced 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+part-2-windows.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+part-2-windows.yml @@ -15,7 +15,8 @@ org-list: - elastic allow-whitelist-orgs-as-admins: true - trigger-phrase: '.*run\W+elasticsearch-ci/part-2-windows.*' + only-trigger-phrase: true + trigger-phrase: '.*run\W+jenkins\W+elasticsearch-ci/part-2-windows.*' github-hooks: true status-context: elasticsearch-ci/part-2-windows cancel-builds-on-update: true @@ -24,14 +25,9 @@ excluded-regions: - ^docs/.* - ^x-pack/docs/.* - 
white-list-labels: - - 'test-windows' - black-list-labels: - - '>test-mute' - - 'buildkite-opt-in' builders: - inject: - properties-file: '.ci/java-versions.properties' + properties-file: ".ci/java-versions.properties" properties-content: | JAVA_HOME=$USERPROFILE\\.java\\$ES_BUILD_JAVA JAVA11_HOME=$USERPROFILE\\.java\\java11 diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+part-3-fips.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+part-3-fips.yml index 6bf6544d40310..3b7984ecbdc43 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+part-3-fips.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+part-3-fips.yml @@ -14,7 +14,8 @@ org-list: - elastic allow-whitelist-orgs-as-admins: true - trigger-phrase: '.*run\W+elasticsearch-ci/part-3-fips.*' + only-trigger-phrase: true + trigger-phrase: '.*run\W+jenkins\W+elasticsearch-ci/part-3-fips.*' github-hooks: true status-context: elasticsearch-ci/part-3-fips cancel-builds-on-update: true @@ -24,15 +25,10 @@ excluded-regions: - ^docs/.* - ^x-pack/docs/.* - white-list-labels: - - 'Team:Security' - black-list-labels: - - '>test-mute' - - 'buildkite-opt-in' builders: - inject: # Use FIPS-specific Java versions - properties-file: '.ci/java-versions-fips.properties' + properties-file: ".ci/java-versions-fips.properties" properties-content: | JAVA_HOME=$HOME/.java/$ES_BUILD_JAVA JAVA16_HOME=$HOME/.java/openjdk16 diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+part-3-windows.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+part-3-windows.yml index 58bad17954b24..7e835b85015ba 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+part-3-windows.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+part-3-windows.yml @@ -15,7 +15,8 @@ org-list: - elastic allow-whitelist-orgs-as-admins: true - trigger-phrase: '.*run\W+elasticsearch-ci/part-3-windows.*' + only-trigger-phrase: true + trigger-phrase: '.*run\W+jenkins\W+elasticsearch-ci/part-3-windows.*' github-hooks: true status-context: 
elasticsearch-ci/part-3-windows cancel-builds-on-update: true @@ -25,14 +26,9 @@ excluded-regions: - ^docs/.* - ^x-pack/docs/.* - white-list-labels: - - 'test-windows' - black-list-labels: - - '>test-mute' - - 'buildkite-opt-in' builders: - inject: - properties-file: '.ci/java-versions.properties' + properties-file: ".ci/java-versions.properties" properties-content: | JAVA_HOME=$USERPROFILE\\.java\\$ES_BUILD_JAVA JAVA11_HOME=$USERPROFILE\\.java\\java11 diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+part-3.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+part-3.yml index 0158b909903b4..e306657693f5f 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+part-3.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+part-3.yml @@ -14,22 +14,20 @@ org-list: - elastic allow-whitelist-orgs-as-admins: true - trigger-phrase: '.*run\W+elasticsearch-ci/part-3.*' + only-trigger-phrase: true + trigger-phrase: '.*run\W+jenkins\W+elasticsearch-ci/part-3.*' github-hooks: true status-context: elasticsearch-ci/part-3 cancel-builds-on-update: true excluded-regions: - ^docs/.* - ^x-pack/docs/.* - black-list-labels: - - '>test-mute' - - 'buildkite-opt-in' black-list-target-branches: - 6.8 - 7.17 builders: - inject: - properties-file: '.ci/java-versions.properties' + properties-file: ".ci/java-versions.properties" properties-content: | JAVA_HOME=$HOME/.java/$ES_BUILD_JAVA JAVA8_HOME=$HOME/.java/java8 diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+precommit.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+precommit.yml index 1267b6a21778e..3994164fba0f3 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+precommit.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+precommit.yml @@ -14,17 +14,14 @@ org-list: - elastic allow-whitelist-orgs-as-admins: true - trigger-phrase: '.*run\W+elasticsearch-ci/precommit.*' + only-trigger-phrase: true + trigger-phrase: '.*run\W+jenkins\W+elasticsearch-ci/precommit.*' github-hooks: true status-context: 
elasticsearch-ci/precommit cancel-builds-on-update: true - white-list-labels: - - '>test-mute' - black-list-labels: - - 'buildkite-opt-in' builders: - inject: - properties-file: '.ci/java-versions.properties' + properties-file: ".ci/java-versions.properties" properties-content: | JAVA_HOME=$HOME/.java/$ES_BUILD_JAVA JAVA8_HOME=$HOME/.java/java8 diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+release-tests.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+release-tests.yml index 1ab6bd1ce0e5d..a86496d7199f5 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+release-tests.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+release-tests.yml @@ -16,23 +16,20 @@ org-list: - elastic allow-whitelist-orgs-as-admins: true - trigger-phrase: '.*run\W+elasticsearch-ci/release-tests.*' + only-trigger-phrase: true + trigger-phrase: '.*run\W+jenkins\W+elasticsearch-ci/release-tests.*' github-hooks: true status-context: elasticsearch-ci/release-tests cancel-builds-on-update: true excluded-regions: - ^docs/.* - ^x-pack/docs/.* - white-list-labels: - - 'test-release' - black-list-labels: - - 'buildkite-opt-in' black-list-target-branches: - 7.15 - 6.8 builders: - inject: - properties-file: '.ci/java-versions.properties' + properties-file: ".ci/java-versions.properties" properties-content: | JAVA_HOME=$HOME/.java/$ES_BUILD_JAVA JAVA8_HOME=$HOME/.java/java8 diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+rest-compatibility.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+rest-compatibility.yml index 216f8ceae2078..0ed86851c7f33 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+rest-compatibility.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+rest-compatibility.yml @@ -14,7 +14,8 @@ org-list: - elastic allow-whitelist-orgs-as-admins: true - trigger-phrase: '.*run\W+elasticsearch-ci/rest-compatibility.*' + only-trigger-phrase: true + trigger-phrase: '.*run\W+jenkins\W+elasticsearch-ci/rest-compatibility.*' github-hooks: true 
status-context: elasticsearch-ci/rest-compatibility cancel-builds-on-update: true @@ -26,12 +27,9 @@ excluded-regions: - ^docs/.* - ^x-pack/docs/.* - black-list-labels: - - '>test-mute' - - 'buildkite-opt-in' builders: - inject: - properties-file: '.ci/java-versions.properties' + properties-file: ".ci/java-versions.properties" properties-content: | JAVA_HOME=$HOME/.java/$ES_BUILD_JAVA JAVA8_HOME=$HOME/.java/java8 diff --git a/.ci/snapshotBwcVersions b/.ci/snapshotBwcVersions index 4246f34222b21..fe40ec8fd1d29 100644 --- a/.ci/snapshotBwcVersions +++ b/.ci/snapshotBwcVersions @@ -1,5 +1,4 @@ BWC_VERSION: - "7.17.15" - - "8.10.5" - - "8.11.0" + - "8.11.1" - "8.12.0" diff --git a/.ci/templates.t/pull-request-gradle-unix.yml b/.ci/templates.t/pull-request-gradle-unix.yml index c09e64c56f32d..7c0711a4e3a97 100644 --- a/.ci/templates.t/pull-request-gradle-unix.yml +++ b/.ci/templates.t/pull-request-gradle-unix.yml @@ -14,19 +14,17 @@ org-list: - elastic allow-whitelist-orgs-as-admins: true - trigger-phrase: '.*run\W+elasticsearch-ci/{pr-job}.*' + only-trigger-phrase: true + trigger-phrase: '.*run\W+jenkins\W+elasticsearch-ci/{pr-job}.*' github-hooks: true status-context: elasticsearch-ci/{pr-job} cancel-builds-on-update: true excluded-regions: - ^docs/.* - ^x-pack/docs/.* - black-list-labels: - - '>test-mute' - - 'buildkite-opt-in' builders: - inject: - properties-file: '.ci/java-versions.properties' + properties-file: ".ci/java-versions.properties" properties-content: | JAVA_HOME=$HOME/.java/$ES_BUILD_JAVA JAVA8_HOME=$HOME/.java/java8 diff --git a/benchmarks/build.gradle b/benchmarks/build.gradle index 7cfa23e69ff96..3519434e07d42 100644 --- a/benchmarks/build.gradle +++ b/benchmarks/build.gradle @@ -8,7 +8,8 @@ import org.elasticsearch.gradle.internal.info.BuildParams * Side Public License, v 1. 
*/ -apply plugin: 'elasticsearch.java' +apply plugin: org.elasticsearch.gradle.internal.ElasticsearchJavaBasePlugin +apply plugin: 'java-library' apply plugin: 'application' application { diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/ShardsAvailabilityHealthIndicatorBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/ShardsAvailabilityHealthIndicatorBenchmark.java index 30b37c018af01..ef834fad424e3 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/ShardsAvailabilityHealthIndicatorBenchmark.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/ShardsAvailabilityHealthIndicatorBenchmark.java @@ -21,7 +21,7 @@ import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.routing.allocation.DataTier; -import org.elasticsearch.cluster.routing.allocation.ShardsAvailabilityHealthIndicatorService; +import org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; diff --git a/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/doc/DocsTestPlugin.groovy b/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/doc/DocsTestPlugin.groovy index 874141f2135ad..38b4cb499eeb9 100644 --- a/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/doc/DocsTestPlugin.groovy +++ b/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/doc/DocsTestPlugin.groovy @@ -12,6 +12,7 @@ import org.elasticsearch.gradle.Version import org.elasticsearch.gradle.VersionProperties import org.elasticsearch.gradle.internal.test.rest.CopyRestApiTask import 
org.elasticsearch.gradle.internal.test.rest.CopyRestTestsTask +import org.gradle.api.Action import org.gradle.api.Plugin import org.gradle.api.Project import org.gradle.api.file.Directory @@ -61,16 +62,24 @@ class DocsTestPlugin implements Plugin { group 'Docs' description 'List each snippet' defaultSubstitutions = commonDefaultSubstitutions - perSnippet { println(it.toString()) } + perSnippet = new Action() { + @Override + void execute(SnippetsTask.Snippet snippet) { + println(snippet.toString()) + } + } } project.tasks.register('listConsoleCandidates', SnippetsTask) { group 'Docs' description 'List snippets that probably should be marked // CONSOLE' defaultSubstitutions = commonDefaultSubstitutions - perSnippet { - if (RestTestsFromSnippetsTask.isConsoleCandidate(it)) { - println(it.toString()) + perSnippet = new Action() { + @Override + void execute(SnippetsTask.Snippet snippet) { + if (RestTestsFromSnippetsTask.isConsoleCandidate(it)) { + println(it.toString()) + } } } } @@ -80,7 +89,7 @@ class DocsTestPlugin implements Plugin { defaultSubstitutions = commonDefaultSubstitutions testRoot.convention(restRootDir) doFirst { - fileOperations.delete(restRootDir) + getFileOperations().delete(testRoot.get()) } } diff --git a/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/doc/RestTestsFromSnippetsTask.groovy b/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/doc/RestTestsFromSnippetsTask.groovy index eda86355ee306..81207181dc9a7 100644 --- a/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/doc/RestTestsFromSnippetsTask.groovy +++ b/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/doc/RestTestsFromSnippetsTask.groovy @@ -10,8 +10,10 @@ package org.elasticsearch.gradle.internal.doc import groovy.transform.PackageScope import org.elasticsearch.gradle.internal.doc.SnippetsTask.Snippet +import org.gradle.api.Action import org.gradle.api.InvalidUserDataException import 
org.gradle.api.file.DirectoryProperty +import org.gradle.api.internal.file.FileOperations import org.gradle.api.tasks.Input import org.gradle.api.tasks.Internal import org.gradle.api.tasks.OutputDirectory @@ -24,7 +26,7 @@ import java.nio.file.Path /** * Generates REST tests for each snippet marked // TEST. */ -class RestTestsFromSnippetsTask extends SnippetsTask { +abstract class RestTestsFromSnippetsTask extends SnippetsTask { /** * These languages aren't supported by the syntax highlighter so we * shouldn't use them. @@ -64,13 +66,23 @@ class RestTestsFromSnippetsTask extends SnippetsTask { @Internal Set names = new HashSet<>() + @Inject + abstract FileOperations getFileOperations(); + @Inject RestTestsFromSnippetsTask(ObjectFactory objectFactory) { testRoot = objectFactory.directoryProperty() TestBuilder builder = new TestBuilder() - perSnippet builder.&handleSnippet - doLast builder.&checkUnconverted - doLast builder.&finishLastTest + perSnippet = new Action() { + @Override + void execute(Snippet snippet) { + builder.handleSnippet(snippet) + } + } + doLast { + builder.checkUnconverted() + builder.finishLastTest() + } } /** @@ -190,6 +202,7 @@ class RestTestsFromSnippetsTask extends SnippetsTask { * Called each time a snippet is encountered. Tracks the snippets and * calls buildTest to actually build the test. 
*/ + void handleSnippet(Snippet snippet) { if (RestTestsFromSnippetsTask.isConsoleCandidate(snippet)) { unconvertedCandidates.add(snippet.path.toString() diff --git a/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/doc/SnippetsTask.groovy b/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/doc/SnippetsTask.groovy index 1580ec891ed2b..3e4ad91024082 100644 --- a/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/doc/SnippetsTask.groovy +++ b/build-tools-internal/src/main/groovy/org/elasticsearch/gradle/internal/doc/SnippetsTask.groovy @@ -11,8 +11,9 @@ package org.elasticsearch.gradle.internal.doc import com.fasterxml.jackson.core.JsonFactory; import com.fasterxml.jackson.core.JsonParser; import com.fasterxml.jackson.core.JsonParseException; -import com.fasterxml.jackson.core.JsonToken; +import com.fasterxml.jackson.core.JsonToken +import org.gradle.api.Action; import org.gradle.api.DefaultTask import org.gradle.api.InvalidUserDataException import org.gradle.api.file.ConfigurableFileTree @@ -44,7 +45,7 @@ class SnippetsTask extends DefaultTask { * instance of Snippet. */ @Internal - Closure perSnippet + Action perSnippet /** * The docs to scan. 
Defaults to every file in the directory exception the @@ -134,7 +135,7 @@ class SnippetsTask extends DefaultTask { + "After substitutions and munging, the json looks like:\n" + quoted, e); } } - perSnippet(snippet) + perSnippet.execute(snippet) snippet = null } file.eachLine('UTF-8') { String line, int lineNumber -> diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalBwcGitPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalBwcGitPlugin.java index d51770ffd30ed..71c76b2045007 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalBwcGitPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/InternalBwcGitPlugin.java @@ -72,20 +72,19 @@ public void apply(Project project) { createClone.commandLine("git", "clone", buildLayout.getRootDirectory(), gitExtension.getCheckoutDir().get()); }); - ExtraPropertiesExtension extraProperties = project.getExtensions().getExtraProperties(); TaskProvider findRemoteTaskProvider = tasks.register("findRemote", LoggedExec.class, findRemote -> { findRemote.dependsOn(createCloneTaskProvider); findRemote.getWorkingDir().set(gitExtension.getCheckoutDir()); findRemote.commandLine("git", "remote", "-v"); findRemote.getCaptureOutput().set(true); - findRemote.doLast(t -> { extraProperties.set("remoteExists", isRemoteAvailable(remote, findRemote.getOutput())); }); + findRemote.doLast(t -> System.setProperty("remoteExists", String.valueOf(isRemoteAvailable(remote, findRemote.getOutput())))); }); TaskProvider addRemoteTaskProvider = tasks.register("addRemote", addRemote -> { String rootProjectName = project.getRootProject().getName(); addRemote.dependsOn(findRemoteTaskProvider); - addRemote.onlyIf("remote exists", task -> ((boolean) extraProperties.get("remoteExists")) == false); + addRemote.onlyIf("remote exists", task -> (Boolean.valueOf(providerFactory.systemProperty("remoteExists").get()) == false)); 
addRemote.doLast(new Action() { @Override public void execute(Task task) { diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/CheckForbiddenApisTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/CheckForbiddenApisTask.java index 194d0361980ec..bb0b8dcf04437 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/CheckForbiddenApisTask.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/precommit/CheckForbiddenApisTask.java @@ -377,6 +377,7 @@ public void checkForbidden() { parameters.getTargetCompatibility().set(getTargetCompatibility()); parameters.getIgnoreFailures().set(getIgnoreFailures()); parameters.getSuccessMarker().set(getSuccessMarker()); + parameters.getSignaturesFiles().from(getSignaturesFiles()); }); } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/CopyRestTestsTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/CopyRestTestsTask.java index 9359272b29610..94345ed80eec7 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/CopyRestTestsTask.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/CopyRestTestsTask.java @@ -15,6 +15,7 @@ import org.gradle.api.file.FileSystemOperations; import org.gradle.api.file.FileTree; import org.gradle.api.file.ProjectLayout; +import org.gradle.api.internal.file.FileOperations; import org.gradle.api.model.ObjectFactory; import org.gradle.api.provider.ListProperty; import org.gradle.api.tasks.IgnoreEmptyDirectories; @@ -43,7 +44,7 @@ * * @see RestResourcesPlugin */ -public class CopyRestTestsTask extends DefaultTask { +public abstract class CopyRestTestsTask extends DefaultTask { private static final String REST_TEST_PREFIX = "rest-api-spec/test"; private final ListProperty includeCore; private final ListProperty 
includeXpack; @@ -62,6 +63,9 @@ public class CopyRestTestsTask extends DefaultTask { private final ProjectLayout projectLayout; private final FileSystemOperations fileSystemOperations; + @Inject + public abstract FileOperations getFileOperations(); + @Inject public CopyRestTestsTask( ProjectLayout projectLayout, diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/compat/compat/RestCompatTestTransformTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/compat/compat/RestCompatTestTransformTask.java index 76004e3e5f6db..9b1e8a67deec8 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/compat/compat/RestCompatTestTransformTask.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/test/rest/compat/compat/RestCompatTestTransformTask.java @@ -457,15 +457,17 @@ public void transform() throws IOException { Collections.singletonList(new Skip(skippedFilesWithReason.get(file))) ); } else { + List> transformations = new ArrayList<>(getTransformations().get()); + if (skippedFilesWithTestAndReason.containsKey(file)) { // skip the named tests for this file skippedFilesWithTestAndReason.get(file).forEach(fullTestNameAndReasonPair -> { String prefix = file.getName().replace(".yml", "/"); String singleTestName = fullTestNameAndReasonPair.getLeft().replaceAll(".*" + prefix, ""); - getTransformations().add(new Skip(singleTestName, fullTestNameAndReasonPair.getRight())); + transformations.add(new Skip(singleTestName, fullTestNameAndReasonPair.getRight())); }); } - transformRestTests = transformer.transformRestTests(new LinkedList<>(tests), getTransformations().get()); + transformRestTests = transformer.transformRestTests(new LinkedList<>(tests), transformations); } // convert to url to ensure forward slashes diff --git a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/JvmOptionsParser.java 
b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/JvmOptionsParser.java index 5999f618bc0ab..29650e4b74114 100644 --- a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/JvmOptionsParser.java +++ b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/JvmOptionsParser.java @@ -137,7 +137,7 @@ private List jvmOptions( ); substitutedJvmOptions.addAll(machineDependentHeap.determineHeapSettings(config, substitutedJvmOptions)); final List ergonomicJvmOptions = JvmErgonomics.choose(substitutedJvmOptions); - final List systemJvmOptions = SystemJvmOptions.systemJvmOptions(); + final List systemJvmOptions = SystemJvmOptions.systemJvmOptions(args.nodeSettings()); final List apmOptions = APMJvmOptions.apmJvmOptions(args.nodeSettings(), args.secrets(), args.logsDir(), tmpDir); diff --git a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java index a55a303517d6f..6e250075f7747 100644 --- a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java +++ b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/SystemJvmOptions.java @@ -8,13 +8,16 @@ package org.elasticsearch.server.cli; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.EsExecutors; + import java.util.List; import java.util.stream.Collectors; import java.util.stream.Stream; final class SystemJvmOptions { - static List systemJvmOptions() { + static List systemJvmOptions(Settings nodeSettings) { return Stream.of( /* * Cache ttl in seconds for positive DNS lookups noting that this overrides the JDK security property networkaddress.cache.ttl; @@ -61,7 +64,8 @@ static List systemJvmOptions() { * explore alternatives. See org.elasticsearch.xpack.searchablesnapshots.preallocate.Preallocate. 
*/ "--add-opens=java.base/java.io=org.elasticsearch.preallocate", - maybeOverrideDockerCgroup() + maybeOverrideDockerCgroup(), + maybeSetActiveProcessorCount(nodeSettings) ).filter(e -> e.isEmpty() == false).collect(Collectors.toList()); } @@ -85,4 +89,16 @@ private static String maybeOverrideDockerCgroup() { } return ""; } + + /* + * node.processors determines thread pool sizes for Elasticsearch. When it + * is set, we need to also tell the JVM to respect a different value + */ + private static String maybeSetActiveProcessorCount(Settings nodeSettings) { + if (EsExecutors.NODE_PROCESSORS_SETTING.exists(nodeSettings)) { + int allocated = EsExecutors.allocatedProcessors(nodeSettings); + return "-XX:ActiveProcessorCount=" + allocated; + } + return ""; + } } diff --git a/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/JvmOptionsParserTests.java b/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/JvmOptionsParserTests.java index 5d63f29ac584e..03856b1024992 100644 --- a/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/JvmOptionsParserTests.java +++ b/distribution/tools/server-cli/src/test/java/org/elasticsearch/server/cli/JvmOptionsParserTests.java @@ -8,6 +8,8 @@ package org.elasticsearch.server.cli; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.Strings; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.ESTestCase.WithoutSecurityManager; @@ -28,10 +30,13 @@ import java.util.concurrent.atomic.AtomicBoolean; import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.Matchers.hasKey; import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.not; 
@WithoutSecurityManager public class JvmOptionsParserTests extends ESTestCase { @@ -344,4 +349,27 @@ public void accept(final int lineNumber, final String line) { assertThat(seenInvalidLines, equalTo(invalidLines)); } + public void testNodeProcessorsActiveCount() { + { + final List jvmOptions = SystemJvmOptions.systemJvmOptions(Settings.EMPTY); + assertThat(jvmOptions, not(hasItem(containsString("-XX:ActiveProcessorCount=")))); + } + { + Settings nodeSettings = Settings.builder().put(EsExecutors.NODE_PROCESSORS_SETTING.getKey(), 1).build(); + final List jvmOptions = SystemJvmOptions.systemJvmOptions(nodeSettings); + assertThat(jvmOptions, hasItem("-XX:ActiveProcessorCount=1")); + } + { + // check rounding + Settings nodeSettings = Settings.builder().put(EsExecutors.NODE_PROCESSORS_SETTING.getKey(), 0.2).build(); + final List jvmOptions = SystemJvmOptions.systemJvmOptions(nodeSettings); + assertThat(jvmOptions, hasItem("-XX:ActiveProcessorCount=1")); + } + { + // check validation + Settings nodeSettings = Settings.builder().put(EsExecutors.NODE_PROCESSORS_SETTING.getKey(), 10000).build(); + var e = expectThrows(IllegalArgumentException.class, () -> SystemJvmOptions.systemJvmOptions(nodeSettings)); + assertThat(e.getMessage(), containsString("setting [node.processors] must be <=")); + } + } } diff --git a/docs/changelog/100018.yaml b/docs/changelog/100018.yaml deleted file mode 100644 index b39089db568c0..0000000000000 --- a/docs/changelog/100018.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100018 -summary: Improve time-series error and documentation -area: "TSDB" -type: enhancement -issues: [] diff --git a/docs/changelog/100020.yaml b/docs/changelog/100020.yaml deleted file mode 100644 index 9f97778860eef..0000000000000 --- a/docs/changelog/100020.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 100020 -summary: "[CI] `SearchResponseTests#testSerialization` failing resolved" -area: Search -type: bug -issues: - - 100005 diff --git a/docs/changelog/100064.yaml 
b/docs/changelog/100064.yaml deleted file mode 100644 index f595b7e8e0705..0000000000000 --- a/docs/changelog/100064.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100064 -summary: Update the elastic-apm-agent version -area: Infra/Core -type: enhancement -issues: [] diff --git a/docs/changelog/100092.yaml b/docs/changelog/100092.yaml deleted file mode 100644 index e86b856caf3ad..0000000000000 --- a/docs/changelog/100092.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 100092 -summary: Compute SLM retention from `RepositoryData` -area: ILM+SLM -type: bug -issues: - - 99953 diff --git a/docs/changelog/100129.yaml b/docs/changelog/100129.yaml deleted file mode 100644 index aa2c6961b6681..0000000000000 --- a/docs/changelog/100129.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 100129 -summary: Refactor `SearchResponseClusters` to use CHM -area: Search -type: enhancement -issues: - - 99101 diff --git a/docs/changelog/100138.yaml b/docs/changelog/100138.yaml deleted file mode 100644 index 0df2004f8539d..0000000000000 --- a/docs/changelog/100138.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100138 -summary: Upgrade main to Lucene 9.8.0 -area: Search -type: upgrade -issues: [] diff --git a/docs/changelog/100143.yaml b/docs/changelog/100143.yaml deleted file mode 100644 index c61a2a8bc7a13..0000000000000 --- a/docs/changelog/100143.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100143 -summary: Preserve order of inference results when calling the _infer API with multiple inputs on a model deployment with more than one allocation the output results order was not guaranteed to match the input order. The fix ensures the output order matches the input order. 
-area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/100154.yaml b/docs/changelog/100154.yaml deleted file mode 100644 index 5e75102390c61..0000000000000 --- a/docs/changelog/100154.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100154 -summary: Log warnings for jobs unassigned for a long time -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/100187.yaml b/docs/changelog/100187.yaml deleted file mode 100644 index f0ab9257e7127..0000000000000 --- a/docs/changelog/100187.yaml +++ /dev/null @@ -1,17 +0,0 @@ -pr: 100187 -summary: GA the data stream lifecycle -area: Data streams -type: "feature" -issues: [] -highlight: - title: The data stream lifecycle is now in Technical Preview - body: "This marks the data stream lifecycle as available in Technical Preview. - Data streams will be able to take advantage of a built-in simplified and - resilient lifecycle implementation. Data streams with a configured lifecycle will - be automatically rolled over and tail merged (a forcemerge implementation that's - lightweight and only merges the long tail of small segments instead of the - whole shard). With the shard and index maintenance tasks being handled automatically - to ensure optimum performance, and trade-off between indexing and searching, - you'll be able to focus on the business related lifecycle aspects like data - retention." 
- notable: true diff --git a/docs/changelog/100199.yaml b/docs/changelog/100199.yaml deleted file mode 100644 index 0f609194813c5..0000000000000 --- a/docs/changelog/100199.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100199 -summary: "ESQL: Simple check if all blocks get released" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/100205.yaml b/docs/changelog/100205.yaml deleted file mode 100644 index 41b16465ef4c5..0000000000000 --- a/docs/changelog/100205.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100205 -summary: Simplify the Inference Ingest Processor configuration -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/100232.yaml b/docs/changelog/100232.yaml deleted file mode 100644 index 3f8336b6c241c..0000000000000 --- a/docs/changelog/100232.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100232 -summary: "Tracing: Use `doPriv` when working with spans, use `SpanId`" -area: Infra/Core -type: bug -issues: [] diff --git a/docs/changelog/100238.yaml b/docs/changelog/100238.yaml deleted file mode 100644 index 70e3f5340e223..0000000000000 --- a/docs/changelog/100238.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 100238 -summary: "ESQL: Remove aliasing inside Eval" -area: ES|QL -type: bug -issues: - - 100174 diff --git a/docs/changelog/100253.yaml b/docs/changelog/100253.yaml deleted file mode 100644 index 7a9d3f3fb13d7..0000000000000 --- a/docs/changelog/100253.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100253 -summary: Propagate cancellation in `DataTiersUsageTransportAction` -area: Data streams -type: bug -issues: [] diff --git a/docs/changelog/100273.yaml b/docs/changelog/100273.yaml deleted file mode 100644 index 4ccd52d033aa7..0000000000000 --- a/docs/changelog/100273.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100273 -summary: Propagate cancellation in `GetHealthAction` -area: Health -type: bug -issues: [] diff --git a/docs/changelog/100323.yaml b/docs/changelog/100323.yaml deleted file mode 100644 index de50da6ec8cf9..0000000000000 --- 
a/docs/changelog/100323.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100323 -summary: "CCR: Use local cluster state request" -area: CCR -type: bug -issues: [] diff --git a/docs/changelog/100351.yaml b/docs/changelog/100351.yaml deleted file mode 100644 index d8ba19b70cbed..0000000000000 --- a/docs/changelog/100351.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 100351 -summary: "ESQL: support metric tsdb fields while querying index patterns" -area: ES|QL -type: bug -issues: - - 100144 diff --git a/docs/changelog/100360.yaml b/docs/changelog/100360.yaml deleted file mode 100644 index 6d0dcafe16a8f..0000000000000 --- a/docs/changelog/100360.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100360 -summary: "ESQL: Limit how many bytes `concat()` can process" -area: ES|QL -type: bug -issues: [] diff --git a/docs/changelog/100370.yaml b/docs/changelog/100370.yaml deleted file mode 100644 index 3e2e1b762c654..0000000000000 --- a/docs/changelog/100370.yaml +++ /dev/null @@ -1,7 +0,0 @@ -pr: 100370 -summary: "ESQL: Page shouldn't close a block twice" -area: ES|QL -type: bug -issues: - - 100356 - - 100365 diff --git a/docs/changelog/100377.yaml b/docs/changelog/100377.yaml deleted file mode 100644 index a4cbb0ba46a61..0000000000000 --- a/docs/changelog/100377.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 100377 -summary: "ESQL: Add identity check in Block equality" -area: ES|QL -type: bug -issues: - - 100374 diff --git a/docs/changelog/100388.yaml b/docs/changelog/100388.yaml deleted file mode 100644 index 4b596b6ea23b6..0000000000000 --- a/docs/changelog/100388.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 100388 -summary: Fix for inference requests being sent to every node with a model allocation. If there are more nodes than items in the original request then empty requests were sent. 
-area: Machine Learning -type: bug -issues: - - 100180 diff --git a/docs/changelog/100447.yaml b/docs/changelog/100447.yaml deleted file mode 100644 index c20eb1599cf41..0000000000000 --- a/docs/changelog/100447.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100447 -summary: Reinstate `RepositoryData` BwC -area: Snapshot/Restore -type: bug -issues: [] diff --git a/docs/changelog/100470.yaml b/docs/changelog/100470.yaml deleted file mode 100644 index 3408ae06f7fe9..0000000000000 --- a/docs/changelog/100470.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 100470 -summary: DSL waits for the tsdb time boundaries to lapse -area: Data streams -type: bug -issues: - - 99696 diff --git a/docs/changelog/100594.yaml b/docs/changelog/100594.yaml deleted file mode 100644 index 62d2a8933b9ad..0000000000000 --- a/docs/changelog/100594.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100594 -summary: Grant editor and viewer access to profiling -area: Authorization -type: bug -issues: [] diff --git a/docs/changelog/100610.yaml b/docs/changelog/100610.yaml deleted file mode 100644 index 7423ce9225868..0000000000000 --- a/docs/changelog/100610.yaml +++ /dev/null @@ -1,7 +0,0 @@ -pr: 100610 -summary: Fix interruption of `markAllocationIdAsInSync` -area: Recovery -type: bug -issues: - - 96578 - - 100589 diff --git a/docs/changelog/100624.yaml b/docs/changelog/100624.yaml deleted file mode 100644 index 247343bf03ed8..0000000000000 --- a/docs/changelog/100624.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100624 -summary: Make Transform Feature Reset really wait for all the tasks -area: Transform -type: bug -issues: [] diff --git a/docs/changelog/100645.yaml b/docs/changelog/100645.yaml deleted file mode 100644 index e6bb6ab0fd653..0000000000000 --- a/docs/changelog/100645.yaml +++ /dev/null @@ -1,7 +0,0 @@ -pr: 100645 -summary: "ESQL: Graceful handling of non-bool condition in the filter" -area: ES|QL -type: bug -issues: - - 100049 - - 100409 diff --git a/docs/changelog/100647.yaml b/docs/changelog/100647.yaml deleted 
file mode 100644 index 399407146af68..0000000000000 --- a/docs/changelog/100647.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 100647 -summary: "ESQL: Handle queries with non-existing enrich policies and no field" -area: ES|QL -type: bug -issues: - - 100593 diff --git a/docs/changelog/100650.yaml b/docs/changelog/100650.yaml deleted file mode 100644 index 96d7bc0571403..0000000000000 --- a/docs/changelog/100650.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 100650 -summary: "ESQL: Improve verifier error for incorrect agg declaration" -area: ES|QL -type: bug -issues: - - 100641 diff --git a/docs/changelog/100656.yaml b/docs/changelog/100656.yaml deleted file mode 100644 index 1ee9a2ad0e47a..0000000000000 --- a/docs/changelog/100656.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 100656 -summary: "ESQL: fix non-null value being returned for unsupported data types in `ValueSources`" -area: ES|QL -type: bug -issues: - - 100048 diff --git a/docs/changelog/100707.yaml b/docs/changelog/100707.yaml deleted file mode 100644 index 6808b781b603a..0000000000000 --- a/docs/changelog/100707.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100707 -summary: Allow `enrich_user` to read/view enrich indices -area: Authorization -type: bug -issues: [] diff --git a/docs/changelog/100760.yaml b/docs/changelog/100760.yaml deleted file mode 100644 index b8d149fff5758..0000000000000 --- a/docs/changelog/100760.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100760 -summary: Remove noisy 'Could not find trained model' message -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/100766.yaml b/docs/changelog/100766.yaml deleted file mode 100644 index c7a3d0479afd6..0000000000000 --- a/docs/changelog/100766.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 100766 -summary: "ESQL: Properly handle multi-values in fold() and date math" -area: ES|QL -type: bug -issues: - - 100497 diff --git a/docs/changelog/100779.yaml b/docs/changelog/100779.yaml deleted file mode 100644 index 2d7f40f5b34da..0000000000000 --- 
a/docs/changelog/100779.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 100779 -summary: Fix NullPointerException in RotableSecret -area: Security -type: bug -issues: - - 99759 diff --git a/docs/changelog/100782.yaml b/docs/changelog/100782.yaml deleted file mode 100644 index c6007bfb4d9ba..0000000000000 --- a/docs/changelog/100782.yaml +++ /dev/null @@ -1,8 +0,0 @@ -pr: 100782 -summary: "ESQL: `mv_expand` pushes down limit and project and keep the limit after\ - \ it untouched" -area: ES|QL -type: bug -issues: - - 99971 - - 100774 diff --git a/docs/changelog/100808.yaml b/docs/changelog/100808.yaml deleted file mode 100644 index 1abbfdcebf74e..0000000000000 --- a/docs/changelog/100808.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100808 -summary: Make tasks that calculate checkpoints cancellable -area: Transform -type: bug -issues: [] diff --git a/docs/changelog/100846.yaml b/docs/changelog/100846.yaml deleted file mode 100644 index d13fb78b697a2..0000000000000 --- a/docs/changelog/100846.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 100846 -summary: Consistent scores for multi-term `SourceConfirmedTestQuery` -area: Search -type: bug -issues: - - 98712 diff --git a/docs/changelog/100866.yaml b/docs/changelog/100866.yaml deleted file mode 100644 index 67a22cc1e0996..0000000000000 --- a/docs/changelog/100866.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 100866 -summary: "ESQL: Preserve intermediate aggregation output in local relation" -area: ES|QL -type: bug -issues: - - 100807 diff --git a/docs/changelog/100872.yaml b/docs/changelog/100872.yaml deleted file mode 100644 index 9877afa28982e..0000000000000 --- a/docs/changelog/100872.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100872 -summary: Improve painless error wrapping -area: Infra/Scripting -type: bug -issues: [] diff --git a/docs/changelog/100875.yaml b/docs/changelog/100875.yaml deleted file mode 100644 index bd0ca59e8b8f0..0000000000000 --- a/docs/changelog/100875.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 100875 -summary: Preserve 
subfields for unsupported types -area: "Query Languages" -type: bug -issues: - - 100869 diff --git a/docs/changelog/100886.yaml b/docs/changelog/100886.yaml deleted file mode 100644 index b926f924c7a7c..0000000000000 --- a/docs/changelog/100886.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 100886 -summary: Use the correct writable name for model assignment metadata in mixed version clusters. Prevents a node failure due to IllegalArgumentException Unknown NamedWriteable [trained_model_assignment] -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/100911.yaml b/docs/changelog/100911.yaml deleted file mode 100644 index baab6f2482a76..0000000000000 --- a/docs/changelog/100911.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 100911 -summary: '`WaitForSnapshotStep` verifies if the index belongs to the latest snapshot - of that SLM policy' -area: ILM+SLM -type: bug -issues: [] diff --git a/docs/changelog/101001.yaml b/docs/changelog/101001.yaml deleted file mode 100644 index 3ebcefc2c8045..0000000000000 --- a/docs/changelog/101001.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 101001 -summary: "ESQL: Support date and time intervals as input params" -area: ES|QL -type: bug -issues: - - 99570 diff --git a/docs/changelog/101012.yaml b/docs/changelog/101012.yaml deleted file mode 100644 index 1d5f62bdddba7..0000000000000 --- a/docs/changelog/101012.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101012 -summary: Adjust `DateHistogram's` bucket accounting to be iteratively -area: Aggregations -type: bug -issues: [] diff --git a/docs/changelog/101051.yaml b/docs/changelog/101051.yaml deleted file mode 100644 index 05e7443dac8b3..0000000000000 --- a/docs/changelog/101051.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 101051 -summary: Percolator to support parsing script score query with params -area: Mapping -type: bug -issues: - - 97377 diff --git a/docs/changelog/101120.yaml b/docs/changelog/101120.yaml deleted file mode 100644 index bf359eb21be9f..0000000000000 --- 
a/docs/changelog/101120.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 101120 -summary: "ESQL: Fix escaping of backslash in LIKE operator" -area: ES|QL -type: bug -issues: - - 101106 diff --git a/docs/changelog/101133.yaml b/docs/changelog/101133.yaml deleted file mode 100644 index 546a5392c309a..0000000000000 --- a/docs/changelog/101133.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101133 -summary: Update bundled JDK to 21.0.1 -area: Packaging -type: upgrade -issues: [] diff --git a/docs/changelog/101184.yaml b/docs/changelog/101184.yaml deleted file mode 100644 index ac2f5f3ee8af1..0000000000000 --- a/docs/changelog/101184.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 101184 -summary: More robust timeout for repo analysis -area: Snapshot/Restore -type: bug -issues: - - 101182 diff --git a/docs/changelog/101205.yaml b/docs/changelog/101205.yaml deleted file mode 100644 index 528f6fb35846e..0000000000000 --- a/docs/changelog/101205.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101205 -summary: Increase K/V look-back time interval -area: Application -type: bug -issues: [] diff --git a/docs/changelog/101212.yaml b/docs/changelog/101212.yaml deleted file mode 100644 index ed2b433209e8d..0000000000000 --- a/docs/changelog/101212.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 101212 -summary: Fix painless execute api and tsdb issue -area: TSDB -type: bug -issues: - - 101072 diff --git a/docs/changelog/101245.yaml b/docs/changelog/101245.yaml deleted file mode 100644 index 2f9fef318f31a..0000000000000 --- a/docs/changelog/101245.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101245 -summary: Make S3 anti-contention delay configurable -area: Snapshot/Restore -type: bug -issues: [] diff --git a/docs/changelog/101255.yaml b/docs/changelog/101255.yaml deleted file mode 100644 index 37d8f7e3c14fe..0000000000000 --- a/docs/changelog/101255.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101255 -summary: Provide stable resampling -area: Application -type: bug -issues: [] diff --git a/docs/changelog/101264.yaml 
b/docs/changelog/101264.yaml deleted file mode 100644 index 7160240b2f3a0..0000000000000 --- a/docs/changelog/101264.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101264 -summary: Align look-back with client-side cache -area: Application -type: bug -issues: [] diff --git a/docs/changelog/101265.yaml b/docs/changelog/101265.yaml deleted file mode 100644 index f39b57fa9a75e..0000000000000 --- a/docs/changelog/101265.yaml +++ /dev/null @@ -1,13 +0,0 @@ -pr: 101265 -summary: Rollup functionality is now deprecated -area: Rollup -type: deprecation -issues: [] -deprecation: - title: >- - Rollup functionality is now deprecated - area: Rollup - details: |- - {ref}/xpack-rollup[Rollup functionality] has been deprecated and will be removed in a future release. Previously, rollups were available in technical preview. - impact: |- - Use {ref}/downsampling.html[downsampling] to reduce storage costs for time series data by by storing it at reduced granularity. diff --git a/docs/changelog/101344.yaml b/docs/changelog/101344.yaml deleted file mode 100644 index b546e743301f6..0000000000000 --- a/docs/changelog/101344.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101344 -summary: Register `repository_s3` settings -area: Snapshot/Restore -type: bug -issues: [] diff --git a/docs/changelog/101358.yaml b/docs/changelog/101358.yaml deleted file mode 100644 index 3ae2a44e15e5e..0000000000000 --- a/docs/changelog/101358.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 101358 -summary: Make DISSECT parameter `append_separator` case insensitive -area: ES|QL -type: bug -issues: - - 101138 diff --git a/docs/changelog/101362.yaml b/docs/changelog/101362.yaml deleted file mode 100644 index e1d763cd416fa..0000000000000 --- a/docs/changelog/101362.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 101362 -summary: "ESQL: Remove the swapped-args check for date_xxx()" -area: ES|QL -type: enhancement -issues: - - 99562 diff --git a/docs/changelog/101438.yaml b/docs/changelog/101438.yaml deleted file mode 100644 index 
8189ee96b6576..0000000000000 --- a/docs/changelog/101438.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 101438 -summary: "ESQL: Fix eval of functions on foldable literals" -area: ES|QL -type: bug -issues: - - 101425 diff --git a/docs/changelog/101456.yaml b/docs/changelog/101456.yaml deleted file mode 100644 index db55dfbde1c64..0000000000000 --- a/docs/changelog/101456.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 101456 -summary: "ESQL: adds Enrich implicit `match_fields` to `field_caps` call" -area: ES|QL -type: bug -issues: - - 101328 diff --git a/docs/changelog/101486.yaml b/docs/changelog/101486.yaml deleted file mode 100644 index 99795feda328f..0000000000000 --- a/docs/changelog/101486.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101486 -summary: Improving tika handling -area: Ingest Node -type: bug -issues: [] diff --git a/docs/changelog/101492.yaml b/docs/changelog/101492.yaml deleted file mode 100644 index 2c3cdeee21bbb..0000000000000 --- a/docs/changelog/101492.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 101492 -summary: "ESQL: check type before casting" -area: ES|QL -type: bug -issues: - - 101489 diff --git a/docs/changelog/101495.yaml b/docs/changelog/101495.yaml deleted file mode 100644 index f61c9b824b77c..0000000000000 --- a/docs/changelog/101495.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101495 -summary: "[DSL] skip deleting indices that have in-progress downsampling operations" -area: Data streams -type: bug -issues: [] diff --git a/docs/changelog/101497.yaml b/docs/changelog/101497.yaml deleted file mode 100644 index 7909cb1ecdc0d..0000000000000 --- a/docs/changelog/101497.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101497 -summary: Fix snapshot double finalization -area: Snapshot/Restore -type: bug -issues: [] diff --git a/docs/changelog/101516.yaml b/docs/changelog/101516.yaml deleted file mode 100644 index a5445102c33c6..0000000000000 --- a/docs/changelog/101516.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101516 -summary: "Make settings dynamic" -area: ES|QL -type: 
enhancement -issues: [] diff --git a/docs/changelog/101627.yaml b/docs/changelog/101627.yaml deleted file mode 100644 index 07992efd8bb3c..0000000000000 --- a/docs/changelog/101627.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101627 -summary: Ignore `IndexNotFound` error when refreshing destination index -area: Transform -type: bug -issues: [] diff --git a/docs/changelog/101629.yaml b/docs/changelog/101629.yaml deleted file mode 100644 index 1b8691c9798ff..0000000000000 --- a/docs/changelog/101629.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101629 -summary: Health report infrastructure doesn't trip the circuit breakers -area: Health -type: bug -issues: [] diff --git a/docs/changelog/101648.yaml b/docs/changelog/101648.yaml deleted file mode 100644 index 48e01739aabc0..0000000000000 --- a/docs/changelog/101648.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 101648 -summary: "ESQL: Fix unreleased block in topn" -area: ES|QL -type: bug -issues: - - 101588 diff --git a/docs/changelog/101652.yaml b/docs/changelog/101652.yaml deleted file mode 100644 index 79e3167696aee..0000000000000 --- a/docs/changelog/101652.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101652 -summary: Fix race condition in `SnapshotsService` -area: Snapshot/Restore -type: bug -issues: [] diff --git a/docs/changelog/101700.yaml b/docs/changelog/101700.yaml new file mode 100644 index 0000000000000..08671360688a7 --- /dev/null +++ b/docs/changelog/101700.yaml @@ -0,0 +1,5 @@ +pr: 101700 +summary: Fix `lastUnsafeSegmentGenerationForGets` for realtime get +area: Engine +type: bug +issues: [] diff --git a/docs/changelog/101705.yaml b/docs/changelog/101705.yaml new file mode 100644 index 0000000000000..baa7e69d48d88 --- /dev/null +++ b/docs/changelog/101705.yaml @@ -0,0 +1,6 @@ +pr: 101705 +summary: Respect regional AWS STS endpoints +area: Snapshot/Restore +type: bug +issues: + - 89175 diff --git a/docs/changelog/101713.yaml b/docs/changelog/101713.yaml deleted file mode 100644 index c3addf9296584..0000000000000 --- 
a/docs/changelog/101713.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101713 -summary: Disable `weight_matches` when kNN query is present -area: Highlighting -type: bug -issues: [] diff --git a/docs/changelog/101788.yaml b/docs/changelog/101788.yaml new file mode 100644 index 0000000000000..b7cc1e20663e8 --- /dev/null +++ b/docs/changelog/101788.yaml @@ -0,0 +1,6 @@ +pr: 101788 +summary: "ESQL: Narrow catch in convert functions" +area: ES|QL +type: bug +issues: + - 100820 diff --git a/docs/changelog/101799.yaml b/docs/changelog/101799.yaml new file mode 100644 index 0000000000000..a3ef5fb190177 --- /dev/null +++ b/docs/changelog/101799.yaml @@ -0,0 +1,5 @@ +pr: 101799 +summary: Fix memory leak from JWT cache (and fix the usage of the JWT auth cache) +area: Authentication +type: bug +issues: [] diff --git a/docs/changelog/101802.yaml b/docs/changelog/101802.yaml new file mode 100644 index 0000000000000..20e857c32f664 --- /dev/null +++ b/docs/changelog/101802.yaml @@ -0,0 +1,5 @@ +pr: 101802 +summary: Correctly logging watcher history write failures +area: Watcher +type: bug +issues: [] diff --git a/docs/changelog/101815.yaml b/docs/changelog/101815.yaml new file mode 100644 index 0000000000000..511e23beb68ef --- /dev/null +++ b/docs/changelog/101815.yaml @@ -0,0 +1,5 @@ +pr: 101815 +summary: Run `TransportGetAliasesAction` on local node +area: Indices APIs +type: enhancement +issues: [] diff --git a/docs/changelog/101846.yaml b/docs/changelog/101846.yaml new file mode 100644 index 0000000000000..52dfff8801c62 --- /dev/null +++ b/docs/changelog/101846.yaml @@ -0,0 +1,5 @@ +pr: 101846 +summary: Set `ActiveProcessorCount` when `node.processors` is set +area: Infra/CLI +type: enhancement +issues: [] diff --git a/docs/changelog/101847.yaml b/docs/changelog/101847.yaml new file mode 100644 index 0000000000000..91922b9e23ed0 --- /dev/null +++ b/docs/changelog/101847.yaml @@ -0,0 +1,6 @@ +pr: 101847 +summary: Add an additional tiebreaker to RRF +area: Ranking +type: bug +issues: 
+ - 101232 diff --git a/docs/changelog/94607.yaml b/docs/changelog/94607.yaml deleted file mode 100644 index eea9264ce90f9..0000000000000 --- a/docs/changelog/94607.yaml +++ /dev/null @@ -1,18 +0,0 @@ -pr: 94607 -summary: Use `IndexWriter.flushNextBuffer()` to reclaim memory from indexing buffers -area: Engine -type: enhancement -issues: [] -highlight: - title: Use `IndexWriter.flushNextBuffer()` to reclaim memory from indexing buffers - body: |- - Rather than forcing a refresh to reclaim memory from indexing buffers, which flushes all - segments no matter how large, Elasticsearch now takes advantage of - `IndexWriter#flushNextBuffer` which only flushes the largest pending segment. This should smooth - out indexing allowing for larger segment sizes, fewer merges and higher throughput. - - Furthermore, the selection algorithm to pick which shard to reclaim memory from next was - changed, from picking the shard that uses the most RAM to going over shards in a round-robin - fashion. This approach has proved to work significantly better in practice. 
- - notable: true diff --git a/docs/changelog/97317.yaml b/docs/changelog/97317.yaml deleted file mode 100644 index 64fcd55e67e28..0000000000000 --- a/docs/changelog/97317.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 97317 -summary: "Fix merges of mappings with `subobjects: false` for composable index templates" -area: Mapping -type: bug -issues: - - 96768 diff --git a/docs/changelog/97397.yaml b/docs/changelog/97397.yaml deleted file mode 100644 index 5c1867d55f9bd..0000000000000 --- a/docs/changelog/97397.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 97397 -summary: Return a 410 (Gone) status code for unavailable API endpoints -area: Infra/REST API -type: enhancement -issues: [] diff --git a/docs/changelog/97409.yaml b/docs/changelog/97409.yaml deleted file mode 100644 index 8c05d6254f7cc..0000000000000 --- a/docs/changelog/97409.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 97409 -summary: Trim stored fields for `_id` field in tsdb -area: TSDB -type: enhancement -issues: [] diff --git a/docs/changelog/97450.yaml b/docs/changelog/97450.yaml deleted file mode 100644 index a057e0beefaca..0000000000000 --- a/docs/changelog/97450.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 97450 -summary: Make `_index` optional for pinned query docs -area: Search -type: enhancement -issues: [] diff --git a/docs/changelog/97642.yaml b/docs/changelog/97642.yaml deleted file mode 100644 index cf519e04e2d38..0000000000000 --- a/docs/changelog/97642.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 97642 -summary: fix fuzzy query rewrite parameter not work -area: Search -type: bug -issues: [] diff --git a/docs/changelog/97729.yaml b/docs/changelog/97729.yaml deleted file mode 100644 index f80a04bc58f68..0000000000000 --- a/docs/changelog/97729.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 97729 -summary: Allow parsing on non-string routing fields -area: Aggregations -type: bug -issues: [] diff --git a/docs/changelog/97972.yaml b/docs/changelog/97972.yaml deleted file mode 100644 index d4d55e33b4bb2..0000000000000 --- 
a/docs/changelog/97972.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 97972 -summary: Automatically flatten objects when subobjects:false -area: Mapping -type: enhancement -issues: - - 88934 diff --git a/docs/changelog/98038.yaml b/docs/changelog/98038.yaml deleted file mode 100644 index d99db24664f30..0000000000000 --- a/docs/changelog/98038.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 98038 -summary: Update enrich execution to only set index false on fields that support it -area: Ingest Node -type: bug -issues: - - 98019 diff --git a/docs/changelog/98061.yaml b/docs/changelog/98061.yaml deleted file mode 100644 index 3955b262017f0..0000000000000 --- a/docs/changelog/98061.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 98061 -summary: Fix possible NPE when getting transform stats for failed transforms -area: Transform -type: bug -issues: - - 98052 diff --git a/docs/changelog/98268.yaml b/docs/changelog/98268.yaml deleted file mode 100644 index ef6f98b8d016c..0000000000000 --- a/docs/changelog/98268.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 98268 -summary: Dense vector field types are indexed by default -area: Vector Search -type: enhancement -issues: [] diff --git a/docs/changelog/98309.yaml b/docs/changelog/98309.yaml deleted file mode 100644 index 550f50b3569a1..0000000000000 --- a/docs/changelog/98309.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 98309 -summary: "Integrate Elasticsearch Query Language, ES|QL" -area: Query Languages -type: feature -issues: [] diff --git a/docs/changelog/98332.yaml b/docs/changelog/98332.yaml deleted file mode 100644 index 6446707515b3c..0000000000000 --- a/docs/changelog/98332.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 98332 -summary: Correct behaviour of `ContentPath::remove()` -area: Mapping -type: bug -issues: - - 98327 diff --git a/docs/changelog/98337.yaml b/docs/changelog/98337.yaml deleted file mode 100644 index 8664ae15eed00..0000000000000 --- a/docs/changelog/98337.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 98337 -summary: TopN sorting with min and max 
for multi-value fields -area: ES|QL -type: feature -issues: [] diff --git a/docs/changelog/98360.yaml b/docs/changelog/98360.yaml deleted file mode 100644 index b6b8696259c98..0000000000000 --- a/docs/changelog/98360.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 98360 -summary: Use a competitive iterator in `FiltersAggregator` -area: Aggregations -type: enhancement -issues: - - 97544 diff --git a/docs/changelog/98406.yaml b/docs/changelog/98406.yaml deleted file mode 100644 index f62af64171944..0000000000000 --- a/docs/changelog/98406.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 98406 -summary: Safely drain deployment request queues before allowing node to shutdown -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/98457.yaml b/docs/changelog/98457.yaml deleted file mode 100644 index 465c9ed30cc5b..0000000000000 --- a/docs/changelog/98457.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 98457 -summary: Support cluster/details for CCS minimize_roundtrips=false -area: Search -type: enhancement -issues: [] diff --git a/docs/changelog/98470.yaml b/docs/changelog/98470.yaml deleted file mode 100644 index 498b1db244d22..0000000000000 --- a/docs/changelog/98470.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 98470 -summary: Reduce verbosity of the bulk indexing audit log -area: Audit -type: enhancement -issues: [] diff --git a/docs/changelog/98512.yaml b/docs/changelog/98512.yaml deleted file mode 100644 index c2108a18c6b91..0000000000000 --- a/docs/changelog/98512.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 98512 -summary: Automatically map float arrays of lengths 128 - 2048 as dense_vector -area: Application -type: feature -issues: - - 97532 diff --git a/docs/changelog/98518.yaml b/docs/changelog/98518.yaml deleted file mode 100644 index 2f961fc11ce69..0000000000000 --- a/docs/changelog/98518.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 98518 -summary: Add `index.look_back_time` setting for tsdb data streams -area: TSDB -type: enhancement -issues: - - 98463 diff --git 
a/docs/changelog/98528.yaml b/docs/changelog/98528.yaml deleted file mode 100644 index 0004499e58f83..0000000000000 --- a/docs/changelog/98528.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 98528 -summary: "ESQL: Add support for TEXT fields in comparison operators and SORT" -area: ES|QL -type: enhancement -issues: - - 98642 diff --git a/docs/changelog/98550.yaml b/docs/changelog/98550.yaml deleted file mode 100644 index 30c9891b15182..0000000000000 --- a/docs/changelog/98550.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 98550 -summary: Report a node's "roles" setting in the /_cluster/allocation/explain response -area: Allocation -type: enhancement -issues: [97859] diff --git a/docs/changelog/98574.yaml b/docs/changelog/98574.yaml deleted file mode 100644 index bf016b4c241c8..0000000000000 --- a/docs/changelog/98574.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 98574 -summary: Specify correct current `IndexVersion` after 8.10 release -area: Infra/Core -type: bug -issues: - - 98555 diff --git a/docs/changelog/98590.yaml b/docs/changelog/98590.yaml deleted file mode 100644 index f3ef3cdd56a12..0000000000000 --- a/docs/changelog/98590.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 98590 -summary: "ESQL: LTRIM, RTRIM and fix unicode whitespace" -area: ES|QL -type: feature -issues: [] diff --git a/docs/changelog/98622.yaml b/docs/changelog/98622.yaml deleted file mode 100644 index 8c41444b6c725..0000000000000 --- a/docs/changelog/98622.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 98622 -summary: Add 'dataset' size to cat indices and cat shards -area: CAT APIs -type: enhancement -issues: - - 95092 diff --git a/docs/changelog/98628.yaml b/docs/changelog/98628.yaml deleted file mode 100644 index 2ecd9dd23e0ef..0000000000000 --- a/docs/changelog/98628.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 98628 -summary: Add ESQL own flavor of arithmetic operators -area: ES|QL -type: bug -issues: [] diff --git a/docs/changelog/98630.yaml b/docs/changelog/98630.yaml deleted file mode 100644 index 
444c593f87d0b..0000000000000 --- a/docs/changelog/98630.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 98630 -summary: "ESQL: LEAST and GREATEST functions" -area: ES|QL -type: feature -issues: [] diff --git a/docs/changelog/98635.yaml b/docs/changelog/98635.yaml deleted file mode 100644 index 274096951fcf6..0000000000000 --- a/docs/changelog/98635.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 98635 -summary: Fix NPE in `StableMasterHealthIndicatorService` -area: Health -type: bug -issues: [] diff --git a/docs/changelog/98653.yaml b/docs/changelog/98653.yaml deleted file mode 100644 index 384a29c3cc4ab..0000000000000 --- a/docs/changelog/98653.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 98653 -summary: Reset `GatewayService` flags before reroute -area: Cluster Coordination -type: bug -issues: - - 98606 diff --git a/docs/changelog/98654.yaml b/docs/changelog/98654.yaml deleted file mode 100644 index ea63edb93eb58..0000000000000 --- a/docs/changelog/98654.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 98654 -summary: Allow native users/roles to be disabled via setting -area: Authentication -type: enhancement -issues: [] diff --git a/docs/changelog/98684.yaml b/docs/changelog/98684.yaml deleted file mode 100644 index 552e85a04151a..0000000000000 --- a/docs/changelog/98684.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 98684 -summary: Explicit parsing object capabilities of `FieldMappers` -area: Mapping -type: enhancement -issues: - - 98537 diff --git a/docs/changelog/98711.yaml b/docs/changelog/98711.yaml deleted file mode 100644 index 43e0c2a03e8fa..0000000000000 --- a/docs/changelog/98711.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 98711 -summary: Support unsigned long in sqrt and log10 for ESQL -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/98759.yaml b/docs/changelog/98759.yaml deleted file mode 100644 index df6180bddc192..0000000000000 --- a/docs/changelog/98759.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 98759 -summary: "ESQL: Support queries that don't return underlying 
fields" -area: ES|QL -type: bug -issues: - - 98404 diff --git a/docs/changelog/98809.yaml b/docs/changelog/98809.yaml deleted file mode 100644 index f9f5be523e179..0000000000000 --- a/docs/changelog/98809.yaml +++ /dev/null @@ -1,7 +0,0 @@ -pr: 98809 -summary: Avoiding the use of nodes that are no longer in the cluster when computing - master stability -area: Health -type: enhancement -issues: - - 98636 diff --git a/docs/changelog/98811.yaml b/docs/changelog/98811.yaml deleted file mode 100644 index 338efbcf1d8c9..0000000000000 --- a/docs/changelog/98811.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 98811 -summary: Allow explain data stream lifecycle to accept a data stream -area: Data streams -type: enhancement -issues: [] diff --git a/docs/changelog/98824.yaml b/docs/changelog/98824.yaml deleted file mode 100644 index 7e2c43d266232..0000000000000 --- a/docs/changelog/98824.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 98824 -summary: Consider node shutdown in `DataTierAllocationDecider` -area: "Allocation" -type: bug -issues: - - 97207 diff --git a/docs/changelog/98840.yaml b/docs/changelog/98840.yaml deleted file mode 100644 index bb358916354dc..0000000000000 --- a/docs/changelog/98840.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 98840 -summary: Don't ignore empty index template that have no template definition -area: TSDB -type: bug -issues: - - 98834 diff --git a/docs/changelog/98843.yaml b/docs/changelog/98843.yaml deleted file mode 100644 index 742ae25697718..0000000000000 --- a/docs/changelog/98843.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 98843 -summary: Fix UnsignedLong field range query gt "0" can get the result equal to 0 -area: Search -type: bug -issues: [] diff --git a/docs/changelog/98844.yaml b/docs/changelog/98844.yaml deleted file mode 100644 index a5870e7344d15..0000000000000 --- a/docs/changelog/98844.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 98844 -summary: Add accessors required to recreate `TransformStats` object from the fields -area: Transform -type: 
enhancement -issues: [] diff --git a/docs/changelog/98847.yaml b/docs/changelog/98847.yaml deleted file mode 100644 index ab7455bd783c3..0000000000000 --- a/docs/changelog/98847.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 98847 -summary: "ESQL: Add `CEIL` function" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/98870.yaml b/docs/changelog/98870.yaml deleted file mode 100644 index b719fbb0caf22..0000000000000 --- a/docs/changelog/98870.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 98870 -summary: "ESQL: Add ability to perform date math" -area: ES|QL -type: enhancement -issues: - - 98402 diff --git a/docs/changelog/98874.yaml b/docs/changelog/98874.yaml new file mode 100644 index 0000000000000..e3eb7b5acc63f --- /dev/null +++ b/docs/changelog/98874.yaml @@ -0,0 +1,5 @@ +pr: 98874 +summary: Estimate the memory required to deploy trained models more accurately +area: Machine Learning +type: enhancement +issues: [] diff --git a/docs/changelog/98878.yaml b/docs/changelog/98878.yaml deleted file mode 100644 index 4fa8b23851bf9..0000000000000 --- a/docs/changelog/98878.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 98878 -summary: Fix percolator query for stored queries that expand on wildcard field names -area: Percolator -type: bug -issues: [] diff --git a/docs/changelog/98888.yaml b/docs/changelog/98888.yaml deleted file mode 100644 index 1f2f7ea27ff19..0000000000000 --- a/docs/changelog/98888.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 98888 -summary: Revert "Kibana system index does not allow user templates to affect it" -area: Infra/Core -type: bug -issues: [] diff --git a/docs/changelog/98915.yaml b/docs/changelog/98915.yaml deleted file mode 100644 index c23ddcc55d98e..0000000000000 --- a/docs/changelog/98915.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 98915 -summary: Avoid risk of OOM in datafeeds when memory is constrained -area: Machine Learning -type: bug -issues: [89769] diff --git a/docs/changelog/98930.yaml b/docs/changelog/98930.yaml deleted file mode 
100644 index e6a2c74192ebe..0000000000000 --- a/docs/changelog/98930.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 98930 -summary: Frozen index input clone copy cache file -area: Snapshot/Restore -type: bug -issues: [] diff --git a/docs/changelog/98942.yaml b/docs/changelog/98942.yaml deleted file mode 100644 index 4d8eeee5192e5..0000000000000 --- a/docs/changelog/98942.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 98942 -summary: "ESQL: LEFT function" -area: ES|QL -type: feature -issues: [] diff --git a/docs/changelog/98972.yaml b/docs/changelog/98972.yaml deleted file mode 100644 index acd336ff7d666..0000000000000 --- a/docs/changelog/98972.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 98972 -summary: "ES|QL: Implement serialization of `InvalidMappedField`" -area: ES|QL -type: bug -issues: - - 98851 diff --git a/docs/changelog/98974.yaml b/docs/changelog/98974.yaml deleted file mode 100644 index 90950986141ab..0000000000000 --- a/docs/changelog/98974.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 98974 -summary: "ESQL: RIGHT function" -area: ES|QL -type: feature -issues: [] diff --git a/docs/changelog/98996.yaml b/docs/changelog/98996.yaml deleted file mode 100644 index 1f1bdd35ff643..0000000000000 --- a/docs/changelog/98996.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 98996 -summary: Reintroduce `sparse_vector` mapping -area: Mapping -type: enhancement -issues: [] diff --git a/docs/changelog/99054.yaml b/docs/changelog/99054.yaml deleted file mode 100644 index a9e4128e7ae97..0000000000000 --- a/docs/changelog/99054.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99054 -summary: "ESQL: Mark counter fields as unsupported" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/99058.yaml b/docs/changelog/99058.yaml deleted file mode 100644 index a112834add071..0000000000000 --- a/docs/changelog/99058.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99058 -summary: "ESQL: log query and execution time" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/99091.yaml 
b/docs/changelog/99091.yaml deleted file mode 100644 index 2c7be19b161ba..0000000000000 --- a/docs/changelog/99091.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99091 -summary: Add flamegraph API -area: Application -type: enhancement -issues: [] diff --git a/docs/changelog/99106.yaml b/docs/changelog/99106.yaml deleted file mode 100644 index 21cb121595d2b..0000000000000 --- a/docs/changelog/99106.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 99106 -summary: "Add support for Persian language stemmer" -area: Analysis -type: feature -issues: - - 98911 diff --git a/docs/changelog/99107.yaml b/docs/changelog/99107.yaml deleted file mode 100644 index a808fb57fcf80..0000000000000 --- a/docs/changelog/99107.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99107 -summary: Wait to gracefully stop deployments until alternative allocation exists -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/99117.yaml b/docs/changelog/99117.yaml deleted file mode 100644 index 491692f232081..0000000000000 --- a/docs/changelog/99117.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99117 -summary: Do not report failure after connections are made -area: Network -type: bug -issues: [] diff --git a/docs/changelog/99163.yaml b/docs/changelog/99163.yaml deleted file mode 100644 index f7a44c7f24869..0000000000000 --- a/docs/changelog/99163.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 99163 -summary: Use `NamedWritable` to enable `GeoBoundingBox` serialisation -area: Geo -type: bug -issues: - - 99089 diff --git a/docs/changelog/99188.yaml b/docs/changelog/99188.yaml deleted file mode 100644 index c22e3ba4b36e5..0000000000000 --- a/docs/changelog/99188.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 99188 -summary: "ESQL: skip synthetic attributes when planning the physical fragment" -area: ES|QL -type: bug -issues: - - 99170 diff --git a/docs/changelog/99193.yaml b/docs/changelog/99193.yaml deleted file mode 100644 index 9db646dc80435..0000000000000 --- a/docs/changelog/99193.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 
99193 -summary: Wait for cluster state in recovery -area: Recovery -type: enhancement -issues: [] diff --git a/docs/changelog/99215.yaml b/docs/changelog/99215.yaml deleted file mode 100644 index 99227839b491e..0000000000000 --- a/docs/changelog/99215.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 99215 -summary: Skip `DisiPriorityQueue` on single filter agg -area: Aggregations -type: enhancement -issues: - - 99202 diff --git a/docs/changelog/99219.yaml b/docs/changelog/99219.yaml deleted file mode 100644 index 811e2df5f83d0..0000000000000 --- a/docs/changelog/99219.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99219 -summary: Reduce copying when creating scroll/PIT ids -area: Search -type: enhancement -issues: [] diff --git a/docs/changelog/99222.yaml b/docs/changelog/99222.yaml deleted file mode 100644 index 025c5e01d2a53..0000000000000 --- a/docs/changelog/99222.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99222 -summary: Fork response-sending in `OpenPointInTimeAction` -area: Search -type: bug -issues: [] diff --git a/docs/changelog/99223.yaml b/docs/changelog/99223.yaml deleted file mode 100644 index 914441931033b..0000000000000 --- a/docs/changelog/99223.yaml +++ /dev/null @@ -1,11 +0,0 @@ -pr: 99223 -summary: Remove `transport_versions` from cluster state API -area: Infra/Core -type: breaking -issues: [] -breaking: - title: Remove `transport_versions` from cluster state API - area: REST API - details: The `transport_versions` subobject of the response to `GET _cluster/state` has been replaced by the `nodes_versions` subobject. - impact: If needed, retrieve the per-node transport versions from the `nodes_versions` subobject. 
- notable: false diff --git a/docs/changelog/99224.yaml b/docs/changelog/99224.yaml deleted file mode 100644 index cde4084ab0e84..0000000000000 --- a/docs/changelog/99224.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99224 -summary: Add new _inference API -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/99278.yaml b/docs/changelog/99278.yaml deleted file mode 100644 index f2788a00e6369..0000000000000 --- a/docs/changelog/99278.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99278 -summary: Support rotatating the JWT shared secret -area: Security -type: enhancement -issues: [] diff --git a/docs/changelog/99286.yaml b/docs/changelog/99286.yaml deleted file mode 100644 index 1b37416d51ba6..0000000000000 --- a/docs/changelog/99286.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99286 -summary: "ESQL: Log execution time consistently" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/99300.yaml b/docs/changelog/99300.yaml deleted file mode 100644 index 508001b98f29e..0000000000000 --- a/docs/changelog/99300.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99300 -summary: Change `GetFromTranslog` to indices action -area: CRUD -type: bug -issues: [] diff --git a/docs/changelog/99303.yaml b/docs/changelog/99303.yaml deleted file mode 100644 index 479c3a3e280c7..0000000000000 --- a/docs/changelog/99303.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99303 -summary: Use DEBUG log level to report ESQL execution steps -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/99310.yaml b/docs/changelog/99310.yaml deleted file mode 100644 index 8b595fe93fd33..0000000000000 --- a/docs/changelog/99310.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 99310 -summary: "ESQL: \"params\" correctly parses the values including an optional \"type\"" -area: ES|QL -type: bug -issues: - - 99294 diff --git a/docs/changelog/99316.yaml b/docs/changelog/99316.yaml deleted file mode 100644 index 78857b433b385..0000000000000 --- a/docs/changelog/99316.yaml +++ /dev/null @@ 
-1,5 +0,0 @@ -pr: 99316 -summary: "ESQL: Compact topn" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/99346.yaml b/docs/changelog/99346.yaml deleted file mode 100644 index fc6fe02e6bf14..0000000000000 --- a/docs/changelog/99346.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99346 -summary: Automatically disable `ignore_malformed` on datastream `@timestamp` fields -area: Mapping -type: bug -issues: [] diff --git a/docs/changelog/99382.yaml b/docs/changelog/99382.yaml deleted file mode 100644 index 5f5eb932ed458..0000000000000 --- a/docs/changelog/99382.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 99382 -summary: "ESQL: create a Vector when needed for IN" -area: ES|QL -type: bug -issues: - - 99347 diff --git a/docs/changelog/99417.yaml b/docs/changelog/99417.yaml deleted file mode 100644 index 8c88a5a548dff..0000000000000 --- a/docs/changelog/99417.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 99417 -summary: Disable `FilterByFilterAggregator` through `ClusterSettings` -area: Aggregations -type: enhancement -issues: - - 99335 diff --git a/docs/changelog/99432.yaml b/docs/changelog/99432.yaml deleted file mode 100644 index df4c5a7f78199..0000000000000 --- a/docs/changelog/99432.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99432 -summary: "ESQL: Enable arithmetics for durations and periods" -area: ES|QL -type: enhancement -issues: [99293] diff --git a/docs/changelog/99470.yaml b/docs/changelog/99470.yaml deleted file mode 100644 index 3e784595cc6ac..0000000000000 --- a/docs/changelog/99470.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99470 -summary: "ESQL: Improve log messages" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/99474.yaml b/docs/changelog/99474.yaml deleted file mode 100644 index ea23481069833..0000000000000 --- a/docs/changelog/99474.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99474 -summary: Add `java.net.NetPermission` to APM module's permissions -area: Infra/Core -type: bug -issues: [] diff --git a/docs/changelog/99515.yaml 
b/docs/changelog/99515.yaml deleted file mode 100644 index 7de237531a506..0000000000000 --- a/docs/changelog/99515.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99515 -summary: Add `IndexVersion` to node info -area: Infra/REST API -type: enhancement -issues: [] diff --git a/docs/changelog/99527.yaml b/docs/changelog/99527.yaml deleted file mode 100644 index 19eef621fa500..0000000000000 --- a/docs/changelog/99527.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99445 -summary: Add new max_inner_product vector similarity function -area: Vector Search -type: enhancement -issues: [] diff --git a/docs/changelog/99532.yaml b/docs/changelog/99532.yaml deleted file mode 100644 index 859ba963600a8..0000000000000 --- a/docs/changelog/99532.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99532 -summary: Adds `nested` support for indexed `dense_vector` fields -area: Vector Search -type: enhancement -issues: [] diff --git a/docs/changelog/99555.yaml b/docs/changelog/99555.yaml deleted file mode 100644 index 5e53e8782e08c..0000000000000 --- a/docs/changelog/99555.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99555 -summary: Use mappings version to retrieve system index mappings at creation time -area: Infra/Core -type: enhancement -issues: [] diff --git a/docs/changelog/99566.yaml b/docs/changelog/99566.yaml deleted file mode 100644 index caad871bf58ed..0000000000000 --- a/docs/changelog/99566.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 99566 -summary: Add additional counters to `_clusters` response for all Cluster search states -area: Search -type: enhancement -issues: - - 98927 diff --git a/docs/changelog/99567.yaml b/docs/changelog/99567.yaml deleted file mode 100644 index aea65e55b6ee2..0000000000000 --- a/docs/changelog/99567.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 99567 -summary: Make tsdb settings public in Serverless -area: TSDB -type: bug -issues: - - 99563 diff --git a/docs/changelog/99584.yaml b/docs/changelog/99584.yaml deleted file mode 100644 index 229e3d8024506..0000000000000 --- 
a/docs/changelog/99584.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99584 -summary: Adding an option for trained models to be platform specific -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/99588.yaml b/docs/changelog/99588.yaml deleted file mode 100644 index 7cbb53376fdf0..0000000000000 --- a/docs/changelog/99588.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 99588 -summary: Make ESQL more resilient to non-indexed fields -area: ES|QL -type: bug -issues: - - 99506 diff --git a/docs/changelog/99601.yaml b/docs/changelog/99601.yaml deleted file mode 100644 index 9deba859a5cef..0000000000000 --- a/docs/changelog/99601.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 99601 -summary: "ESQL: continue resolving attributes for Eval" -area: ES|QL -type: bug -issues: - - 99576 diff --git a/docs/changelog/99627.yaml b/docs/changelog/99627.yaml deleted file mode 100644 index 84abdf6418dc2..0000000000000 --- a/docs/changelog/99627.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99627 -summary: Fix thread context in `getRepositoryData` -area: Snapshot/Restore -type: bug -issues: [] diff --git a/docs/changelog/99631.yaml b/docs/changelog/99631.yaml deleted file mode 100644 index d9174de76f1ea..0000000000000 --- a/docs/changelog/99631.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99631 -summary: Add component info versions to node info in a pluggable way -area: Infra/REST API -type: enhancement -issues: [] diff --git a/docs/changelog/99641.yaml b/docs/changelog/99641.yaml deleted file mode 100644 index c74f7380bd93a..0000000000000 --- a/docs/changelog/99641.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99641 -summary: Chunk the cluster allocation explain response -area: Network -type: enhancement -issues: [97803] diff --git a/docs/changelog/99644.yaml b/docs/changelog/99644.yaml deleted file mode 100644 index 10c10448c074c..0000000000000 --- a/docs/changelog/99644.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 99644 -summary: Add links to docs from failing bootstrap checks -area: Infra/Node 
Lifecycle -type: enhancement -issues: [99614] - diff --git a/docs/changelog/99655.yaml b/docs/changelog/99655.yaml deleted file mode 100644 index 3d1e76ec47aa3..0000000000000 --- a/docs/changelog/99655.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99655 -summary: "[Profiling] Allow to wait until resources created" -area: Application -type: enhancement -issues: [] diff --git a/docs/changelog/99682.yaml b/docs/changelog/99682.yaml deleted file mode 100644 index 48e99a5145674..0000000000000 --- a/docs/changelog/99682.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99682 -summary: Increase the max vector dims to 4096 -area: Vector Search -type: enhancement -issues: [] diff --git a/docs/changelog/99685.yaml b/docs/changelog/99685.yaml deleted file mode 100644 index 43dac2abbb312..0000000000000 --- a/docs/changelog/99685.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99685 -summary: Fix `advanceExact` for doc values from sources -area: Search -type: bug -issues: [] diff --git a/docs/changelog/99694.yaml b/docs/changelog/99694.yaml deleted file mode 100644 index a449ecb2ae378..0000000000000 --- a/docs/changelog/99694.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99694 -summary: Remove shard data files when they fail to write for snapshot -area: Snapshot/Restore -type: enhancement -issues: [] diff --git a/docs/changelog/99695.yaml b/docs/changelog/99695.yaml deleted file mode 100644 index 6dc4037a57763..0000000000000 --- a/docs/changelog/99695.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99695 -summary: "ESQL: Better management of not stored TEXT fiels with synthetic source" -area: ES|QL -type: bug -issues: [] diff --git a/docs/changelog/99711.yaml b/docs/changelog/99711.yaml deleted file mode 100644 index 34731a52818f0..0000000000000 --- a/docs/changelog/99711.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99711 -summary: "ESQL: Date math for negatives" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/99712.yaml b/docs/changelog/99712.yaml deleted file mode 100644 index 
c5fa1ac1e64ec..0000000000000 --- a/docs/changelog/99712.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99712 -summary: Make downsample target index replicas configurable -area: Downsampling -type: bug -issues: [] diff --git a/docs/changelog/99717.yaml b/docs/changelog/99717.yaml deleted file mode 100644 index db48c69ed68a2..0000000000000 --- a/docs/changelog/99717.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99717 -summary: Treating watcher webhook response header names as case-insensitive -area: Watcher -type: bug -issues: [] diff --git a/docs/changelog/99726.yaml b/docs/changelog/99726.yaml deleted file mode 100644 index 23350fdb85bd0..0000000000000 --- a/docs/changelog/99726.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 99726 -summary: "ESQL: Account for an exception being thrown when building a `BytesRefArrayBlock`" -area: ES|QL -type: bug -issues: - - 99472 diff --git a/docs/changelog/99736.yaml b/docs/changelog/99736.yaml deleted file mode 100644 index fbf177ea152a8..0000000000000 --- a/docs/changelog/99736.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 99736 -summary: "ESQL: enhance SHOW FUNCTIONS command" -area: ES|QL -type: enhancement -issues: - - 99507 diff --git a/docs/changelog/99746.yaml b/docs/changelog/99746.yaml deleted file mode 100644 index c4cdbc00f82c1..0000000000000 --- a/docs/changelog/99746.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99746 -summary: "ESQL: Log start and end of queries" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/99775.yaml b/docs/changelog/99775.yaml deleted file mode 100644 index 0c0dbdb1fce87..0000000000000 --- a/docs/changelog/99775.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 99775 -summary: Adding support for exist queries to `sparse_vector` fields -area: Search -type: enhancement -issues: - - 99319 diff --git a/docs/changelog/99796.yaml b/docs/changelog/99796.yaml deleted file mode 100644 index cad10564ed294..0000000000000 --- a/docs/changelog/99796.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 99796 -summary: Support runtime 
fields in synthetic source -area: Aggregations -type: bug -issues: - - 98287 diff --git a/docs/changelog/99797.yaml b/docs/changelog/99797.yaml deleted file mode 100644 index e46d4501291b5..0000000000000 --- a/docs/changelog/99797.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99797 -summary: Wait for cluster to recover before resolving index template -area: CRUD -type: bug -issues: [] diff --git a/docs/changelog/99798.yaml b/docs/changelog/99798.yaml deleted file mode 100644 index bd8b9da71541d..0000000000000 --- a/docs/changelog/99798.yaml +++ /dev/null @@ -1,7 +0,0 @@ -pr: 99798 -summary: Chunk `SingleNodeShutdownStatus` and `ShutdownShardMigrationStatus` (and - related action) response -area: Infra/Node Lifecycle -type: enhancement -issues: - - 99678 diff --git a/docs/changelog/99804.yaml b/docs/changelog/99804.yaml deleted file mode 100644 index b4c226217e352..0000000000000 --- a/docs/changelog/99804.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 99804 -summary: Correctly handle `ScriptScoreQuery` in plain highlighter -area: Highlighting -type: bug -issues: - - 99700 diff --git a/docs/changelog/99816.yaml b/docs/changelog/99816.yaml deleted file mode 100644 index 4caf8a36f54b4..0000000000000 --- a/docs/changelog/99816.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 99816 -summary: "ESQL: Lower the implicit limit, if none is user-provided" -area: ES|QL -type: enhancement -issues: - - 99458 diff --git a/docs/changelog/99827.yaml b/docs/changelog/99827.yaml deleted file mode 100644 index 3e6690a8e9e68..0000000000000 --- a/docs/changelog/99827.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99827 -summary: "ESQL: Fix NPE when aggregating literals" -area: ES|QL -type: bug -issues: [] diff --git a/docs/changelog/99832.yaml b/docs/changelog/99832.yaml deleted file mode 100644 index 9bd83591ba920..0000000000000 --- a/docs/changelog/99832.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99832 -summary: APM Metering API -area: Infra/Core -type: enhancement -issues: [] diff --git a/docs/changelog/99873.yaml 
b/docs/changelog/99873.yaml deleted file mode 100644 index d726ba00a1558..0000000000000 --- a/docs/changelog/99873.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99873 -summary: "[Profiling] Tighten resource creation check" -area: Application -type: bug -issues: [] diff --git a/docs/changelog/99874.yaml b/docs/changelog/99874.yaml deleted file mode 100644 index d23fc1ea6edde..0000000000000 --- a/docs/changelog/99874.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 99874 -summary: "ESQL: Use exact attributes for data source extraction" -area: ES|QL -type: bug -issues: - - 99183 diff --git a/docs/changelog/99909.yaml b/docs/changelog/99909.yaml deleted file mode 100644 index 2051a30e4efa1..0000000000000 --- a/docs/changelog/99909.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99909 -summary: "[Profiling] Allow to customize the ILM policy" -area: Application -type: enhancement -issues: [] diff --git a/docs/changelog/99912.yaml b/docs/changelog/99912.yaml deleted file mode 100644 index 06f0f9baa9661..0000000000000 --- a/docs/changelog/99912.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 99912 -summary: Represent histogram value count as long -area: Aggregations -type: enhancement -issues: - - 99820 diff --git a/docs/changelog/99938.yaml b/docs/changelog/99938.yaml deleted file mode 100644 index 4349b73516cae..0000000000000 --- a/docs/changelog/99938.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99938 -summary: "Prune unnecessary information from TransportNodesInfoAction.NodeInfoRequest" -area: Stats -type: enhancement -issues: [99744] diff --git a/docs/changelog/99947.yaml b/docs/changelog/99947.yaml deleted file mode 100644 index 61996c8fde92b..0000000000000 --- a/docs/changelog/99947.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 99947 -summary: GET `_data_stream` displays both ILM and DSL information -area: Data streams -type: feature -issues: [] diff --git a/docs/changelog/99956.yaml b/docs/changelog/99956.yaml deleted file mode 100644 index 04646a98898a3..0000000000000 --- a/docs/changelog/99956.yaml +++ 
/dev/null @@ -1,5 +0,0 @@ -pr: 99956 -summary: "ESQL: Serialize the source in expressions" -area: ES|QL -type: enhancement -issues: [] diff --git a/docs/changelog/99995.yaml b/docs/changelog/99995.yaml deleted file mode 100644 index d67cbdaec1f37..0000000000000 --- a/docs/changelog/99995.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 99995 -summary: When a primary is inactive but this is considered expected, the same applies for the replica of this shard. -area: Health -type: enhancement -issues: - - 99951 diff --git a/docs/plugins/development/creating-stable-plugins.asciidoc b/docs/plugins/development/creating-stable-plugins.asciidoc index a8efc86c5beac..c9a8a1f6c7e2a 100644 --- a/docs/plugins/development/creating-stable-plugins.asciidoc +++ b/docs/plugins/development/creating-stable-plugins.asciidoc @@ -59,7 +59,7 @@ for the plugin. If you need other resources, package them into a resources JAR. [discrete] ==== Development process -Elastic provides a Grade plugin, `elasticsearch.stable-esplugin`, that makes it +Elastic provides a Gradle plugin, `elasticsearch.stable-esplugin`, that makes it easier to develop and package stable plugins. The steps in this section assume you use this plugin. However, you don't need Gradle to create plugins. @@ -128,4 +128,4 @@ extend `ESClientYamlSuiteTestCase`. 
[[plugin-descriptor-file-stable]] ==== The plugin descriptor file for stable plugins -include::plugin-descriptor-file.asciidoc[] \ No newline at end of file +include::plugin-descriptor-file.asciidoc[] diff --git a/docs/reference/esql/esql-examples.asciidoc b/docs/reference/esql/esql-examples.asciidoc index 569dcf1172b38..817ec4f7b6f24 100644 --- a/docs/reference/esql/esql-examples.asciidoc +++ b/docs/reference/esql/esql-examples.asciidoc @@ -13,11 +13,11 @@ ---- FROM logs-* | WHERE event.code IS NOT NULL -| STATS event_code_count = count(event.code) by event.code,host.name -| ENRICH win_events on event.code with event_description +| STATS event_code_count = COUNT(event.code) BY event.code,host.name +| ENRICH win_events ON event.code WITH event_description | WHERE event_description IS NOT NULL and host.name IS NOT NULL -| RENAME event_description as event.description -| SORT event_code_count desc +| RENAME event_description AS event.description +| SORT event_code_count DESC | KEEP event_code_count,event.code,host.name,event.description ---- @@ -40,7 +40,7 @@ FROM logs-endpoint | WHERE process.name == "curl.exe" | STATS bytes = SUM(destination.bytes) BY destination.address | EVAL kb = bytes/1024 -| SORT kb desc +| SORT kb DESC | LIMIT 10 | KEEP kb,destination.address ---- @@ -60,7 +60,7 @@ FROM logs-endpoint ---- FROM logs-* | GROK dns.question.name "%{DATA}\\.%{GREEDYDATA:dns.question.registered_domain:string}" -| STATS unique_queries = count_distinct(dns.question.name) by dns.question.registered_domain, process.name +| STATS unique_queries = COUNT_DISTINCT(dns.question.name) BY dns.question.registered_domain, process.name | WHERE unique_queries > 10 | SORT unique_queries DESC | RENAME unique_queries AS `Unique Queries`, dns.question.registered_domain AS `Registered Domain`, process.name AS `Process` @@ -85,7 +85,7 @@ FROM logs-* | ENRICH ldap_lookup_new ON user.name | WHERE group.name IS NOT NULL | EVAL follow_up = CASE(destcount >= 100, "true","false") -| SORT 
destcount desc +| SORT destcount DESC | KEEP destcount, host.name, user.name, group.name, follow_up ---- diff --git a/docs/reference/esql/esql-limitations.asciidoc b/docs/reference/esql/esql-limitations.asciidoc index 303f9a337b6c4..85f107feeb8fd 100644 --- a/docs/reference/esql/esql-limitations.asciidoc +++ b/docs/reference/esql/esql-limitations.asciidoc @@ -10,20 +10,8 @@ === Result set size limit By default, an {esql} query returns up to 500 rows. You can increase the number -of rows up to 10,000 using the <> command. Queries do not return -more than 10,000 rows, regardless of the `LIMIT` command's value. - -This limit only applies to the number of rows that are retrieved by the query -and displayed in Discover. Queries and aggregations run on the full data set. - -To overcome this limitation: - -* Reduce the result set size by modifying the query to only return relevant -data. Use <> to select a smaller subset of the data. -* Shift any post-query processing to the query itself. You can use the {esql} -<> command to aggregate data in the query. -* Increase the limit with the `esql.query.result_truncation_max_size` static -cluster setting. +of rows up to 10,000 using the <> command. +include::processing-commands/limit.asciidoc[tag=limitation] [discrete] [[esql-supported-types]] diff --git a/docs/reference/esql/esql-security-solution.asciidoc b/docs/reference/esql/esql-security-solution.asciidoc new file mode 100644 index 0000000000000..45e8e44e44bdd --- /dev/null +++ b/docs/reference/esql/esql-security-solution.asciidoc @@ -0,0 +1,41 @@ +[[esql-elastic-security]] +=== Using {esql} in {elastic-sec} + +++++ +Using {esql} in {elastic-sec} +++++ + +You can use {esql} in {elastic-sec} to investigate events in Timeline and create +detection rules. Use the Elastic AI Assistant to build {esql} queries, or answer +questions about the {esql} query language. 
+ +[discrete] +[[esql-elastic-security-timeline]] +=== Use {esql} to investigate events in Timeline + +You can use {esql} in Timeline to filter, transform, and analyze event data +stored in {es}. To start using {esql}, open the **{esql}** tab. To learn +more, refer to {security-guide}/timelines-ui.html#esql-in-timeline[Investigate +events in Timeline]. + +[discrete] +[[esql-elastic-security-detection-rules]] +=== Use {esql} to create detection rules + +Use the {esql} rule type to create detection rules using {esql} queries. The +{esql} rule type supports aggregating and non-aggregating queries. To learn +more, refer to {security-guide}/rules-ui-create.html#create-esql-rule[Create an +{esql} rule]. + +[discrete] +[[esql-elastic-security-ai-assistant]] +=== Elastic AI Assistant + +Use the Elastic AI Assistant to build {esql} queries, or answer questions about +the {esql} query language. To learn more, refer to +{security-guide}/security-assistant.html[AI Assistant]. + +NOTE: For AI Assistant to answer questions about {esql} and write {esql} +queries, you need to +{security-guide}/security-assistant.html#set-up-ai-assistant[enable knowledge +base]. \ No newline at end of file diff --git a/docs/reference/esql/esql-using.asciidoc b/docs/reference/esql/esql-using.asciidoc index f586f3a28de5c..dbab521ead4d1 100644 --- a/docs/reference/esql/esql-using.asciidoc +++ b/docs/reference/esql/esql-using.asciidoc @@ -6,11 +6,16 @@ Information about using the <>. <>:: Using {esql} in {kib} to query and aggregate your data, create visualizations, -and set up alerts. +and set up alerts. + +<>:: +Using {esql} in {elastic-sec} to investigate events in Timeline and create +detection rules. <>:: Using the <> to list and cancel {esql} queries.
include::esql-rest.asciidoc[] include::esql-kibana.asciidoc[] +include::esql-security-solution.asciidoc[] include::task-management.asciidoc[] \ No newline at end of file diff --git a/docs/reference/esql/functions/case.asciidoc b/docs/reference/esql/functions/case.asciidoc index b243adf875cb4..73cefba12dfa5 100644 --- a/docs/reference/esql/functions/case.asciidoc +++ b/docs/reference/esql/functions/case.asciidoc @@ -4,7 +4,7 @@ *Syntax* -[source,txt] +[source,esql] ---- CASE(condition1, value1[, ..., conditionN, valueN][, default_value]) ---- diff --git a/docs/reference/esql/functions/date_parse.asciidoc b/docs/reference/esql/functions/date_parse.asciidoc index c74656ff1dbd7..9580ae238b663 100644 --- a/docs/reference/esql/functions/date_parse.asciidoc +++ b/docs/reference/esql/functions/date_parse.asciidoc @@ -4,7 +4,7 @@ *Syntax* -[source,txt] +[source,esql] ---- DATE_PARSE([format,] date_string) ---- diff --git a/docs/reference/esql/functions/date_trunc.asciidoc b/docs/reference/esql/functions/date_trunc.asciidoc index cacfefe73d0fd..ad0e1eb1170b4 100644 --- a/docs/reference/esql/functions/date_trunc.asciidoc +++ b/docs/reference/esql/functions/date_trunc.asciidoc @@ -8,6 +8,6 @@ Rounds down a date to the closest interval. Intervals can be expressed using the ---- FROM employees | EVAL year_hired = DATE_TRUNC(1 year, hire_date) -| STATS count(emp_no) BY year_hired +| STATS COUNT(emp_no) BY year_hired | SORT year_hired ---- diff --git a/docs/reference/esql/index.asciidoc b/docs/reference/esql/index.asciidoc index 2946f4e61d629..dcbe426b1bcac 100644 --- a/docs/reference/esql/index.asciidoc +++ b/docs/reference/esql/index.asciidoc @@ -55,14 +55,14 @@ fields>> and <>. And guidance for GROK>> and <>. <>:: -An overview of using the <>, <>, and -<>. +An overview of using the <>, <>, +<>, and <>. <>:: The current limitations of {esql}. <>:: -A few examples of what you can with {esql}. +A few examples of what you can do with {esql}. 
include::esql-get-started.asciidoc[] diff --git a/docs/reference/esql/processing-commands/dissect.asciidoc b/docs/reference/esql/processing-commands/dissect.asciidoc index eca10c201c968..c48b72af0de7e 100644 --- a/docs/reference/esql/processing-commands/dissect.asciidoc +++ b/docs/reference/esql/processing-commands/dissect.asciidoc @@ -4,9 +4,9 @@ **Syntax** -[source,txt] +[source,esql] ---- -DISSECT input "pattern" [ append_separator=""] +DISSECT input "pattern" [APPEND_SEPARATOR=""] ---- *Parameters* @@ -16,9 +16,9 @@ The column that contains the string you want to structure. If the column has multiple values, `DISSECT` will process each value. `pattern`:: -A dissect pattern. +A <>. -`append_separator=""`:: +``:: A string used as the separator between appended values, when using the <>. *Description* @@ -29,7 +29,7 @@ delimiter-based pattern, and extracts the specified keys as columns. Refer to <> for the syntax of dissect patterns. -*Example* +*Examples* // tag::examples[] The following example parses a string that contains a timestamp, some text, and diff --git a/docs/reference/esql/processing-commands/drop.asciidoc b/docs/reference/esql/processing-commands/drop.asciidoc index 50e3b27fb1b28..4787c5f137314 100644 --- a/docs/reference/esql/processing-commands/drop.asciidoc +++ b/docs/reference/esql/processing-commands/drop.asciidoc @@ -2,7 +2,23 @@ [[esql-drop]] === `DROP` -Use `DROP` to remove columns: +**Syntax** + +[source,esql] +---- +DROP columns +---- + +*Parameters* + +`columns`:: +A comma-separated list of columns to remove. Supports wildcards. + +*Description* + +The `DROP` processing command removes one or more columns. 
+ +*Examples* [source,esql] ---- diff --git a/docs/reference/esql/processing-commands/enrich.asciidoc b/docs/reference/esql/processing-commands/enrich.asciidoc index df402f3b1bd50..603683858b8c0 100644 --- a/docs/reference/esql/processing-commands/enrich.asciidoc +++ b/docs/reference/esql/processing-commands/enrich.asciidoc @@ -4,7 +4,7 @@ **Syntax** -[source,txt] +[source,esql] ---- ENRICH policy [ON match_field] [WITH [new_name1 = ]field1, [new_name2 = ]field2, ...] ---- @@ -15,18 +15,18 @@ ENRICH policy [ON match_field] [WITH [new_name1 = ]field1, [new_name2 = ]field2, The name of the enrich policy. You need to <> and <> the enrich policy first. -`ON match_field`:: +`match_field`:: The match field. `ENRICH` uses its value to look for records in the enrich index. If not specified, the match will be performed on the column with the same name as the `match_field` defined in the <>. -`WITH fieldX`:: +`fieldX`:: The enrich fields from the enrich index that are added to the result as new columns. If a column with the same name as the enrich field already exists, the existing column will be replaced by the new column. If not specified, each of the enrich fields defined in the policy is added -`new_nameX =`:: +`new_nameX`:: Enables you to change the name of the column that's added for each of the enrich fields. Defaults to the enrich field name. @@ -74,7 +74,7 @@ include::{esql-specs}/docs-IT_tests_only.csv-spec[tag=enrich_on-result] By default, each of the enrich fields defined in the policy is added as a column. 
To explicitly select the enrich fields that are added, use -`WITH , ...`: +`WITH , , ...`: [source.merge.styled,esql] ---- diff --git a/docs/reference/esql/processing-commands/eval.asciidoc b/docs/reference/esql/processing-commands/eval.asciidoc index a0a78f2a3bf97..eb69a587014ab 100644 --- a/docs/reference/esql/processing-commands/eval.asciidoc +++ b/docs/reference/esql/processing-commands/eval.asciidoc @@ -1,7 +1,30 @@ [discrete] [[esql-eval]] === `EVAL` -`EVAL` enables you to append new columns: + +**Syntax** + +[source,esql] +---- +EVAL column1 = value1[, ..., columnN = valueN] +---- + +*Parameters* + +`columnX`:: +The column name. + +`valueX`:: +The value for the column. Can be a literal, an expression, or a +<>. + +*Description* + +The `EVAL` processing command enables you to append new columns with calculated +values. `EVAL` supports various functions for calculating values. Refer to +<> for more information. + +*Examples* [source.merge.styled,esql] ---- @@ -23,8 +46,3 @@ include::{esql-specs}/docs.csv-spec[tag=evalReplace] |=== include::{esql-specs}/docs.csv-spec[tag=evalReplace-result] |=== - -[discrete] -==== Functions -`EVAL` supports various functions for calculating values. Refer to -<> for more information. 
diff --git a/docs/reference/esql/processing-commands/grok.asciidoc b/docs/reference/esql/processing-commands/grok.asciidoc index c95fe59f888ce..d5d58a9eaee12 100644 --- a/docs/reference/esql/processing-commands/grok.asciidoc +++ b/docs/reference/esql/processing-commands/grok.asciidoc @@ -4,7 +4,7 @@ **Syntax** -[source,txt] +[source,esql] ---- GROK input "pattern" ---- diff --git a/docs/reference/esql/processing-commands/keep.asciidoc b/docs/reference/esql/processing-commands/keep.asciidoc index 3e54e5a7d1c5c..7515583b1bfd1 100644 --- a/docs/reference/esql/processing-commands/keep.asciidoc +++ b/docs/reference/esql/processing-commands/keep.asciidoc @@ -2,11 +2,25 @@ [[esql-keep]] === `KEEP` -The `KEEP` command enables you to specify what columns are returned and the -order in which they are returned. +**Syntax** -To limit the columns that are returned, use a comma-separated list of column -names. The columns are returned in the specified order: +[source,esql] +---- +KEEP columns +---- + +*Parameters* +`columns`:: +A comma-separated list of columns to keep. Supports wildcards. + +*Description* + +The `KEEP` processing command enables you to specify what columns are returned +and the order in which they are returned. + +*Examples* + +The columns are returned in the specified order: [source.merge.styled,esql] ---- @@ -27,7 +41,7 @@ include::{esql-specs}/docs.csv-spec[tag=keepWildcard] The asterisk wildcard (`*`) by itself translates to all columns that do not match the other arguments. 
This query will first return all columns with a name -that starts with an h, followed by all other columns: +that starts with `h`, followed by all other columns: [source,esql] ---- diff --git a/docs/reference/esql/processing-commands/limit.asciidoc b/docs/reference/esql/processing-commands/limit.asciidoc index c02b534af59e1..5f659fc493a75 100644 --- a/docs/reference/esql/processing-commands/limit.asciidoc +++ b/docs/reference/esql/processing-commands/limit.asciidoc @@ -2,12 +2,46 @@ [[esql-limit]] === `LIMIT` -The `LIMIT` processing command enables you to limit the number of rows: +**Syntax** [source,esql] ---- -include::{esql-specs}/docs.csv-spec[tag=limit] +LIMIT max_number_of_rows ---- -If not specified, `LIMIT` defaults to `500`. A single query will not return -more than 10,000 rows, regardless of the `LIMIT` value. +*Parameters* + +`max_number_of_rows`:: +The maximum number of rows to return. + +*Description* + +The `LIMIT` processing command enables you to limit the number of rows that are +returned. +// tag::limitation[] +Queries do not return more than 10,000 rows, regardless of the `LIMIT` command's +value. + +This limit only applies to the number of rows that are retrieved by the query. +Queries and aggregations run on the full data set. + +To overcome this limitation: + +* Reduce the result set size by modifying the query to only return relevant +data. Use <> to select a smaller subset of the data. +* Shift any post-query processing to the query itself. You can use the {esql} +<> command to aggregate data in the query. 
+ +The default and maximum limits can be changed using these dynamic cluster +settings: + +* `esql.query.result_truncation_default_size` +* `esql.query.result_truncation_max_size` +// end::limitation[] + +*Example* + +[source,esql] +---- +include::{esql-specs}/docs.csv-spec[tag=limit] +---- diff --git a/docs/reference/esql/processing-commands/mv_expand.asciidoc b/docs/reference/esql/processing-commands/mv_expand.asciidoc index d62b28aabe440..46dc4fd0a33cf 100644 --- a/docs/reference/esql/processing-commands/mv_expand.asciidoc +++ b/docs/reference/esql/processing-commands/mv_expand.asciidoc @@ -2,7 +2,24 @@ [[esql-mv_expand]] === `MV_EXPAND` -The `MV_EXPAND` processing command expands multivalued fields into one row per value, duplicating other fields: +**Syntax** + +[source,esql] +---- +MV_EXPAND column +---- + +*Parameters* + +`column`:: +The multivalued column to expand. + +*Description* + +The `MV_EXPAND` processing command expands multivalued columns into one row per +value, duplicating other columns. + +*Example* [source.merge.styled,esql] ---- diff --git a/docs/reference/esql/processing-commands/rename.asciidoc b/docs/reference/esql/processing-commands/rename.asciidoc index 1dda424317976..773fe8b640f75 100644 --- a/docs/reference/esql/processing-commands/rename.asciidoc +++ b/docs/reference/esql/processing-commands/rename.asciidoc @@ -2,22 +2,33 @@ [[esql-rename]] === `RENAME` -Use `RENAME` to rename a column using the following syntax: +**Syntax** [source,esql] ---- -RENAME AS +RENAME old_name1 AS new_name1[, ..., old_nameN AS new_nameN] ---- -For example: +*Parameters* + +`old_nameX`:: +The name of a column you want to rename. + +`new_nameX`:: +The new name of the column. + +*Description* + +The `RENAME` processing command renames one or more columns. If a column with +the new name already exists, it will be replaced by the new column. 
+ +*Examples* [source,esql] ---- include::{esql-specs}/docs.csv-spec[tag=rename] ---- -If a column with the new name already exists, it will be replaced by the new -column. Multiple columns can be renamed with a single `RENAME` command: diff --git a/docs/reference/esql/processing-commands/sort.asciidoc b/docs/reference/esql/processing-commands/sort.asciidoc index 76a9193375932..fea7bfaf0c65f 100644 --- a/docs/reference/esql/processing-commands/sort.asciidoc +++ b/docs/reference/esql/processing-commands/sort.asciidoc @@ -1,35 +1,59 @@ [discrete] [[esql-sort]] === `SORT` -Use the `SORT` command to sort rows on one or more fields: + +**Syntax** + +[source,esql] +---- +SORT column1 [ASC/DESC][NULLS FIRST/NULLS LAST][, ..., columnN [ASC/DESC][NULLS FIRST/NULLS LAST]] +---- + +*Parameters* + +`columnX`:: +The column to sort on. + +*Description* + +The `SORT` processing command sorts a table on one or more columns. + +The default sort order is ascending. Use `ASC` or `DESC` to specify an explicit +sort order. + +Two rows with the same sort key are considered equal. You can provide additional +sort expressions to act as tie breakers. + +Sorting on multivalued columns uses the lowest value when sorting ascending and +the highest value when sorting descending. + +By default, `null` values are treated as being larger than any other value. With +an ascending sort order, `null` values are sorted last, and with a descending +sort order, `null` values are sorted first. You can change that by providing +`NULLS FIRST` or `NULLS LAST`. + +*Examples* [source,esql] ---- include::{esql-specs}/docs.csv-spec[tag=sort] ---- -The default sort order is ascending. Set an explicit sort order using `ASC` or -`DESC`: +Explicitly sorting in ascending order with `ASC`: [source,esql] ---- include::{esql-specs}/docs.csv-spec[tag=sortDesc] ---- -Two rows with the same sort key are considered equal. 
You can provide additional -sort expressions to act as tie breakers: +Providing additional sort expressions to act as tie breakers: [source,esql] ---- include::{esql-specs}/docs.csv-spec[tag=sortTie] ---- -[discrete] -==== `null` values -By default, `null` values are treated as being larger than any other value. With -an ascending sort order, `null` values are sorted last, and with a descending -sort order, `null` values are sorted first. You can change that by providing -`NULLS FIRST` or `NULLS LAST`: +Sorting `null` values first using `NULLS FIRST`: [source,esql] ---- diff --git a/docs/reference/esql/processing-commands/stats.asciidoc b/docs/reference/esql/processing-commands/stats.asciidoc index e0a9bbb52b03e..cbdb74d350fb1 100644 --- a/docs/reference/esql/processing-commands/stats.asciidoc +++ b/docs/reference/esql/processing-commands/stats.asciidoc @@ -1,8 +1,49 @@ [discrete] [[esql-stats-by]] === `STATS ... BY` -Use `STATS ... BY` to group rows according to a common value and calculate one -or more aggregated values over the grouped rows. + +**Syntax** + +[source,esql] +---- +STATS [column1 =] expression1[, ..., [columnN =] expressionN] [BY grouping_column1[, ..., grouping_columnN]] +---- + +*Parameters* + +`columnX`:: +The name by which the aggregated value is returned. If omitted, the name is +equal to the corresponding expression (`expressionX`). + +`expressionX`:: +An expression that computes an aggregated value. + +`grouping_columnX`:: +The column containing the values to group by. + +*Description* + +The `STATS ... BY` processing command groups rows according to a common value +and calculate one or more aggregated values over the grouped rows. If `BY` is +omitted, the output table contains exactly one row with the aggregations applied +over the entire dataset. 
+ +The following aggregation functions are supported: + +include::../functions/aggregation-functions.asciidoc[tag=agg_list] + +NOTE: `STATS` without any groups is much much faster than adding a group. + +NOTE: Grouping on a single column is currently much more optimized than grouping + on many columns. In some tests we have seen grouping on a single `keyword` + column to be five times faster than grouping on two `keyword` columns. Do + not try to work around this by combining the two columns together with + something like <> and then grouping - that is not going to be + faster. + +*Examples* + +Calculating a statistic and grouping by the values of another column: [source.merge.styled,esql] ---- @@ -13,8 +54,8 @@ include::{esql-specs}/docs.csv-spec[tag=stats] include::{esql-specs}/docs.csv-spec[tag=stats-result] |=== -If `BY` is omitted, the output table contains exactly one row with the -aggregations applied over the entire dataset: +Omitting `BY` returns one row with the aggregations applied over the entire +dataset: [source.merge.styled,esql] ---- @@ -39,15 +80,3 @@ keyword family fields): ---- include::{esql-specs}/docs.csv-spec[tag=statsGroupByMultipleValues] ---- - -The following aggregation functions are supported: - -include::../functions/aggregation-functions.asciidoc[tag=agg_list] - -NOTE: `STATS` without any groups is much much faster than adding group. - -NOTE: Grouping on a single field is currently much more optimized than grouping - on many fields. In some tests we've seen grouping on a single `keyword` - field to be five times faster than grouping on two `keyword` fields. Don't - try to work around this combining the two fields together with something - like <> and then grouping - that's not going to be faster. 
diff --git a/docs/reference/esql/processing-commands/where.asciidoc b/docs/reference/esql/processing-commands/where.asciidoc index 8dd55df12b9e7..e723a977bf99c 100644 --- a/docs/reference/esql/processing-commands/where.asciidoc +++ b/docs/reference/esql/processing-commands/where.asciidoc @@ -2,8 +2,27 @@ [[esql-where]] === `WHERE` -Use `WHERE` to produce a table that contains all the rows from the input table -for which the provided condition evaluates to `true`: +**Syntax** + +[source,esql] +---- +WHERE expression +---- + +*Parameters* + +`expression`:: +A boolean expression. + +*Description* + +The `WHERE` processing command produces a table that contains all the rows from +the input table for which the provided condition evaluates to `true`. + +`WHERE` supports various <> and +<>. + +*Examples* [source,esql] ---- @@ -17,15 +36,7 @@ Which, if `still_hired` is a boolean field, can be simplified to: include::{esql-specs}/docs.csv-spec[tag=whereBoolean] ---- -[discrete] -==== Operators - -Refer to <> for an overview of the supported operators. - -[discrete] -==== Functions -`WHERE` supports various functions for calculating values. Refer to -<> for more information. +Using a function: [source,esql] ---- diff --git a/docs/reference/esql/source-commands/from.asciidoc b/docs/reference/esql/source-commands/from.asciidoc index 5718bfc27ac1c..6f54a42ddad35 100644 --- a/docs/reference/esql/source-commands/from.asciidoc +++ b/docs/reference/esql/source-commands/from.asciidoc @@ -2,10 +2,47 @@ [[esql-from]] === `FROM` -The `FROM` source command returns a table with up to 10,000 documents from a -data stream, index, or alias. Each row in the resulting table represents a -document. Each column corresponds to a field, and can be accessed by the name -of that field. +**Syntax** + +[source,esql] +---- +FROM index_pattern [METADATA fields] +---- + +*Parameters* + +`index_pattern`:: +A list of indices, data streams or aliases. Supports wildcards and date math. 
+ +`fields`:: +A comma-separated list of <> to retrieve. + +*Description* + +The `FROM` source command returns a table with data from a data stream, index, +or alias. Each row in the resulting table represents a document. Each column +corresponds to a field, and can be accessed by the name of that field. + +[NOTE] +==== +By default, an {esql} query without an explicit <> uses an implicit +limit of 500. This applies to `FROM` too. A `FROM` command without `LIMIT`: + +[source,esql] +---- +FROM employees +---- + +is executed as: + +[source,esql] +---- +FROM employees +| LIMIT 500 +---- +==== + +*Examples* [source,esql] ---- diff --git a/docs/reference/esql/source-commands/row.asciidoc b/docs/reference/esql/source-commands/row.asciidoc index edfe5ecbf7cf3..adce844f365b8 100644 --- a/docs/reference/esql/source-commands/row.asciidoc +++ b/docs/reference/esql/source-commands/row.asciidoc @@ -2,9 +2,29 @@ [[esql-row]] === `ROW` +**Syntax** + +[source,esql] +---- +ROW column1 = value1[, ..., columnN = valueN] +---- + +*Parameters* + +`columnX`:: +The column name. + +`valueX`:: +The value for the column. Can be a literal, an expression, or a +<>. + +*Description* + The `ROW` source command produces a row with one or more columns with values that you specify. This can be useful for testing. +*Examples* + [source.merge.styled,esql] ---- include::{esql-specs}/row.csv-spec[tag=example] diff --git a/docs/reference/esql/source-commands/show.asciidoc b/docs/reference/esql/source-commands/show.asciidoc index 956baf628e9f3..ea8c83ceb772a 100644 --- a/docs/reference/esql/source-commands/show.asciidoc +++ b/docs/reference/esql/source-commands/show.asciidoc @@ -1,10 +1,35 @@ [discrete] [[esql-show]] -=== `SHOW ` +=== `SHOW` -The `SHOW ` source command returns information about the deployment and +**Syntax** + +[source,esql] +---- +SHOW item +---- + +*Parameters* + +`item`:: +Can be `INFO` or `FUNCTIONS`. 
+ +*Description* + +The `SHOW` source command returns information about the deployment and its capabilities: * Use `SHOW INFO` to return the deployment's version, build date and hash. * Use `SHOW FUNCTIONS` to return a list of all supported functions and a synopsis of each function. + +*Examples* + +[source.merge.styled,esql] +---- +include::{esql-specs}/show.csv-spec[tag=showFunctionsFiltered] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/show.csv-spec[tag=showFunctionsFiltered-result] +|=== diff --git a/docs/reference/indices/resolve.asciidoc b/docs/reference/indices/resolve.asciidoc index 1f405a2e49a7a..c919bba5c7651 100644 --- a/docs/reference/indices/resolve.asciidoc +++ b/docs/reference/indices/resolve.asciidoc @@ -88,9 +88,11 @@ include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=allow-no-indices] + Defaults to `true`. -include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=ignore_throttled] +`ignore_throttled`:: +(Optional, Boolean) If `true`, concrete, expanded or aliased indices are +ignored when frozen. Defaults to `false`. + -Defaults to `false`. +deprecated:[7.16.0] [[resolve-index-api-example]] ==== {api-examples-title} diff --git a/docs/reference/landing-page.asciidoc b/docs/reference/landing-page.asciidoc index 1ddd0cfa28128..a53a5770fe030 100644 --- a/docs/reference/landing-page.asciidoc +++ b/docs/reference/landing-page.asciidoc @@ -105,6 +105,9 @@
  • Troubleshooting
  • +
  • + Enterprise Search server +
  • @@ -119,6 +122,12 @@
  • Adding data to Elasticsearch
  • +
  • + Connectors +
  • +
  • + Web crawler +
  • Data streams
  • @@ -145,6 +154,12 @@
  • Query data with the Query DSL, ES|QL, EQL, or SQL
  • +
  • + Search applications +
  • +
  • + Search analytics +
  • Aggregations
  • @@ -207,7 +222,7 @@
    - +

    diff --git a/docs/reference/mapping/fields/source-field.asciidoc b/docs/reference/mapping/fields/source-field.asciidoc index f905be3d452ba..ec824e421e015 100644 --- a/docs/reference/mapping/fields/source-field.asciidoc +++ b/docs/reference/mapping/fields/source-field.asciidoc @@ -43,6 +43,8 @@ available then a number of features are not supported: * The <>, <>, and <> APIs. +* In the {kib} link:{kibana-ref}/discover.html[Discover] application, field data will not be displayed. + * On the fly <>. * The ability to reindex from one Elasticsearch index to another, either diff --git a/docs/reference/ml/anomaly-detection/apis/put-datafeed.asciidoc b/docs/reference/ml/anomaly-detection/apis/put-datafeed.asciidoc index ec2ef3631f0c6..05e23d901d5d3 100644 --- a/docs/reference/ml/anomaly-detection/apis/put-datafeed.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/put-datafeed.asciidoc @@ -66,8 +66,6 @@ include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=expand-wildcards] Defaults to `open`. include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=ignore_throttled] -+ -deprecated:[7.16.0] `ignore_unavailable`:: (Optional, Boolean) If `true`, unavailable indices (missing or closed) are diff --git a/docs/reference/ml/anomaly-detection/apis/update-datafeed.asciidoc b/docs/reference/ml/anomaly-detection/apis/update-datafeed.asciidoc index 48893f1aadb82..5e6121cd01ac9 100644 --- a/docs/reference/ml/anomaly-detection/apis/update-datafeed.asciidoc +++ b/docs/reference/ml/anomaly-detection/apis/update-datafeed.asciidoc @@ -55,8 +55,6 @@ include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=expand-wildcards] Defaults to `open`. 
include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=ignore_throttled] -+ -deprecated:[7.16.0] `ignore_unavailable`:: (Optional, Boolean) If `true`, unavailable indices (missing or closed) are diff --git a/docs/reference/rest-api/common-parms.asciidoc b/docs/reference/rest-api/common-parms.asciidoc index 41fd3eefc31f2..55f277218d210 100644 --- a/docs/reference/rest-api/common-parms.asciidoc +++ b/docs/reference/rest-api/common-parms.asciidoc @@ -438,6 +438,8 @@ tag::ignore_throttled[] `ignore_throttled`:: (Optional, Boolean) If `true`, concrete, expanded or aliased indices are ignored when frozen. Defaults to `true`. ++ +deprecated:[7.16.0] end::ignore_throttled[] tag::index-ignore-unavailable[] diff --git a/docs/reference/search/multi-search.asciidoc b/docs/reference/search/multi-search.asciidoc index e8d29e00ba486..90056d5036558 100644 --- a/docs/reference/search/multi-search.asciidoc +++ b/docs/reference/search/multi-search.asciidoc @@ -84,10 +84,7 @@ include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=expand-wildcards] + Defaults to `open`. -`ignore_throttled`:: -(Optional, Boolean) -If `true`, concrete, expanded or aliased indices are ignored when frozen. -Defaults to `true`. +include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=ignore_throttled] include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=index-ignore-unavailable] diff --git a/docs/reference/search/search-template-api.asciidoc b/docs/reference/search/search-template-api.asciidoc index 55142b953a194..539048a324746 100644 --- a/docs/reference/search/search-template-api.asciidoc +++ b/docs/reference/search/search-template-api.asciidoc @@ -92,9 +92,7 @@ include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=expand-wildcards] (Optional, Boolean) If `true`, the response includes additional details about score computation as part of a hit. Defaults to `false`. 
-`ignore_throttled`:: -(Optional, Boolean) If `true`, specified concrete, expanded, or aliased indices -are not included in the response when throttled. Defaults to `true`. +include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=ignore_throttled] include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=index-ignore-unavailable] diff --git a/docs/reference/search/search-your-data/ccs-version-compat-matrix.asciidoc b/docs/reference/search/search-your-data/ccs-version-compat-matrix.asciidoc index cae5627d65b54..8c289c27a2d31 100644 --- a/docs/reference/search/search-your-data/ccs-version-compat-matrix.asciidoc +++ b/docs/reference/search/search-your-data/ccs-version-compat-matrix.asciidoc @@ -1,20 +1,21 @@ -[cols="^,^,^,^,^,^,^,^,^,^,^,^,^,^,^"] +[cols="^,^,^,^,^,^,^,^,^,^,^,^,^,^,^,^"] |==== -| 14+^h| Remote cluster version +| 15+^h| Remote cluster version h| Local cluster version - | 6.8 | 7.1–7.16 | 7.17 | 8.0 | 8.1 | 8.2 | 8.3 | 8.4 | 8.5 |8.6 |8.7 |8.8 |8.9 |8.10 -| 6.8 | {yes-icon} | {yes-icon} | {yes-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} -| 7.1–7.16 | {yes-icon} | {yes-icon} | {yes-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} -| 7.17 | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon}| {yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} -| 8.0 | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon}| {yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} -| 8.1 | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon}| {yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} -| 8.2 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon}| 
{yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} -| 8.3 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon}|{yes-icon} | {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} -| 8.4 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} |{yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} -| 8.5 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} |{yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} -| 8.6 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} -| 8.7 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon}| {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} -| 8.8 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon}| {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} -| 8.9 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon}| {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} -| 8.10 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon}| {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} + | 6.8 | 7.1–7.16 | 7.17 | 8.0 | 8.1 | 8.2 | 8.3 | 8.4 | 8.5 |8.6 |8.7 |8.8 |8.9 |8.10 |8.11 +| 6.8 | {yes-icon} | {yes-icon} | {yes-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} +| 7.1–7.16 | {yes-icon} | {yes-icon} | {yes-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} +| 
7.17 | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon}| {yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} +| 8.0 | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon}| {yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} +| 8.1 | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon}| {yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} +| 8.2 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon}| {yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} +| 8.3 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon}|{yes-icon} | {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} +| 8.4 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} |{yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} +| 8.5 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} |{yes-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} +| 8.6 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon}| {yes-icon}| {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} +| 8.7 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon}| {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} +| 8.8 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon}| {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} +| 8.9 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | 
{no-icon} | {no-icon} | {no-icon} | {no-icon}| {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} | {yes-icon} +| 8.10 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon}| {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} | {yes-icon} +| 8.11 | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon}| {no-icon} | {no-icon} | {no-icon} | {no-icon} | {no-icon} | {yes-icon} | {yes-icon} |==== \ No newline at end of file diff --git a/docs/reference/search/search.asciidoc b/docs/reference/search/search.asciidoc index f953ce03ab1eb..68d286b3f267b 100644 --- a/docs/reference/search/search.asciidoc +++ b/docs/reference/search/search.asciidoc @@ -109,9 +109,7 @@ By default, you cannot page through more than 10,000 hits using the `from` and `size` parameters. To page through more hits, use the <> parameter. -`ignore_throttled`:: -(Optional, Boolean) If `true`, concrete, expanded or aliased indices will be -ignored when frozen. Defaults to `true`. 
+include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=ignore_throttled] include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=index-ignore-unavailable] diff --git a/modules/analysis-common/src/internalClusterTest/java/org/elasticsearch/analysis/common/ReloadSynonymAnalyzerIT.java b/modules/analysis-common/src/internalClusterTest/java/org/elasticsearch/analysis/common/ReloadSynonymAnalyzerIT.java index a9ffdb60419f9..f0063f663142d 100644 --- a/modules/analysis-common/src/internalClusterTest/java/org/elasticsearch/analysis/common/ReloadSynonymAnalyzerIT.java +++ b/modules/analysis-common/src/internalClusterTest/java/org/elasticsearch/analysis/common/ReloadSynonymAnalyzerIT.java @@ -17,7 +17,6 @@ import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.InternalTestCluster; import java.io.FileNotFoundException; import java.io.IOException; @@ -44,15 +43,6 @@ protected Collection> nodePlugins() { return Arrays.asList(CommonAnalysisPlugin.class); } - /** - * This test needs to write to the config directory, this is difficult in an external cluster so we overwrite this to force running with - * {@link InternalTestCluster} - */ - @Override - protected boolean ignoreExternalCluster() { - return true; - } - public void testSynonymsUpdateable() throws FileNotFoundException, IOException, InterruptedException { testSynonymsUpdate(false); } diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/PredicateTokenScriptFilterTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/PredicateTokenScriptFilterTests.java index 3a519f594a57f..b333c8534d19b 100644 --- a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/PredicateTokenScriptFilterTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/PredicateTokenScriptFilterTests.java @@ -97,9 +97,6 @@ 
private static class MockClient extends AbstractClient { super(settings, threadPool); } - @Override - public void close() {} - @Override protected void doExecute( ActionType action, diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/ScriptedConditionTokenFilterTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/ScriptedConditionTokenFilterTests.java index 81df5836015f0..98fdb551c27f1 100644 --- a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/ScriptedConditionTokenFilterTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/ScriptedConditionTokenFilterTests.java @@ -97,9 +97,6 @@ private class MockClient extends AbstractClient { super(settings, threadPool); } - @Override - public void close() {} - @Override protected void doExecute( ActionType action, diff --git a/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/MeterRegistryConcurrencyTests.java b/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/MeterRegistryConcurrencyTests.java index d1c74681c2bd7..e8dafd996f5b0 100644 --- a/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/MeterRegistryConcurrencyTests.java +++ b/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/MeterRegistryConcurrencyTests.java @@ -90,7 +90,6 @@ public ObservableLongCounter buildWithCallback(Consumer assertThat(setProviderThread.getState(), equalTo(Thread.State.WAITING))); // assert that the old lockingMeter is still in place - assertBusy(() -> assertThat(meterRegistrar.getMeter(), sameInstance(lockingMeter))); + assertThat(meterRegistrar.getMeter(), sameInstance(lockingMeter)); // finish long-running registration registerLatch.countDown(); + // wait for everything to quiesce, registerLatch.countDown() doesn't ensure lock has been released + setProviderThread.join(); + registerThread.join(); // assert that a meter was overriden - assertBusy(() -> assertThat(meterRegistrar.getMeter(), 
sameInstance(lockingMeter))); - + assertThat(meterRegistrar.getMeter(), sameInstance(noopMeter)); } } diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/CrudDataStreamLifecycleIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/CrudDataStreamLifecycleIT.java index ff84501697e21..e33b1fdcfa57a 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/CrudDataStreamLifecycleIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/CrudDataStreamLifecycleIT.java @@ -36,10 +36,6 @@ protected Collection> nodePlugins() { return List.of(DataStreamsPlugin.class, MockTransportService.TestPlugin.class); } - protected boolean ignoreExternalCluster() { - return true; - } - public void testGetLifecycle() throws Exception { DataStreamLifecycle lifecycle = randomLifecycle(); putComposableIndexTemplate("id1", null, List.of("with-lifecycle*"), null, null, lifecycle); diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceIT.java index e4f4f88254977..0d3588ba20b9a 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceIT.java @@ -90,10 +90,6 @@ protected Collection> nodePlugins() { return List.of(DataStreamsPlugin.class, MockTransportService.TestPlugin.class); } - protected boolean ignoreExternalCluster() { - return true; - } - @Override protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { Settings.Builder settings = Settings.builder().put(super.nodeSettings(nodeOrdinal, 
otherSettings)); diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/ExplainDataStreamLifecycleIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/ExplainDataStreamLifecycleIT.java index 6ff50d88aeb05..c9968a545cb7d 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/ExplainDataStreamLifecycleIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/ExplainDataStreamLifecycleIT.java @@ -62,10 +62,6 @@ protected Collection> nodePlugins() { return List.of(DataStreamsPlugin.class, MockTransportService.TestPlugin.class); } - protected boolean ignoreExternalCluster() { - return true; - } - @Override protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { Settings.Builder settings = Settings.builder().put(super.nodeSettings(nodeOrdinal, otherSettings)); diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java index bd6100c95b412..0ee168d130986 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java @@ -126,7 +126,6 @@ public class DataStreamLifecycleServiceTests extends ESTestCase { private ThreadPool threadPool; private DataStreamLifecycleService dataStreamLifecycleService; private List clientSeenRequests; - private Client client; private DoExecuteDelegate clientDelegate; private ClusterService clusterService; @@ -145,7 +144,7 @@ public void setupServices() { Clock clock = Clock.fixed(Instant.ofEpochMilli(now), ZoneId.of(randomFrom(ZoneId.getAvailableZoneIds()))); clientSeenRequests = new 
CopyOnWriteArrayList<>(); - client = getTransportRequestsRecordingClient(); + final Client client = getTransportRequestsRecordingClient(); AllocationService allocationService = new AllocationService( new AllocationDeciders( new HashSet<>( @@ -178,7 +177,6 @@ public void cleanup() { dataStreamLifecycleService.close(); clusterService.close(); threadPool.shutdownNow(); - client.close(); } public void testOperationsExecutedOnce() { @@ -1499,7 +1497,7 @@ private static DiscoveryNode getNode(String nodeId) { * (it does not even notify the listener), but tests can provide an implementation of clientDelegate to provide any needed behavior. */ private Client getTransportRequestsRecordingClient() { - return new NoOpClient(getTestName()) { + return new NoOpClient(threadPool) { @Override protected void doExecute( ActionType action, diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/SizedBucketAggregatorBuilder.java b/modules/health-shards-availability/src/main/java/module-info.java similarity index 52% rename from server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/SizedBucketAggregatorBuilder.java rename to modules/health-shards-availability/src/main/java/module-info.java index 55729094ace57..4ee4cafeb5f96 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/SizedBucketAggregatorBuilder.java +++ b/modules/health-shards-availability/src/main/java/module-info.java @@ -6,13 +6,7 @@ * Side Public License, v 1. */ -package org.elasticsearch.search.aggregations.bucket.histogram; - -import java.util.concurrent.TimeUnit; - -/** - * An aggregator capable of reporting bucket sizes in milliseconds. Used by RateAggregator for calendar-based buckets. 
- */ -public interface SizedBucketAggregatorBuilder { - double calendarDivider(TimeUnit timeUnit); +module org.elasticsearch.shardhealth { + requires org.elasticsearch.server; + requires org.apache.lucene.core; } diff --git a/modules/health-shards-availability/src/main/java/org/elasticsearch/health/plugin/ShardsAvailabilityPlugin.java b/modules/health-shards-availability/src/main/java/org/elasticsearch/health/plugin/ShardsAvailabilityPlugin.java index aeb45424ebc58..eac72c36bef18 100644 --- a/modules/health-shards-availability/src/main/java/org/elasticsearch/health/plugin/ShardsAvailabilityPlugin.java +++ b/modules/health-shards-availability/src/main/java/org/elasticsearch/health/plugin/ShardsAvailabilityPlugin.java @@ -9,7 +9,7 @@ package org.elasticsearch.health.plugin; import org.apache.lucene.util.SetOnce; -import org.elasticsearch.cluster.routing.allocation.ShardsAvailabilityHealthIndicatorService; +import org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService; import org.elasticsearch.health.HealthIndicatorService; import org.elasticsearch.plugins.HealthPlugin; import org.elasticsearch.plugins.Plugin; diff --git a/modules/ingest-common/src/internalClusterTest/java/org/elasticsearch/ingest/common/IngestRestartIT.java b/modules/ingest-common/src/internalClusterTest/java/org/elasticsearch/ingest/common/IngestRestartIT.java index 96ca77a5f65f9..5709fbd9d8bfc 100644 --- a/modules/ingest-common/src/internalClusterTest/java/org/elasticsearch/ingest/common/IngestRestartIT.java +++ b/modules/ingest-common/src/internalClusterTest/java/org/elasticsearch/ingest/common/IngestRestartIT.java @@ -50,11 +50,6 @@ protected Collection> nodePlugins() { return Arrays.asList(IngestCommonPlugin.class, CustomScriptPlugin.class); } - @Override - protected boolean ignoreExternalCluster() { - return true; - } - public static class CustomScriptPlugin extends MockScriptPlugin { @Override protected Map, Object>> pluginScripts() { diff --git 
a/modules/parent-join/src/internalClusterTest/java/org/elasticsearch/join/query/ParentChildTestCase.java b/modules/parent-join/src/internalClusterTest/java/org/elasticsearch/join/query/ParentChildTestCase.java index b4846a1c003a6..a67ebd4cbca22 100644 --- a/modules/parent-join/src/internalClusterTest/java/org/elasticsearch/join/query/ParentChildTestCase.java +++ b/modules/parent-join/src/internalClusterTest/java/org/elasticsearch/join/query/ParentChildTestCase.java @@ -29,11 +29,6 @@ @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.SUITE) public abstract class ParentChildTestCase extends ESIntegTestCase { - @Override - protected boolean ignoreExternalCluster() { - return true; - } - @Override protected Collection> nodePlugins() { return Arrays.asList(InternalSettingsPlugin.class, ParentJoinPlugin.class); diff --git a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/client/documentation/ReindexDocumentationIT.java b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/client/documentation/ReindexDocumentationIT.java index 9947d8a727d28..50284008eef48 100644 --- a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/client/documentation/ReindexDocumentationIT.java +++ b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/client/documentation/ReindexDocumentationIT.java @@ -56,11 +56,6 @@ public class ReindexDocumentationIT extends ESIntegTestCase { private static final Semaphore ALLOWED_OPERATIONS = new Semaphore(0); private static final String INDEX_NAME = "source_index"; - @Override - protected boolean ignoreExternalCluster() { - return true; - } - @Override protected Collection> nodePlugins() { return Arrays.asList(ReindexPlugin.class, ReindexCancellationPlugin.class); diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/AsyncBulkByScrollActionTests.java b/modules/reindex/src/test/java/org/elasticsearch/reindex/AsyncBulkByScrollActionTests.java index 8878e988eb4fb..3c5a3eb2e40f9 100644 --- 
a/modules/reindex/src/test/java/org/elasticsearch/reindex/AsyncBulkByScrollActionTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/AsyncBulkByScrollActionTests.java @@ -126,6 +126,7 @@ public class AsyncBulkByScrollActionTests extends ESTestCase { private PlainActionFuture listener; private String scrollId; private ThreadPool threadPool; + private ThreadPool clientThreadPool; private TaskManager taskManager; private BulkByScrollTask testTask; private WorkerBulkByScrollTaskState worker; @@ -154,16 +155,18 @@ public void setupForTest() { } private void setupClient(ThreadPool threadPool) { - if (client != null) { - client.close(); + if (clientThreadPool != null) { + terminate(clientThreadPool); } + clientThreadPool = threadPool; client = new MyMockClient(new NoOpClient(threadPool)); client.threadPool().getThreadContext().putHeader(expectedHeaders); } @After public void tearDownAndVerifyCommonStuff() throws Exception { - client.close(); + terminate(clientThreadPool); + clientThreadPool = null; terminate(threadPool); } diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/ClientScrollableHitSourceTests.java b/modules/reindex/src/test/java/org/elasticsearch/reindex/ClientScrollableHitSourceTests.java index 58bda3229cb42..c7c441e3eaff9 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/reindex/ClientScrollableHitSourceTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/ClientScrollableHitSourceTests.java @@ -267,9 +267,6 @@ public void val ((ExecuteRequest) executeRequest).validateRequest(action, validator); } - @Override - public void close() {} - public synchronized void awaitOperation() throws InterruptedException { if (executeRequest == null) { wait(10000); diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java index 291cf84019cd1..25bba12db6952 100644 --- 
a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java @@ -9,6 +9,7 @@ package org.elasticsearch.repositories.s3; import com.amazonaws.ClientConfiguration; +import com.amazonaws.SDKGlobalConfiguration; import com.amazonaws.auth.AWSCredentials; import com.amazonaws.auth.AWSCredentialsProvider; import com.amazonaws.auth.AWSCredentialsProviderChain; @@ -320,6 +321,7 @@ static class CustomWebIdentityTokenCredentialsProvider implements AWSCredentials private STSAssumeRoleWithWebIdentitySessionCredentialsProvider credentialsProvider; private AWSSecurityTokenService stsClient; + private String stsRegion; CustomWebIdentityTokenCredentialsProvider( Environment environment, @@ -361,10 +363,24 @@ static class CustomWebIdentityTokenCredentialsProvider implements AWSCredentials ); AWSSecurityTokenServiceClientBuilder stsClientBuilder = AWSSecurityTokenServiceClient.builder(); - // Custom system property used for specifying a mocked version of the STS for testing - String customStsEndpoint = jvmEnvironment.getProperty("com.amazonaws.sdk.stsMetadataServiceEndpointOverride", STS_HOSTNAME); - // Set the region explicitly via the endpoint URL, so the AWS SDK doesn't make any guesses internally. 
- stsClientBuilder.withEndpointConfiguration(new AwsClientBuilder.EndpointConfiguration(customStsEndpoint, null)); + // Check if we need to use regional STS endpoints + // https://docs.aws.amazon.com/sdkref/latest/guide/feature-sts-regionalized-endpoints.html + if ("regional".equalsIgnoreCase(systemEnvironment.getEnv("AWS_STS_REGIONAL_ENDPOINTS"))) { + // AWS_REGION should be injected by the EKS pod identity webhook: + // https://github.com/aws/amazon-eks-pod-identity-webhook/pull/41 + stsRegion = systemEnvironment.getEnv(SDKGlobalConfiguration.AWS_REGION_ENV_VAR); + if (stsRegion != null) { + stsClientBuilder.withRegion(stsRegion); + } else { + LOGGER.warn("Unable to use regional STS endpoints because the AWS_REGION environment variable is not set"); + } + } + if (stsRegion == null) { + // Custom system property used for specifying a mocked version of the STS for testing + String customStsEndpoint = jvmEnvironment.getProperty("com.amazonaws.sdk.stsMetadataServiceEndpointOverride", STS_HOSTNAME); + // Set the region explicitly via the endpoint URL, so the AWS SDK doesn't make any guesses internally. 
+ stsClientBuilder.withEndpointConfiguration(new AwsClientBuilder.EndpointConfiguration(customStsEndpoint, null)); + } stsClientBuilder.withCredentials(new AWSStaticCredentialsProvider(new AnonymousAWSCredentials())); stsClient = SocketAccess.doPrivileged(stsClientBuilder::build); try { @@ -383,6 +399,10 @@ boolean isActive() { return credentialsProvider != null; } + String getStsRegion() { + return stsRegion; + } + @Override public AWSCredentials getCredentials() { Objects.requireNonNull(credentialsProvider, "credentialsProvider is not set"); diff --git a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/CustomWebIdentityTokenCredentialsProviderTests.java b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/CustomWebIdentityTokenCredentialsProviderTests.java index 04c47bb9b55e6..cecb0cd147897 100644 --- a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/CustomWebIdentityTokenCredentialsProviderTests.java +++ b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/CustomWebIdentityTokenCredentialsProviderTests.java @@ -22,6 +22,7 @@ import org.junit.Assert; import org.mockito.Mockito; +import java.io.IOException; import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.URLDecoder; @@ -42,6 +43,15 @@ public class CustomWebIdentityTokenCredentialsProviderTests extends ESTestCase { private static final String ROLE_ARN = "arn:aws:iam::123456789012:role/FederatedWebIdentityRole"; private static final String ROLE_NAME = "aws-sdk-java-1651084775908"; + private static Environment getEnvironment() throws IOException { + Path configDirectory = createTempDir("web-identity-token-test"); + Files.createDirectory(configDirectory.resolve("repository-s3")); + Files.writeString(configDirectory.resolve("repository-s3/aws-web-identity-token-file"), "YXdzLXdlYi1pZGVudGl0eS10b2tlbi1maWxl"); + Environment environment = Mockito.mock(Environment.class); + 
Mockito.when(environment.configFile()).thenReturn(configDirectory); + return environment; + } + @SuppressForbidden(reason = "HTTP server is used for testing") public void testCreateWebIdentityTokenCredentialsProvider() throws Exception { HttpServer httpServer = MockHttpServer.createHttp(new InetSocketAddress(InetAddress.getLoopbackAddress().getHostAddress(), 0), 0); @@ -88,11 +98,7 @@ public void testCreateWebIdentityTokenCredentialsProvider() throws Exception { }); httpServer.start(); - Path configDirectory = Files.createTempDirectory("web-identity-token-test"); - Files.createDirectory(configDirectory.resolve("repository-s3")); - Files.writeString(configDirectory.resolve("repository-s3/aws-web-identity-token-file"), "YXdzLXdlYi1pZGVudGl0eS10b2tlbi1maWxl"); - Environment environment = Mockito.mock(Environment.class); - Mockito.when(environment.configFile()).thenReturn(configDirectory); + Environment environment = getEnvironment(); // No region is set, but the SDK shouldn't fail because of that Map environmentVariables = Map.of( @@ -125,4 +131,32 @@ public void testCreateWebIdentityTokenCredentialsProvider() throws Exception { httpServer.stop(0); } } + + public void testSupportRegionalizedEndpoints() throws Exception { + Map environmentVariables = Map.of( + "AWS_WEB_IDENTITY_TOKEN_FILE", + "/var/run/secrets/eks.amazonaws.com/serviceaccount/token", + "AWS_ROLE_ARN", + ROLE_ARN, + "AWS_STS_REGIONAL_ENDPOINTS", + "regional", + "AWS_REGION", + "us-west-2" + ); + Map systemProperties = Map.of(); + + var webIdentityTokenCredentialsProvider = new S3Service.CustomWebIdentityTokenCredentialsProvider( + getEnvironment(), + environmentVariables::get, + systemProperties::getOrDefault, + Clock.systemUTC() + ); + // We can't verify that webIdentityTokenCredentialsProvider's STS client uses the "https://sts.us-west-2.amazonaws.com" + // endpoint in a unit test. 
The client depends on hardcoded RegionalEndpointsOptionResolver that in turn depends + // on the system environment that we can't change in the test. So we just verify we that we called `withRegion` + // on stsClientBuilder which should internally correctly configure the endpoint when the STS client is built. + assertEquals("us-west-2", webIdentityTokenCredentialsProvider.getStsRegion()); + + webIdentityTokenCredentialsProvider.shutdown(); + } } diff --git a/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/ESNetty4IntegTestCase.java b/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/ESNetty4IntegTestCase.java index 09c6b3d50a380..c996f55198bf6 100644 --- a/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/ESNetty4IntegTestCase.java +++ b/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/ESNetty4IntegTestCase.java @@ -19,11 +19,6 @@ public abstract class ESNetty4IntegTestCase extends ESIntegTestCase { - @Override - protected boolean ignoreExternalCluster() { - return true; - } - @Override protected boolean addMockTransportService() { return false; diff --git a/qa/rolling-upgrade-legacy/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java b/qa/rolling-upgrade-legacy/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java index eb05d331af033..7af6ad49fb001 100644 --- a/qa/rolling-upgrade-legacy/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java +++ b/qa/rolling-upgrade-legacy/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java @@ -8,7 +8,6 @@ package org.elasticsearch.upgrades; import org.apache.http.util.EntityUtils; -import org.apache.lucene.tests.util.LuceneTestCase; import org.elasticsearch.Version; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.client.Request; @@ -54,7 +53,6 @@ /** * In depth testing of the recovery mechanism during a rolling restart. 
*/ -@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/99778") public class RecoveryIT extends AbstractRollingTestCase { private static String CLUSTER_NAME = System.getProperty("tests.clustername"); diff --git a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/HttpSmokeTestCase.java b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/HttpSmokeTestCase.java index 2533b213d469c..4536e2ee25fd6 100644 --- a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/HttpSmokeTestCase.java +++ b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/HttpSmokeTestCase.java @@ -41,11 +41,6 @@ protected Collection> nodePlugins() { return List.of(getTestTransportPlugin(), MainRestPlugin.class); } - @Override - protected boolean ignoreExternalCluster() { - return true; - } - public static void assertOK(Response response) { assertThat(response.getStatusLine().getStatusCode(), oneOf(200, 201)); } diff --git a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/IndicesRecoveryRestCancellationIT.java b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/IndicesRecoveryRestCancellationIT.java deleted file mode 100644 index 55870bed5e851..0000000000000 --- a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/IndicesRecoveryRestCancellationIT.java +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.http; - -import org.apache.http.client.methods.HttpGet; -import org.elasticsearch.action.admin.indices.recovery.RecoveryAction; -import org.elasticsearch.action.admin.indices.recovery.TransportRecoveryAction; -import org.elasticsearch.action.admin.indices.recovery.TransportRecoveryActionHelper; -import org.elasticsearch.action.support.PlainActionFuture; -import org.elasticsearch.client.Cancellable; -import org.elasticsearch.client.Request; -import org.elasticsearch.client.Response; -import org.elasticsearch.core.Releasable; -import org.elasticsearch.core.Releasables; - -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.CancellationException; -import java.util.concurrent.Semaphore; - -import static org.elasticsearch.action.support.ActionTestUtils.wrapAsRestResponseListener; -import static org.elasticsearch.test.TaskAssertions.assertAllCancellableTasksAreCancelled; -import static org.elasticsearch.test.TaskAssertions.assertAllTasksHaveFinished; -import static org.elasticsearch.test.TaskAssertions.awaitTaskWithPrefix; -import static org.hamcrest.Matchers.empty; -import static org.hamcrest.Matchers.not; - -public class IndicesRecoveryRestCancellationIT extends HttpSmokeTestCase { - - public void testIndicesRecoveryRestCancellation() throws Exception { - runTest(new Request(HttpGet.METHOD_NAME, "/_recovery")); - } - - public void testCatRecoveryRestCancellation() throws Exception { - runTest(new Request(HttpGet.METHOD_NAME, "/_cat/recovery")); - } - - private void runTest(Request request) throws Exception { - - createIndex("test"); - ensureGreen("test"); - - final List operationBlocks = new ArrayList<>(); - for (final TransportRecoveryAction transportRecoveryAction : internalCluster().getInstances(TransportRecoveryAction.class)) { - final Semaphore operationBlock = new Semaphore(1); - operationBlocks.add(operationBlock); - TransportRecoveryActionHelper.setOnShardOperation(transportRecoveryAction, () -> { - 
try { - operationBlock.acquire(); - } catch (InterruptedException e) { - throw new AssertionError(e); - } - operationBlock.release(); - }); - } - assertThat(operationBlocks, not(empty())); - - final List releasables = new ArrayList<>(); - try { - for (final Semaphore operationBlock : operationBlocks) { - operationBlock.acquire(); - releasables.add(operationBlock::release); - } - - final PlainActionFuture future = new PlainActionFuture<>(); - logger.info("--> sending request"); - final Cancellable cancellable = getRestClient().performRequestAsync(request, wrapAsRestResponseListener(future)); - - awaitTaskWithPrefix(RecoveryAction.NAME); - - logger.info("--> waiting for at least one task to hit a block"); - assertBusy(() -> assertTrue(operationBlocks.stream().anyMatch(Semaphore::hasQueuedThreads))); - - logger.info("--> cancelling request"); - cancellable.cancel(); - expectThrows(CancellationException.class, future::actionGet); - - assertAllCancellableTasksAreCancelled(RecoveryAction.NAME); - } finally { - Releasables.close(releasables); - } - - assertAllTasksHaveFinished(RecoveryAction.NAME); - } - -} diff --git a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/RestActionCancellationIT.java b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/RestActionCancellationIT.java new file mode 100644 index 0000000000000..d46868094907d --- /dev/null +++ b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/RestActionCancellationIT.java @@ -0,0 +1,144 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.http; + +import org.apache.http.client.methods.HttpGet; +import org.apache.http.client.methods.HttpPost; +import org.elasticsearch.action.admin.cluster.health.ClusterHealthAction; +import org.elasticsearch.action.admin.cluster.state.ClusterStateAction; +import org.elasticsearch.action.admin.indices.alias.get.GetAliasesAction; +import org.elasticsearch.action.admin.indices.recovery.RecoveryAction; +import org.elasticsearch.action.support.CancellableActionTestPlugin; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.RefCountingListener; +import org.elasticsearch.action.support.SubscribableListener; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.CollectionUtils; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.rest.ObjectPath; + +import java.util.Collection; +import java.util.concurrent.CancellationException; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; + +import static org.elasticsearch.action.support.ActionTestUtils.wrapAsRestResponseListener; +import static org.elasticsearch.test.TaskAssertions.assertAllTasksHaveFinished; +import static org.hamcrest.Matchers.greaterThan; + +public class RestActionCancellationIT extends HttpSmokeTestCase { + + public void testIndicesRecoveryRestCancellation() { + createIndex("test"); + ensureGreen("test"); + runRestActionCancellationTest(new Request(HttpGet.METHOD_NAME, "/_recovery"), RecoveryAction.NAME); + } + + public void testCatRecoveryRestCancellation() { + createIndex("test"); + ensureGreen("test"); + runRestActionCancellationTest(new Request(HttpGet.METHOD_NAME, "/_cat/recovery"), RecoveryAction.NAME); + } + + public void testClusterHealthRestCancellation() { + runRestActionCancellationTest(new 
Request(HttpGet.METHOD_NAME, "/_cluster/health"), ClusterHealthAction.NAME); + } + + public void testClusterStateRestCancellation() { + runRestActionCancellationTest(new Request(HttpGet.METHOD_NAME, "/_cluster/state"), ClusterStateAction.NAME); + } + + public void testGetAliasesCancellation() { + runRestActionCancellationTest(new Request(HttpGet.METHOD_NAME, "/_alias"), GetAliasesAction.NAME); + } + + public void testCatAliasesCancellation() { + runRestActionCancellationTest(new Request(HttpGet.METHOD_NAME, "/_cat/aliases"), GetAliasesAction.NAME); + } + + private void runRestActionCancellationTest(Request request, String actionName) { + final var node = usually() ? internalCluster().getRandomNodeName() : internalCluster().startCoordinatingOnlyNode(Settings.EMPTY); + + try ( + var restClient = createRestClient(node); + var capturingAction = CancellableActionTestPlugin.capturingActionOnNode(actionName, node) + ) { + final var responseFuture = new PlainActionFuture(); + final var restInvocation = restClient.performRequestAsync(request, wrapAsRestResponseListener(responseFuture)); + + if (randomBoolean()) { + // cancel by aborting the REST request + capturingAction.captureAndCancel(restInvocation::cancel); + expectThrows(ExecutionException.class, CancellationException.class, () -> responseFuture.get(10, TimeUnit.SECONDS)); + } else { + // cancel via the task management API + final var cancelFuture = new PlainActionFuture(); + capturingAction.captureAndCancel( + () -> SubscribableListener + + .newForked( + l -> restClient.performRequestAsync( + getListTasksRequest(node, actionName), + wrapAsRestResponseListener(l.map(ObjectPath::createFromResponse)) + ) + ) + + .andThen((l, listTasksResponse) -> { + final var taskCount = listTasksResponse.evaluateArraySize("tasks"); + assertThat(taskCount, greaterThan(0)); + try (var listeners = new RefCountingListener(l)) { + for (int i = 0; i < taskCount; i++) { + final var taskPrefix = "tasks." 
+ i + "."; + assertTrue(listTasksResponse.evaluate(taskPrefix + "cancellable")); + assertFalse(listTasksResponse.evaluate(taskPrefix + "cancelled")); + restClient.performRequestAsync( + getCancelTaskRequest( + listTasksResponse.evaluate(taskPrefix + "node"), + listTasksResponse.evaluate(taskPrefix + "id") + ), + wrapAsRestResponseListener(listeners.acquire(HttpSmokeTestCase::assertOK)) + ); + } + } + }) + + .addListener(cancelFuture) + ); + cancelFuture.get(10, TimeUnit.SECONDS); + expectThrows(Exception.class, () -> responseFuture.get(10, TimeUnit.SECONDS)); + } + + assertAllTasksHaveFinished(actionName); + } catch (Exception e) { + fail(e); + } + } + + private static Request getListTasksRequest(String taskNode, String actionName) { + final var listTasksRequest = new Request(HttpGet.METHOD_NAME, "/_tasks"); + listTasksRequest.addParameter("nodes", taskNode); + listTasksRequest.addParameter("actions", actionName); + listTasksRequest.addParameter("group_by", "none"); + return listTasksRequest; + } + + private static Request getCancelTaskRequest(String taskNode, int taskId) { + final var cancelTaskRequest = new Request(HttpPost.METHOD_NAME, Strings.format("/_tasks/%s:%d/_cancel", taskNode, taskId)); + cancelTaskRequest.addParameter("wait_for_completion", null); + return cancelTaskRequest; + } + + @Override + protected Collection> nodePlugins() { + return CollectionUtils.appendToCopy(super.nodePlugins(), CancellableActionTestPlugin.class); + } +} diff --git a/rest-api-spec/build.gradle b/rest-api-spec/build.gradle index e484b98d3188e..787d684c3779e 100644 --- a/rest-api-spec/build.gradle +++ b/rest-api-spec/build.gradle @@ -228,8 +228,9 @@ tasks.named("yamlRestTestV7CompatTransform").configure { task -> } tasks.register('enforceYamlTestConvention').configure { + def tree = fileTree('src/main/resources/rest-api-spec/test') doLast { - if (fileTree('src/main/resources/rest-api-spec/test').files) { + if (tree.files) { throw new GradleException("There are YAML tests in 
src/main source set. These should be moved to src/yamlRestTest.") } } diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.aliases/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.aliases/10_basic.yml index 49f5958ad8da1..96998a2a6218e 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.aliases/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/cat.aliases/10_basic.yml @@ -496,3 +496,16 @@ test_alias \s+ test_index\n my_alias \s+ test_index\n $/ + +--- +"Deprecated local parameter": + - skip: + version: "- 8.11.99" + features: ["warnings"] + reason: verifying deprecation warnings from 8.12.0 onwards + + - do: + cat.aliases: + local: true + warnings: + - "the [?local=true] query parameter to cat-aliases requests has no effect and will be removed in a future version" diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.exists_alias/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.exists_alias/10_basic.yml index fba0512ca372f..bf499de8463bd 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.exists_alias/10_basic.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.exists_alias/10_basic.yml @@ -37,9 +37,14 @@ --- "Test indices.exists_alias with local flag": + - skip: + features: ["allowed_warnings"] + - do: indices.exists_alias: name: test_alias local: true + allowed_warnings: + - "the [?local=true] query parameter to get-aliases requests has no effect and will be removed in a future version" - is_false: '' diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.get_alias/10_basic.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.get_alias/10_basic.yml index 721c7bc709032..d765decda68a8 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.get_alias/10_basic.yml +++ 
b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.get_alias/10_basic.yml @@ -291,10 +291,14 @@ setup: --- "Get alias with local flag": + - skip: + features: ["allowed_warnings"] - do: indices.get_alias: local: true + allowed_warnings: + - "the [?local=true] query parameter to get-aliases requests has no effect and will be removed in a future version" - is_true: test_index @@ -325,3 +329,17 @@ setup: - is_true: test_index - is_false: test_index_2 + + +--- +"Deprecated local parameter": + - skip: + version: "- 8.11.99" + features: ["warnings"] + reason: verifying deprecation warnings from 8.12.0 onwards + + - do: + indices.get_alias: + local: true + warnings: + - "the [?local=true] query parameter to get-aliases requests has no effect and will be removed in a future version" diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/ShardsAvailabilityHealthIndicatorServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/shards/ShardsAvailabilityHealthIndicatorServiceIT.java similarity index 97% rename from server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/ShardsAvailabilityHealthIndicatorServiceIT.java rename to server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/shards/ShardsAvailabilityHealthIndicatorServiceIT.java index e85edc5805482..b862d0b2f20b6 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/ShardsAvailabilityHealthIndicatorServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/shards/ShardsAvailabilityHealthIndicatorServiceIT.java @@ -6,13 +6,14 @@ * Side Public License, v 1. 
*/ -package org.elasticsearch.cluster.routing.allocation; +package org.elasticsearch.cluster.routing.allocation.shards; import org.elasticsearch.action.admin.indices.shrink.ResizeType; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.routing.RoutingNodes; +import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.health.HealthIndicatorResult; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestStatsNamesAndTypesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestStatsNamesAndTypesIT.java index 547da987dcb91..2a4174ba427af 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestStatsNamesAndTypesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestStatsNamesAndTypesIT.java @@ -45,11 +45,6 @@ protected Collection> nodePlugins() { return List.of(CustomIngestTestPlugin.class, CustomScriptPlugin.class); } - @Override - protected boolean ignoreExternalCluster() { - return true; - } - @SuppressWarnings("unchecked") public void testIngestStatsNamesAndTypes() throws IOException { String pipeline1 = org.elasticsearch.core.Strings.format(""" diff --git a/server/src/internalClusterTest/java/org/elasticsearch/persistent/PersistentTasksExecutorFullRestartIT.java b/server/src/internalClusterTest/java/org/elasticsearch/persistent/PersistentTasksExecutorFullRestartIT.java index f51ff1da9bfc9..d1c72a9650b85 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/persistent/PersistentTasksExecutorFullRestartIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/persistent/PersistentTasksExecutorFullRestartIT.java @@ -34,10 +34,6 @@ protected Collection> nodePlugins() 
{ return Collections.singletonList(TestPersistentTasksPlugin.class); } - protected boolean ignoreExternalCluster() { - return true; - } - public void testFullClusterRestart() throws Exception { PersistentTasksService service = internalCluster().getInstance(PersistentTasksService.class); int numberOfTasks = randomIntBetween(1, 10); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/persistent/PersistentTasksExecutorIT.java b/server/src/internalClusterTest/java/org/elasticsearch/persistent/PersistentTasksExecutorIT.java index c91f5138e919f..3cc90a6795e37 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/persistent/PersistentTasksExecutorIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/persistent/PersistentTasksExecutorIT.java @@ -51,10 +51,6 @@ protected Collection> nodePlugins() { return Collections.singletonList(TestPersistentTasksPlugin.class); } - protected boolean ignoreExternalCluster() { - return true; - } - @Before public void resetNonClusterStateCondition() { TestPersistentTasksExecutor.setNonClusterStateCondition(true); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/persistent/decider/EnableAssignmentDeciderIT.java b/server/src/internalClusterTest/java/org/elasticsearch/persistent/decider/EnableAssignmentDeciderIT.java index cb24b78a499ac..d9aa15ed6e2f5 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/persistent/decider/EnableAssignmentDeciderIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/persistent/decider/EnableAssignmentDeciderIT.java @@ -36,11 +36,6 @@ protected Collection> nodePlugins() { return singletonList(TestPersistentTasksPlugin.class); } - @Override - protected boolean ignoreExternalCluster() { - return true; - } - /** * Test that the {@link EnableAssignmentDecider#CLUSTER_TASKS_ALLOCATION_ENABLE_SETTING} setting correctly * prevents persistent tasks to be assigned after a cluster restart. 
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/FieldCapabilitiesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/FieldCapabilitiesIT.java index 474d4ebc12843..480556b942ac8 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/FieldCapabilitiesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/FieldCapabilitiesIT.java @@ -193,11 +193,6 @@ protected boolean addMockHttpTransport() { return false; // enable http } - @Override - protected boolean ignoreExternalCluster() { - return true; - } - public void testFieldAlias() { FieldCapabilitiesResponse response = client().prepareFieldCaps().setFields("distance", "route_length_miles").get(); diff --git a/server/src/main/java/module-info.java b/server/src/main/java/module-info.java index d8b45adbebb1f..98dd182900f88 100644 --- a/server/src/main/java/module-info.java +++ b/server/src/main/java/module-info.java @@ -419,4 +419,6 @@ org.elasticsearch.index.codec.bloomfilter.ES85BloomFilterPostingsFormat, org.elasticsearch.index.codec.bloomfilter.ES87BloomFilterPostingsFormat; provides org.apache.lucene.codecs.DocValuesFormat with ES87TSDBDocValuesFormat; + + exports org.elasticsearch.cluster.routing.allocation.shards to org.elasticsearch.shardhealth, org.elasticsearch.serverless.shardhealth; } diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index 928297397f15c..56a00e25022d4 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -153,8 +153,8 @@ public class Version implements VersionId, ToXContentFragment { public static final Version V_8_10_2 = new Version(8_10_02_99); public static final Version V_8_10_3 = new Version(8_10_03_99); public static final Version V_8_10_4 = new Version(8_10_04_99); - public static final Version V_8_10_5 = new 
Version(8_10_05_99); public static final Version V_8_11_0 = new Version(8_11_00_99); + public static final Version V_8_11_1 = new Version(8_11_01_99); public static final Version V_8_12_0 = new Version(8_12_00_99); public static final Version CURRENT = V_8_12_0; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesAction.java index c05b19043e88b..a04c7c2c2af60 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesAction.java @@ -9,6 +9,7 @@ package org.elasticsearch.action.admin.indices.alias.get; import org.elasticsearch.action.ActionType; +import org.elasticsearch.common.io.stream.Writeable; public class GetAliasesAction extends ActionType { @@ -16,6 +17,6 @@ public class GetAliasesAction extends ActionType { public static final String NAME = "indices:admin/aliases/get"; private GetAliasesAction() { - super(NAME, GetAliasesResponse::new); + super(NAME, Writeable.Reader.localOnly()); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesRequest.java index d801b441fecea..ee6797ca58fb9 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesRequest.java @@ -10,12 +10,17 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.AliasesRequest; import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.TransportAction; import org.elasticsearch.action.support.master.MasterNodeReadRequest; import org.elasticsearch.common.Strings; import 
org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.tasks.CancellableTask; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.tasks.TaskId; import java.io.IOException; +import java.util.Map; public class GetAliasesRequest extends MasterNodeReadRequest implements AliasesRequest { @@ -33,6 +38,11 @@ public GetAliasesRequest(String... aliases) { public GetAliasesRequest() {} + /** + * NB prior to 8.12 get-aliases was a TransportMasterNodeReadAction so for BwC we must remain able to read these requests until we no + * longer need to support {@link org.elasticsearch.TransportVersions#CLUSTER_FEATURES_ADDED} and earlier. Once we remove this we can + * also make this class a regular ActionRequest instead of a MasterNodeReadRequest. + */ public GetAliasesRequest(StreamInput in) throws IOException { super(in); indices = in.readStringArray(); @@ -43,11 +53,7 @@ public GetAliasesRequest(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeStringArray(indices); - out.writeStringArray(aliases); - indicesOptions.writeIndicesOptions(out); - out.writeStringArray(originalAliases); + TransportAction.localOnly(); } @Override @@ -108,4 +114,9 @@ public ActionRequestValidationException validate() { public boolean includeDataStreams() { return true; } + + @Override + public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { + return new CancellableTask(id, type, action, "", parentTaskId, headers); + } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesResponse.java index 91c6f49101e85..c0e26b16585c4 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesResponse.java +++ 
b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesResponse.java @@ -11,7 +11,6 @@ import org.elasticsearch.action.ActionResponse; import org.elasticsearch.cluster.metadata.AliasMetadata; import org.elasticsearch.cluster.metadata.DataStreamAlias; -import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import java.io.IOException; @@ -29,12 +28,6 @@ public GetAliasesResponse(Map> aliases, Map i.readCollectionAsList(AliasMetadata::new)); - dataStreamAliases = in.readMap(in1 -> in1.readCollectionAsList(DataStreamAlias::new)); - } - public Map> getAliases() { return aliases; } @@ -43,6 +36,10 @@ public Map> getDataStreamAliases() { return dataStreamAliases; } + /** + * NB prior to 8.12 get-aliases was a TransportMasterNodeReadAction so for BwC we must remain able to write these responses until we no + * longer need to support {@link org.elasticsearch.TransportVersions#CLUSTER_FEATURES_ADDED} and earlier. + */ @Override public void writeTo(StreamOutput out) throws IOException { out.writeMap(aliases, StreamOutput::writeCollection); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java index 456b2cc7b899f..e43d1a825c233 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java @@ -9,7 +9,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.master.TransportMasterNodeReadAction; +import org.elasticsearch.action.support.TransportLocalClusterStateAction; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import 
org.elasticsearch.cluster.block.ClusterBlockLevel; @@ -26,6 +26,7 @@ import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.indices.SystemIndices; import org.elasticsearch.indices.SystemIndices.SystemIndexAccessLevel; +import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -38,32 +39,37 @@ import java.util.Map; import java.util.function.Predicate; -public class TransportGetAliasesAction extends TransportMasterNodeReadAction { +/** + * NB prior to 8.12 this was a TransportMasterNodeReadAction so for BwC it must be registered with the TransportService (i.e. a + * HandledTransportAction) until we no longer need to support {@link org.elasticsearch.TransportVersions#CLUSTER_FEATURES_ADDED} and + * earlier. + */ +public class TransportGetAliasesAction extends TransportLocalClusterStateAction { private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(TransportGetAliasesAction.class); + private final IndexNameExpressionResolver indexNameExpressionResolver; private final SystemIndices systemIndices; + private final ThreadContext threadContext; @Inject public TransportGetAliasesAction( TransportService transportService, - ClusterService clusterService, - ThreadPool threadPool, ActionFilters actionFilters, + ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver, SystemIndices systemIndices ) { super( GetAliasesAction.NAME, - transportService, clusterService, - threadPool, + transportService, actionFilters, GetAliasesRequest::new, - indexNameExpressionResolver, - GetAliasesResponse::new, - threadPool.executor(ThreadPool.Names.MANAGEMENT) + clusterService.threadPool().executor(ThreadPool.Names.MANAGEMENT) ); + this.indexNameExpressionResolver = indexNameExpressionResolver; this.systemIndices = systemIndices; + this.threadContext = 
clusterService.threadPool().getThreadContext(); } @Override @@ -77,15 +83,22 @@ protected ClusterBlockException checkBlock(GetAliasesRequest request, ClusterSta } @Override - protected void masterOperation(Task task, GetAliasesRequest request, ClusterState state, ActionListener listener) { + protected void localClusterStateOperation( + Task task, + GetAliasesRequest request, + ClusterState state, + ActionListener listener + ) { assert Transports.assertNotTransportThread("no need to avoid the context switch and may be expensive if there are many aliases"); + final var cancellableTask = (CancellableTask) task; // resolve all concrete indices upfront and warn/error later final String[] concreteIndices = indexNameExpressionResolver.concreteIndexNamesWithSystemIndexAccess(state, request); final SystemIndexAccessLevel systemIndexAccessLevel = indexNameExpressionResolver.getSystemIndexAccessLevel(); Map> aliases = state.metadata().findAliases(request.aliases(), concreteIndices); + cancellableTask.ensureNotCancelled(); listener.onResponse( new GetAliasesResponse( - postProcess(request, concreteIndices, aliases, state, systemIndexAccessLevel, threadPool.getThreadContext(), systemIndices), + postProcess(request, concreteIndices, aliases, state, systemIndexAccessLevel, threadContext, systemIndices), postProcess(indexNameExpressionResolver, request, state) ) ); @@ -122,7 +135,7 @@ static Map> postProcess( } final Map> finalResponse = Collections.unmodifiableMap(mapBuilder); if (systemIndexAccessLevel != SystemIndexAccessLevel.ALL) { - checkSystemIndexAccess(request, systemIndices, state, finalResponse, systemIndexAccessLevel, threadContext); + checkSystemIndexAccess(systemIndices, state, finalResponse, systemIndexAccessLevel, threadContext); } return finalResponse; } @@ -151,7 +164,6 @@ static Map> postProcess( } private static void checkSystemIndexAccess( - GetAliasesRequest request, SystemIndices systemIndices, ClusterState state, Map> aliasesMap, diff --git 
a/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/TransportRecoveryAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/TransportRecoveryAction.java index 0bd51eba85ff9..c74981d475389 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/TransportRecoveryAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/TransportRecoveryAction.java @@ -20,7 +20,6 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.core.Nullable; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.indices.IndicesService; @@ -102,7 +101,6 @@ protected RecoveryRequest readRequestFrom(StreamInput in) throws IOException { protected void shardOperation(RecoveryRequest request, ShardRouting shardRouting, Task task, ActionListener listener) { ActionListener.completeWith(listener, () -> { assert task instanceof CancellableTask; - runOnShardOperation(); IndexService indexService = indicesService.indexServiceSafe(shardRouting.shardId().getIndex()); IndexShard indexShard = indexService.getShard(shardRouting.shardId().id()); return indexShard.recoveryState(); @@ -123,19 +121,4 @@ protected ClusterBlockException checkGlobalBlock(ClusterState state, RecoveryReq protected ClusterBlockException checkRequestBlock(ClusterState state, RecoveryRequest request, String[] concreteIndices) { return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, concreteIndices); } - - @Nullable // unless running tests that inject extra behaviour - private volatile Runnable onShardOperation; - - private void runOnShardOperation() { - final Runnable onShardOperation = this.onShardOperation; - if (onShardOperation != null) { - onShardOperation.run(); - } - } - - // exposed for tests: inject some extra 
behaviour that runs when shardOperation() is called - void setOnShardOperation(@Nullable Runnable onShardOperation) { - this.onShardOperation = onShardOperation; - } } diff --git a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java index 2f3266f9e0099..b56cb0ca5926c 100644 --- a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java @@ -228,27 +228,7 @@ public final void run() { skipShard(iterator); } if (shardsIts.size() > 0) { - assert request.allowPartialSearchResults() != null : "SearchRequest missing setting for allowPartialSearchResults"; - if (request.allowPartialSearchResults() == false) { - final StringBuilder missingShards = new StringBuilder(); - // Fail-fast verification of all shards being available - for (int index = 0; index < shardsIts.size(); index++) { - final SearchShardIterator shardRoutings = shardsIts.get(index); - if (shardRoutings.size() == 0) { - if (missingShards.length() > 0) { - missingShards.append(", "); - } - missingShards.append(shardRoutings.shardId()); - } - } - if (missingShards.length() > 0) { - // Status red - shard is missing all copies and would produce partial results for an index search - final String msg = "Search rejected due to missing shards [" - + missingShards - + "]. 
Consider using `allow_partial_search_results` setting to bypass this error."; - throw new SearchPhaseExecutionException(getName(), msg, null, ShardSearchFailure.EMPTY_ARRAY); - } - } + doCheckNoMissingShards(getName(), request, shardsIts); Version version = request.minCompatibleShardNode(); if (version != null && Version.CURRENT.minimumCompatibilityVersion().equals(version) == false) { if (checkMinimumVersion(shardsIts) == false) { @@ -434,7 +414,6 @@ public final void executeNextPhase(SearchPhase currentPhase, SearchPhase nextPha logger.debug(() -> format("%s shards failed for phase: [%s]", numShardFailures, currentPhase.getName()), cause); } onPhaseFailure(currentPhase, "Partial shards failure", null); - return; } else { int discrepancy = getNumShards() - successfulOps.get(); assert discrepancy > 0 : "discrepancy: " + discrepancy; @@ -449,8 +428,8 @@ public final void executeNextPhase(SearchPhase currentPhase, SearchPhase nextPha ); } onPhaseFailure(currentPhase, "Partial shards failure (" + discrepancy + " shards unavailable)", null); - return; } + return; } if (logger.isTraceEnabled()) { final String resultsFrom = results.getSuccessfulResults() @@ -840,7 +819,7 @@ void executeNext(Runnable runnable, Thread originalThread) { private static final class PendingExecutions { private final int permits; private int permitsTaken = 0; - private ArrayDeque queue = new ArrayDeque<>(); + private final ArrayDeque queue = new ArrayDeque<>(); PendingExecutions(int permits) { assert permits > 0 : "not enough permits: " + permits; diff --git a/server/src/main/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhase.java b/server/src/main/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhase.java index cef6bf92cc5e6..6e553f254ee8b 100644 --- a/server/src/main/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/CanMatchPreFilterSearchPhase.java @@ -31,7 +31,6 @@ import 
org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.Transport; -import java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.Comparator; @@ -127,7 +126,7 @@ private static boolean assertSearchCoordinationThread() { } @Override - public void run() throws IOException { + public void run() { assert assertSearchCoordinationThread(); checkNoMissingShards(); Version version = request.minCompatibleShardNode(); @@ -159,9 +158,7 @@ private void runCoordinatorRewritePhase() { ); final ShardSearchRequest request = canMatchNodeRequest.createShardSearchRequest(buildShardLevelRequest(searchShardIterator)); if (searchShardIterator.prefiltered()) { - CanMatchShardResponse result = new CanMatchShardResponse(searchShardIterator.skip() == false, null); - result.setShardIndex(request.shardRequestIndex()); - results.consumeResult(result, () -> {}); + consumeResult(searchShardIterator.skip() == false, request); continue; } boolean canMatch = true; @@ -178,9 +175,7 @@ private void runCoordinatorRewritePhase() { if (canMatch) { matchedShardLevelRequests.add(searchShardIterator); } else { - CanMatchShardResponse result = new CanMatchShardResponse(canMatch, null); - result.setShardIndex(request.shardRequestIndex()); - results.consumeResult(result, () -> {}); + consumeResult(false, request); } } if (matchedShardLevelRequests.isEmpty()) { @@ -190,29 +185,15 @@ private void runCoordinatorRewritePhase() { } } + private void consumeResult(boolean canMatch, ShardSearchRequest request) { + CanMatchShardResponse result = new CanMatchShardResponse(canMatch, null); + result.setShardIndex(request.shardRequestIndex()); + results.consumeResult(result, () -> {}); + } + private void checkNoMissingShards() { assert assertSearchCoordinationThread(); - assert request.allowPartialSearchResults() != null : "SearchRequest missing setting for allowPartialSearchResults"; - if (request.allowPartialSearchResults() == false) { - final StringBuilder 
missingShards = new StringBuilder(); - // Fail-fast verification of all shards being available - for (int index = 0; index < shardsIts.size(); index++) { - final SearchShardIterator shardRoutings = shardsIts.get(index); - if (shardRoutings.size() == 0) { - if (missingShards.length() > 0) { - missingShards.append(", "); - } - missingShards.append(shardRoutings.shardId()); - } - } - if (missingShards.length() > 0) { - // Status red - shard is missing all copies and would produce partial results for an index search - final String msg = "Search rejected due to missing shards [" - + missingShards - + "]. Consider using `allow_partial_search_results` setting to bypass this error."; - throw new SearchPhaseExecutionException(getName(), msg, null, ShardSearchFailure.EMPTY_ARRAY); - } - } + doCheckNoMissingShards(getName(), request, shardsIts); } private Map> groupByNode(GroupShardsIterator shards) { @@ -425,7 +406,7 @@ public void onFailure(Exception e) { } @Override - protected void doRun() throws IOException { + protected void doRun() { CanMatchPreFilterSearchPhase.this.run(); } }); diff --git a/server/src/main/java/org/elasticsearch/action/search/ClearScrollResponse.java b/server/src/main/java/org/elasticsearch/action/search/ClearScrollResponse.java index 0a7b53ea8b9c4..8b1116951df82 100644 --- a/server/src/main/java/org/elasticsearch/action/search/ClearScrollResponse.java +++ b/server/src/main/java/org/elasticsearch/action/search/ClearScrollResponse.java @@ -85,7 +85,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws /** * Parse the clear scroll response body into a new {@link ClearScrollResponse} object */ - public static ClosePointInTimeResponse fromXContent(XContentParser parser) throws IOException { + public static ClosePointInTimeResponse fromXContent(XContentParser parser) { return PARSER.apply(parser, null); } diff --git a/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java 
b/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java index dca269f06a3d3..e010e840d3f2d 100644 --- a/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/DfsQueryPhase.java @@ -24,7 +24,6 @@ import org.elasticsearch.search.vectors.KnnScoreDocQueryBuilder; import org.elasticsearch.transport.Transport; -import java.io.IOException; import java.util.ArrayList; import java.util.Comparator; import java.util.List; @@ -71,7 +70,7 @@ final class DfsQueryPhase extends SearchPhase { } @Override - public void run() throws IOException { + public void run() { // TODO we can potentially also consume the actual per shard results from the initial phase here in the aggregateDfs // to free up memory early final CountedCollector counter = new CountedCollector<>( diff --git a/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java b/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java index e7d6eca23498f..cadcd6ca57334 100644 --- a/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java +++ b/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java @@ -51,7 +51,6 @@ */ public class MultiSearchRequest extends ActionRequest implements CompositeIndicesRequest { private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(RestSearchAction.class); - public static final String TYPES_DEPRECATION_MESSAGE = "[types removal]" + " Specifying types in search requests is deprecated."; public static final String FIRST_LINE_EMPTY_DEPRECATION_MESSAGE = "support for empty first line before any action metadata in msearch API is deprecated " + "and will be removed in the next major version"; diff --git a/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequestBuilder.java index 
6f1e8d429edab..57c536f3d371e 100644 --- a/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequestBuilder.java @@ -63,11 +63,4 @@ public MultiSearchRequestBuilder setIndicesOptions(IndicesOptions indicesOptions return this; } - /** - * Sets how many search requests specified in this multi search requests are allowed to be ran concurrently. - */ - public MultiSearchRequestBuilder setMaxConcurrentSearchRequests(int maxConcurrentSearchRequests) { - request().maxConcurrentSearchRequests(maxConcurrentSearchRequests); - return this; - } } diff --git a/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeResponse.java b/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeResponse.java index c6463bcb00f67..92a2a1503aefc 100644 --- a/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeResponse.java +++ b/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeResponse.java @@ -11,27 +11,16 @@ import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.xcontent.ConstructingObjectParser; -import org.elasticsearch.xcontent.ObjectParser; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; import java.util.Objects; -import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; - public final class OpenPointInTimeResponse extends ActionResponse implements ToXContentObject { private static final ParseField ID = new ParseField("id"); - private static final ConstructingObjectParser PARSER; - - static { - PARSER = new ConstructingObjectParser<>("open_point_in_time", true, a -> new 
OpenPointInTimeResponse((String) a[0])); - PARSER.declareField(constructorArg(), (parser, context) -> parser.text(), ID, ObjectParser.ValueType.STRING); - } private final String pointInTimeId; public OpenPointInTimeResponse(String pointInTimeId) { @@ -60,7 +49,4 @@ public String getPointInTimeId() { return pointInTimeId; } - public static OpenPointInTimeResponse fromXContent(XContentParser parser) throws IOException { - return PARSER.parse(parser, null); - } } diff --git a/server/src/main/java/org/elasticsearch/action/search/ParsedScrollId.java b/server/src/main/java/org/elasticsearch/action/search/ParsedScrollId.java index ca68b1865495d..a9f3502bfa631 100644 --- a/server/src/main/java/org/elasticsearch/action/search/ParsedScrollId.java +++ b/server/src/main/java/org/elasticsearch/action/search/ParsedScrollId.java @@ -16,22 +16,15 @@ public class ParsedScrollId { public static final String QUERY_AND_FETCH_TYPE = "queryAndFetch"; - private final String source; - private final String type; private final SearchContextIdForNode[] context; - ParsedScrollId(String source, String type, SearchContextIdForNode[] context) { - this.source = source; + ParsedScrollId(String type, SearchContextIdForNode[] context) { this.type = type; this.context = context; } - public String getSource() { - return source; - } - public String getType() { return type; } diff --git a/server/src/main/java/org/elasticsearch/action/search/QueryPhaseResultConsumer.java b/server/src/main/java/org/elasticsearch/action/search/QueryPhaseResultConsumer.java index f78d5f4005755..ee956b5179902 100644 --- a/server/src/main/java/org/elasticsearch/action/search/QueryPhaseResultConsumer.java +++ b/server/src/main/java/org/elasticsearch/action/search/QueryPhaseResultConsumer.java @@ -520,7 +520,7 @@ private record MergeResult( private static class MergeTask { private final List emptyResults; private QuerySearchResult[] buffer; - private long aggsBufferSize; + private final long aggsBufferSize; private Runnable 
next; private MergeTask(QuerySearchResult[] buffer, long aggsBufferSize, List emptyResults, Runnable next) { diff --git a/server/src/main/java/org/elasticsearch/action/search/RestOpenPointInTimeAction.java b/server/src/main/java/org/elasticsearch/action/search/RestOpenPointInTimeAction.java index 5de59cc6ce878..815deac07dfcd 100644 --- a/server/src/main/java/org/elasticsearch/action/search/RestOpenPointInTimeAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/RestOpenPointInTimeAction.java @@ -18,7 +18,6 @@ import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestToXContentListener; -import java.io.IOException; import java.util.List; import static org.elasticsearch.rest.RestRequest.Method.POST; @@ -37,7 +36,7 @@ public List routes() { } @Override - public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) { final String[] indices = Strings.splitStringByCommaToArray(request.param("index")); final OpenPointInTimeRequest openRequest = new OpenPointInTimeRequest(indices); openRequest.indicesOptions(IndicesOptions.fromRequest(request, OpenPointInTimeRequest.DEFAULT_INDICES_OPTIONS)); diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchContextId.java b/server/src/main/java/org/elasticsearch/action/search/SearchContextId.java index 2b7105cffe2bb..f10650a6401d6 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchContextId.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchContextId.java @@ -41,7 +41,7 @@ public final class SearchContextId { private final Map shards; private final Map aliasFilter; - private transient Set contextIds; + private final transient Set contextIds; SearchContextId(Map shards, Map aliasFilter) { this.shards = shards; diff --git 
a/server/src/main/java/org/elasticsearch/action/search/SearchPhase.java b/server/src/main/java/org/elasticsearch/action/search/SearchPhase.java index 88da2fdfa3a9e..9d3eadcc42bf9 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchPhase.java @@ -7,6 +7,7 @@ */ package org.elasticsearch.action.search; +import org.elasticsearch.cluster.routing.GroupShardsIterator; import org.elasticsearch.core.CheckedRunnable; import java.io.IOException; @@ -37,4 +38,28 @@ public void start() { throw new UncheckedIOException(e); } } + + static void doCheckNoMissingShards(String phaseName, SearchRequest request, GroupShardsIterator shardsIts) { + assert request.allowPartialSearchResults() != null : "SearchRequest missing setting for allowPartialSearchResults"; + if (request.allowPartialSearchResults() == false) { + final StringBuilder missingShards = new StringBuilder(); + // Fail-fast verification of all shards being available + for (int index = 0; index < shardsIts.size(); index++) { + final SearchShardIterator shardRoutings = shardsIts.get(index); + if (shardRoutings.size() == 0) { + if (missingShards.isEmpty() == false) { + missingShards.append(", "); + } + missingShards.append(shardRoutings.shardId()); + } + } + if (missingShards.isEmpty() == false) { + // Status red - shard is missing all copies and would produce partial results for an index search + final String msg = "Search rejected due to missing shards [" + + missingShards + + "]. 
Consider using `allow_partial_search_results` setting to bypass this error."; + throw new SearchPhaseExecutionException(phaseName, msg, null, ShardSearchFailure.EMPTY_ARRAY); + } + } + } } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java index fb554232503f2..5af5c4c2ec602 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java @@ -667,7 +667,7 @@ private static void validateMergeSortValueFormats(Collection statsGroups) { - sourceBuilder().stats(statsGroups); - return this; - } - /** * Indicates whether the response should contain the stored _source for every hit */ diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java b/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java index b6a9179b1e956..56b58cd8ced6c 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java @@ -144,10 +144,6 @@ public RestStatus status() { return RestStatus.status(successfulShards, totalShards, shardFailures); } - public SearchResponseSections getInternalResponse() { - return internalResponse; - } - /** * The search hits. 
*/ @@ -387,7 +383,7 @@ public static SearchResponse innerFromXContent(XContentParser parser) throws IOE } } else if (token == Token.START_ARRAY) { if (RestActions.FAILURES_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { - while ((token = parser.nextToken()) != Token.END_ARRAY) { + while (parser.nextToken() != Token.END_ARRAY) { failures.add(ShardSearchFailure.fromXContent(parser)); } } else { @@ -479,7 +475,7 @@ public static final class Clusters implements ToXContentFragment, Writeable { private final Map clusterInfo; // not Writeable since it is only needed on the (primary) CCS coordinator - private transient Boolean ccsMinimizeRoundtrips; + private final transient Boolean ccsMinimizeRoundtrips; /** * For use with cross-cluster searches. @@ -985,7 +981,7 @@ public static class Builder { private List failures; private TimeValue took; private Boolean timedOut; - private Cluster original; + private final Cluster original; public Builder(Cluster copyFrom) { this.original = copyFrom; @@ -1167,7 +1163,7 @@ public static Cluster fromXContent(String clusterAlias, XContentParser parser) t } } else if (token == Token.START_ARRAY) { if (RestActions.FAILURES_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { - while ((token = parser.nextToken()) != Token.END_ARRAY) { + while (parser.nextToken() != Token.END_ARRAY) { failures.add(ShardSearchFailure.fromXContent(parser)); } } else { diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchScrollAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/SearchScrollAsyncAction.java index 35aae0764e251..df16c107a2619 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchScrollAsyncAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchScrollAsyncAction.java @@ -23,7 +23,6 @@ import org.elasticsearch.transport.RemoteClusterService; import org.elasticsearch.transport.Transport; -import java.io.IOException; import java.util.ArrayList; 
import java.util.Arrays; import java.util.HashSet; @@ -230,7 +229,7 @@ protected SearchPhase sendResponsePhase( ) { return new SearchPhase("fetch") { @Override - public void run() throws IOException { + public void run() { sendResponse(queryPhase, fetchResults); } }; diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java b/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java index d02958567a873..800ad7afbb8db 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java @@ -51,6 +51,7 @@ import org.elasticsearch.transport.TransportActionProxy; import org.elasticsearch.transport.TransportException; import org.elasticsearch.transport.TransportRequest; +import org.elasticsearch.transport.TransportRequestHandler; import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportResponse; import org.elasticsearch.transport.TransportResponseHandler; @@ -366,7 +367,7 @@ public Map getPendingSearchRequests() { } static class ScrollFreeContextRequest extends TransportRequest { - private ShardSearchContextId contextId; + private final ShardSearchContextId contextId; ScrollFreeContextRequest(ShardSearchContextId contextId) { this.contextId = Objects.requireNonNull(contextId); @@ -390,7 +391,7 @@ public ShardSearchContextId id() { } static class SearchFreeContextRequest extends ScrollFreeContextRequest implements IndicesRequest { - private OriginalIndices originalIndices; + private final OriginalIndices originalIndices; SearchFreeContextRequest(OriginalIndices originalIndices, ShardSearchContextId id) { super(id); @@ -428,7 +429,7 @@ public IndicesOptions indicesOptions() { public static class SearchFreeContextResponse extends TransportResponse { - private boolean freed; + private final boolean freed; SearchFreeContextResponse(StreamInput in) throws 
IOException { freed = in.readBoolean(); @@ -541,13 +542,16 @@ public static void registerRequestHandler(TransportService transportService, Sea ); TransportActionProxy.registerProxyAction(transportService, QUERY_FETCH_SCROLL_ACTION_NAME, true, ScrollQueryFetchSearchResult::new); + TransportRequestHandler shardFetchHandler = (request, channel, task) -> searchService.executeFetchPhase( + request, + (SearchShardTask) task, + new ChannelActionListener<>(channel) + ); transportService.registerRequestHandler( FETCH_ID_SCROLL_ACTION_NAME, EsExecutors.DIRECT_EXECUTOR_SERVICE, ShardFetchRequest::new, - (request, channel, task) -> { - searchService.executeFetchPhase(request, (SearchShardTask) task, new ChannelActionListener<>(channel)); - } + shardFetchHandler ); TransportActionProxy.registerProxyAction(transportService, FETCH_ID_SCROLL_ACTION_NAME, true, FetchSearchResult::new); @@ -557,9 +561,7 @@ public static void registerRequestHandler(TransportService transportService, Sea true, true, ShardFetchSearchRequest::new, - (request, channel, task) -> { - searchService.executeFetchPhase(request, (SearchShardTask) task, new ChannelActionListener<>(channel)); - } + shardFetchHandler ); TransportActionProxy.registerProxyAction(transportService, FETCH_ID_ACTION_NAME, true, FetchSearchResult::new); diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchType.java b/server/src/main/java/org/elasticsearch/action/search/SearchType.java index 519f1ce98a7b6..8e6511db62136 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchType.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchType.java @@ -39,7 +39,7 @@ public enum SearchType { */ public static final SearchType[] CURRENTLY_SUPPORTED = { QUERY_THEN_FETCH, DFS_QUERY_THEN_FETCH }; - private byte id; + private final byte id; SearchType(byte id) { this.id = id; diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportMultiSearchAction.java 
b/server/src/main/java/org/elasticsearch/action/search/TransportMultiSearchAction.java index a4a35789db258..a2324010876bf 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportMultiSearchAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportMultiSearchAction.java @@ -16,7 +16,6 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.common.util.concurrent.EsExecutors; @@ -47,13 +46,7 @@ public TransportMultiSearchAction( ActionFilters actionFilters, NodeClient client ) { - super( - MultiSearchAction.NAME, - transportService, - actionFilters, - (Writeable.Reader) MultiSearchRequest::new, - EsExecutors.DIRECT_EXECUTOR_SERVICE - ); + super(MultiSearchAction.NAME, transportService, actionFilters, MultiSearchRequest::new, EsExecutors.DIRECT_EXECUTOR_SERVICE); this.threadPool = threadPool; this.clusterService = clusterService; this.allocatedProcessors = EsExecutors.allocatedProcessors(settings); @@ -70,13 +63,7 @@ public TransportMultiSearchAction( LongSupplier relativeTimeProvider, NodeClient client ) { - super( - MultiSearchAction.NAME, - transportService, - actionFilters, - (Writeable.Reader) MultiSearchRequest::new, - EsExecutors.DIRECT_EXECUTOR_SERVICE - ); + super(MultiSearchAction.NAME, transportService, actionFilters, MultiSearchRequest::new, EsExecutors.DIRECT_EXECUTOR_SERVICE); this.threadPool = threadPool; this.clusterService = clusterService; this.allocatedProcessors = allocatedProcessors; diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java index aeb71a3b03d8f..ae3c735e079e9 
100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java @@ -268,7 +268,7 @@ public void writeTo(StreamOutput out) throws IOException { private class ShardOpenReaderRequestHandler implements TransportRequestHandler { @Override - public void messageReceived(ShardOpenReaderRequest request, TransportChannel channel, Task task) throws Exception { + public void messageReceived(ShardOpenReaderRequest request, TransportChannel channel, Task task) { searchService.openReaderContext( request.getShardId(), request.keepAlive, diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java index a2739e2c2a85e..5030bd875a0f6 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java @@ -39,7 +39,6 @@ import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.logging.DeprecationCategory; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.settings.Setting; @@ -159,13 +158,7 @@ public TransportSearchAction( NamedWriteableRegistry namedWriteableRegistry, ExecutorSelector executorSelector ) { - super( - SearchAction.NAME, - transportService, - actionFilters, - (Writeable.Reader) SearchRequest::new, - EsExecutors.DIRECT_EXECUTOR_SERVICE - ); + super(SearchAction.NAME, transportService, actionFilters, SearchRequest::new, EsExecutors.DIRECT_EXECUTOR_SERVICE); this.threadPool = threadPool; this.circuitBreaker = circuitBreakerService.getBreaker(CircuitBreaker.REQUEST); 
this.searchPhaseController = searchPhaseController; @@ -514,7 +507,7 @@ static void ccsRemoteReduce( clusterAlias, remoteClientResponseExecutor ); - remoteClusterClient.search(ccsSearchRequest, new ActionListener() { + remoteClusterClient.search(ccsSearchRequest, new ActionListener<>() { @Override public void onResponse(SearchResponse searchResponse) { // TODO: in CCS fail fast ticket we may need to fail the query if the cluster is marked as FAILED @@ -749,14 +742,7 @@ private static ActionListener createCCSListener( SearchResponse.Clusters clusters, ActionListener originalListener ) { - return new CCSActionListener( - clusterAlias, - skipUnavailable, - countDown, - exceptions, - clusters, - originalListener - ) { + return new CCSActionListener<>(clusterAlias, skipUnavailable, countDown, exceptions, clusters, originalListener) { @Override void innerOnResponse(SearchResponse searchResponse) { // TODO: in CCS fail fast ticket we may need to fail the query if the cluster gets marked as FAILED @@ -1417,7 +1403,6 @@ abstract static class CCSActionListener implements Acti private final AtomicReference exceptions; protected final SearchResponse.Clusters clusters; private final ActionListener originalListener; - protected final long startTime; /** * Used by both minimize_roundtrips true and false @@ -1436,7 +1421,6 @@ abstract static class CCSActionListener implements Acti this.exceptions = exceptions; this.clusters = clusters; this.originalListener = originalListener; - this.startTime = System.currentTimeMillis(); } @Override @@ -1454,12 +1438,12 @@ public final void onFailure(Exception e) { SearchResponse.Cluster cluster = clusters.getCluster(clusterAlias); if (skipUnavailable) { if (cluster != null) { - ccsClusterInfoUpdate(f, clusters, clusterAlias, skipUnavailable); + ccsClusterInfoUpdate(f, clusters, clusterAlias, true); } // skippedClusters.incrementAndGet(); } else { if (cluster != null) { - ccsClusterInfoUpdate(f, clusters, clusterAlias, skipUnavailable); + 
ccsClusterInfoUpdate(f, clusters, clusterAlias, false); } Exception exception = e; if (RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY.equals(clusterAlias) == false) { diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchHelper.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchHelper.java index 632fbafa0536b..ffaecedb62bba 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchHelper.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchHelper.java @@ -93,7 +93,7 @@ static ParsedScrollId parseScrollId(String scrollId) { if (in.available() > 0) { throw new IllegalArgumentException("Not all bytes were read"); } - return new ParsedScrollId(scrollId, type, context); + return new ParsedScrollId(type, context); } catch (Exception e) { throw new IllegalArgumentException("Cannot parse scroll id", e); } diff --git a/server/src/main/java/org/elasticsearch/action/support/TransportLocalClusterStateAction.java b/server/src/main/java/org/elasticsearch/action/support/TransportLocalClusterStateAction.java new file mode 100644 index 0000000000000..6af5a3a1e8384 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/support/TransportLocalClusterStateAction.java @@ -0,0 +1,65 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.action.support; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.ActionRunnable; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.transport.TransportService; + +import java.util.concurrent.Executor; + +/** + * Analogue of {@link org.elasticsearch.action.support.master.TransportMasterNodeReadAction} except that it runs on the local node rather + * than delegating to the master. + */ +public abstract class TransportLocalClusterStateAction extends + HandledTransportAction { + + protected final ClusterService clusterService; + protected final Executor executor; + + protected TransportLocalClusterStateAction( + String actionName, + ClusterService clusterService, + TransportService transportService, + ActionFilters actionFilters, + Writeable.Reader requestReader, + Executor executor + ) { + // TODO replace DIRECT_EXECUTOR_SERVICE when removing workaround for https://github.com/elastic/elasticsearch/issues/97916 + super(actionName, transportService, actionFilters, requestReader, EsExecutors.DIRECT_EXECUTOR_SERVICE); + this.clusterService = clusterService; + this.executor = executor; + } + + protected abstract ClusterBlockException checkBlock(Request request, ClusterState state); + + @Override + protected final void doExecute(Task task, Request request, ActionListener listener) { + final var state = clusterService.state(); + final var clusterBlockException = checkBlock(request, state); + if (clusterBlockException != null) { + throw clusterBlockException; + } + + // Workaround for 
https://github.com/elastic/elasticsearch/issues/97916 - TODO remove this when we can + executor.execute(ActionRunnable.wrap(listener, l -> localClusterStateOperation(task, request, state, l))); + } + + protected abstract void localClusterStateOperation(Task task, Request request, ClusterState state, ActionListener listener) + throws Exception; +} diff --git a/server/src/main/java/org/elasticsearch/client/internal/Client.java b/server/src/main/java/org/elasticsearch/client/internal/Client.java index 89cb764549767..5ae3870338c35 100644 --- a/server/src/main/java/org/elasticsearch/client/internal/Client.java +++ b/server/src/main/java/org/elasticsearch/client/internal/Client.java @@ -55,7 +55,6 @@ import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Nullable; -import org.elasticsearch.core.Releasable; import java.util.Map; import java.util.concurrent.Executor; @@ -71,7 +70,7 @@ * * @see org.elasticsearch.node.Node#client() */ -public interface Client extends ElasticsearchClient, Releasable { +public interface Client extends ElasticsearchClient { // Note: This setting is registered only for bwc. The value is never read. 
Setting CLIENT_TYPE_SETTING_S = new Setting<>("client.type", "node", (s) -> { diff --git a/server/src/main/java/org/elasticsearch/client/internal/FilterClient.java b/server/src/main/java/org/elasticsearch/client/internal/FilterClient.java index 235fc0a150066..53a8e2e189244 100644 --- a/server/src/main/java/org/elasticsearch/client/internal/FilterClient.java +++ b/server/src/main/java/org/elasticsearch/client/internal/FilterClient.java @@ -45,11 +45,6 @@ protected FilterClient(Settings settings, ThreadPool threadPool, Client in) { this.in = in; } - @Override - public void close() { - in().close(); - } - @Override protected void doExecute( ActionType action, diff --git a/server/src/main/java/org/elasticsearch/client/internal/node/NodeClient.java b/server/src/main/java/org/elasticsearch/client/internal/node/NodeClient.java index b1dfc22cf27d3..0228dc7cc61ea 100644 --- a/server/src/main/java/org/elasticsearch/client/internal/node/NodeClient.java +++ b/server/src/main/java/org/elasticsearch/client/internal/node/NodeClient.java @@ -75,11 +75,6 @@ public List getActionNames() { return actions.keySet().stream().map(ActionType::name).toList(); } - @Override - public void close() { - // nothing really to do - } - @Override public void doExecute( ActionType action, diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java index 625591ba8b90b..048ade3ef86c5 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java @@ -261,6 +261,12 @@ private void allocateUnassigned() { // desired node no longer exists continue; } + if (routingNode.getByShardId(shard.shardId()) != null) { + // node already contains same shard. 
+ // Skipping it allows us to exclude NO decisions from SameShardAllocationDecider and only log more relevant + // NO or THROTTLE decisions of the preventing shard from starting on assigned node + continue; + } final var decision = allocation.deciders().canAllocate(shard, routingNode, allocation); switch (decision.type()) { case YES -> { @@ -287,10 +293,10 @@ private void allocateUnassigned() { case THROTTLE -> { nodeIdsIterator.wasThrottled = true; unallocatedStatus = AllocationStatus.DECIDERS_THROTTLED; - logger.trace("Couldn't assign shard [{}] to [{}]: {}", shard.shardId(), nodeId, decision); + logger.debug("Couldn't assign shard [{}] to [{}]: {}", shard.shardId(), nodeId, decision); } case NO -> { - logger.trace("Couldn't assign shard [{}] to [{}]: {}", shard.shardId(), nodeId, decision); + logger.debug("Couldn't assign shard [{}] to [{}]: {}", shard.shardId(), nodeId, decision); } } } @@ -505,11 +511,14 @@ private void balance() { } } - maybeLogUndesiredAllocationsWarning(allAllocations, undesiredAllocations); + maybeLogUndesiredAllocationsWarning(allAllocations, undesiredAllocations, routingNodes.size()); } - private void maybeLogUndesiredAllocationsWarning(long allAllocations, long undesiredAllocations) { - if (allAllocations > 0 && undesiredAllocations > undesiredAllocationsLogThreshold * allAllocations) { + private void maybeLogUndesiredAllocationsWarning(long allAllocations, long undesiredAllocations, int nodeCount) { + // more shards than cluster can relocate with one reroute + final boolean nonEmptyRelocationBacklog = undesiredAllocations > 2L * nodeCount; + final boolean warningThresholdReached = undesiredAllocations > undesiredAllocationsLogThreshold * allAllocations; + if (allAllocations > 0 && nonEmptyRelocationBacklog && warningThresholdReached) { undesiredAllocationLogInterval.maybeExecute( () -> logger.warn( "[{}] of assigned shards ({}/{}) are not on their desired nodes, which exceeds the warn threshold of [{}]", diff --git 
a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/ShardsAvailabilityHealthIndicatorService.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/shards/ShardsAvailabilityHealthIndicatorService.java similarity index 97% rename from server/src/main/java/org/elasticsearch/cluster/routing/allocation/ShardsAvailabilityHealthIndicatorService.java rename to server/src/main/java/org/elasticsearch/cluster/routing/allocation/shards/ShardsAvailabilityHealthIndicatorService.java index d17f3a297e805..76ca9f88b4b58 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/ShardsAvailabilityHealthIndicatorService.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/shards/ShardsAvailabilityHealthIndicatorService.java @@ -6,7 +6,7 @@ * Side Public License, v 1. */ -package org.elasticsearch.cluster.routing.allocation; +package org.elasticsearch.cluster.routing.allocation.shards; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -25,6 +25,13 @@ import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.UnassignedInfo; +import org.elasticsearch.cluster.routing.allocation.AllocateUnassignedDecision; +import org.elasticsearch.cluster.routing.allocation.AllocationDecision; +import org.elasticsearch.cluster.routing.allocation.AllocationService; +import org.elasticsearch.cluster.routing.allocation.DataTier; +import org.elasticsearch.cluster.routing.allocation.NodeAllocationResult; +import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; +import org.elasticsearch.cluster.routing.allocation.ShardAllocationDecision; import org.elasticsearch.cluster.routing.allocation.decider.Decision; import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDecider; @@ -127,7 +134,7 
@@ public String name() { * @param metadata Metadata for the cluster * @return A new ShardAllocationStatus that has not yet been filled. */ - ShardAllocationStatus createNewStatus(Metadata metadata) { + public ShardAllocationStatus createNewStatus(Metadata metadata) { return new ShardAllocationStatus(metadata); } @@ -415,18 +422,18 @@ static void updateShardAllocationStatus( ) ); - class ShardAllocationCounts { + public class ShardAllocationCounts { int unassigned = 0; int unassigned_new = 0; int unassigned_restarting = 0; int initializing = 0; int started = 0; int relocating = 0; - final Set indicesWithUnavailableShards = new HashSet<>(); - final Set indicesWithAllShardsUnavailable = new HashSet<>(); + public final Set indicesWithUnavailableShards = new HashSet<>(); + public final Set indicesWithAllShardsUnavailable = new HashSet<>(); // We keep the searchable snapshots separately as long as the original index is still available // This is checked during the post-processing - SearchableSnapshotsState searchableSnapshotsState = new SearchableSnapshotsState(); + public SearchableSnapshotsState searchableSnapshotsState = new SearchableSnapshotsState(); final Map> diagnosisDefinitions = new HashMap<>(); public void increment(ShardRouting routing, ClusterState state, NodesShutdownMetadata shutdowns, boolean verbose) { @@ -675,7 +682,7 @@ List checkIsAllocationDisabled(IndexMetadata indexMetadata * @param clusterState the current cluster state. 
* @return A list of diagnoses for the provided unassigned shard */ - List checkDataTierRelatedIssues( + public List checkDataTierRelatedIssues( IndexMetadata indexMetadata, List nodeAllocationResults, ClusterState clusterState @@ -849,12 +856,12 @@ private static Optional checkNotEnoughNodesInDataTier( } } - class ShardAllocationStatus { - final ShardAllocationCounts primaries = new ShardAllocationCounts(); - final ShardAllocationCounts replicas = new ShardAllocationCounts(); - final Metadata clusterMetadata; + public class ShardAllocationStatus { + protected final ShardAllocationCounts primaries = new ShardAllocationCounts(); + protected final ShardAllocationCounts replicas = new ShardAllocationCounts(); + protected final Metadata clusterMetadata; - ShardAllocationStatus(Metadata clusterMetadata) { + public ShardAllocationStatus(Metadata clusterMetadata) { this.clusterMetadata = clusterMetadata; } @@ -1149,7 +1156,7 @@ static List getRestoreFromSnapshotAffectedResources( } } - static class SearchableSnapshotsState { + public static class SearchableSnapshotsState { private final Set searchableSnapshotWithUnavailableShard = new HashSet<>(); private final Set searchableSnapshotWithOriginalIndexAvailable = new HashSet<>(); @@ -1161,7 +1168,7 @@ void addSearchableSnapshotWithOriginalIndexAvailable(String indexName) { searchableSnapshotWithOriginalIndexAvailable.add(indexName); } - Set getRedSearchableSnapshots() { + public Set getRedSearchableSnapshots() { return Sets.difference(searchableSnapshotWithUnavailableShard, searchableSnapshotWithOriginalIndexAvailable); } diff --git a/server/src/main/java/org/elasticsearch/reservedstate/service/MasterNodeFileWatchingService.java b/server/src/main/java/org/elasticsearch/common/file/MasterNodeFileWatchingService.java similarity index 97% rename from server/src/main/java/org/elasticsearch/reservedstate/service/MasterNodeFileWatchingService.java rename to 
server/src/main/java/org/elasticsearch/common/file/MasterNodeFileWatchingService.java index 444cde45d6961..6da0845a7c7ba 100644 --- a/server/src/main/java/org/elasticsearch/reservedstate/service/MasterNodeFileWatchingService.java +++ b/server/src/main/java/org/elasticsearch/common/file/MasterNodeFileWatchingService.java @@ -6,7 +6,7 @@ * Side Public License, v 1. */ -package org.elasticsearch.reservedstate.service; +package org.elasticsearch.common.file; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -15,7 +15,6 @@ import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.file.AbstractFileWatchingService; import java.io.IOException; import java.nio.file.Files; diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 9419cd7e6ab5f..141a06eff0ec6 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -1039,11 +1039,19 @@ private VersionValue getVersionFromMap(BytesRef id) { // but we only need to do this once since the last operation per ID is to add to the version // map so once we pass this point we can safely lookup from the version map. if (versionMap.isUnsafe()) { - lastUnsafeSegmentGenerationForGets.set(lastCommittedSegmentInfos.getGeneration() + 1); refreshInternalSearcher(UNSAFE_VERSION_MAP_REFRESH_SOURCE, true); + // After the refresh, the doc that triggered it must now be part of the last commit. + // In rare cases, there could be other flush cycles completed in between the above line + // and the line below which push the last commit generation further. But that's OK. 
+ // The invariant here is that doc is available within the generations of commits upto + // lastUnsafeSegmentGenerationForGets (inclusive). Therefore it is ok for it be larger + // which means the search shard needs to wait for extra generations and these generations + // are guaranteed to happen since they are all committed. + lastUnsafeSegmentGenerationForGets.set(lastCommittedSegmentInfos.getGeneration()); } versionMap.enforceSafeAccess(); } + // The versionMap can still be unsafe at this point due to archive being unsafe } return versionMap.getUnderLock(id); } diff --git a/server/src/main/java/org/elasticsearch/indices/SystemIndexMappingUpdateService.java b/server/src/main/java/org/elasticsearch/indices/SystemIndexMappingUpdateService.java index a0667db91daf6..6f272d29efee2 100644 --- a/server/src/main/java/org/elasticsearch/indices/SystemIndexMappingUpdateService.java +++ b/server/src/main/java/org/elasticsearch/indices/SystemIndexMappingUpdateService.java @@ -92,7 +92,7 @@ public void clusterChanged(ClusterChangedEvent event) { } // if we're in a mixed-version cluster, exit - if (state.hasMixedSystemIndexVersions()) { + if (state.nodes().getMaxNodeVersion().after(state.nodes().getSmallestNonClientNodeVersion())) { logger.debug("Skipping system indices up-to-date check as cluster has mixed versions"); return; } diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index 5f8f35ad3cd2b..1c1b9745befe8 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -43,7 +43,6 @@ import org.elasticsearch.core.Assertions; import org.elasticsearch.core.IOUtils; import org.elasticsearch.core.PathUtils; -import org.elasticsearch.core.Releasables; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.core.TimeValue; import org.elasticsearch.env.Environment; @@ -508,8 +507,6 @@ public synchronized void close() throws 
IOException { toClose.add(injector.getInstance(SnapshotsService.class)); toClose.add(injector.getInstance(SnapshotShardsService.class)); toClose.add(injector.getInstance(RepositoriesService.class)); - toClose.add(() -> stopWatch.stop().start("client")); - Releasables.close(injector.getInstance(Client.class)); toClose.add(() -> stopWatch.stop().start("indices_cluster")); toClose.add(injector.getInstance(IndicesClusterStateService.class)); toClose.add(() -> stopWatch.stop().start("indices")); diff --git a/server/src/main/java/org/elasticsearch/plugins/PluginDescriptor.java b/server/src/main/java/org/elasticsearch/plugins/PluginDescriptor.java index 13baae5950d6c..e0ee229fe1f98 100644 --- a/server/src/main/java/org/elasticsearch/plugins/PluginDescriptor.java +++ b/server/src/main/java/org/elasticsearch/plugins/PluginDescriptor.java @@ -399,8 +399,8 @@ public String getVersion() { * * @return an Elasticsearch version */ - public Version getElasticsearchVersion() { - return Version.fromString(elasticsearchVersion); + public String getElasticsearchVersion() { + return elasticsearchVersion; } /** diff --git a/server/src/main/java/org/elasticsearch/plugins/PluginsUtils.java b/server/src/main/java/org/elasticsearch/plugins/PluginsUtils.java index 4d30ca1f1a261..0533f535a19f1 100644 --- a/server/src/main/java/org/elasticsearch/plugins/PluginsUtils.java +++ b/server/src/main/java/org/elasticsearch/plugins/PluginsUtils.java @@ -11,7 +11,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.Build; -import org.elasticsearch.Version; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.jdk.JarHell; @@ -30,6 +30,8 @@ import java.util.Map; import java.util.Set; import java.util.function.Function; +import java.util.regex.Matcher; +import java.util.regex.Pattern; import java.util.stream.Collectors; /** @@ -77,43 +79,111 @@ public static List 
findPluginDirs(final Path rootPath) throws IOException * Verify the given plugin is compatible with the current Elasticsearch installation. */ public static void verifyCompatibility(PluginDescriptor info) { - if (info.isStable()) { - if (info.getElasticsearchVersion().major != Version.CURRENT.major) { - throw new IllegalArgumentException( - "Stable Plugin [" - + info.getName() - + "] was built for Elasticsearch major version " - + info.getElasticsearchVersion().major - + " but version " - + Version.CURRENT - + " is running" + final String currentVersion = Build.current().version(); + Matcher buildVersionMatcher = SemanticVersion.semanticPattern.matcher(currentVersion); + // If we're not on a semantic version, assume plugins are compatible + if (buildVersionMatcher.matches()) { + SemanticVersion currentElasticsearchSemanticVersion; + try { + currentElasticsearchSemanticVersion = new SemanticVersion( + Integer.parseInt(buildVersionMatcher.group(1)), + Integer.parseInt(buildVersionMatcher.group(2)), + Integer.parseInt(buildVersionMatcher.group(3)) ); + } catch (NumberFormatException e) { + throw new IllegalArgumentException("Couldn't parse integers from build version [" + currentVersion + "]", e); } - if (info.getElasticsearchVersion().after(Version.CURRENT)) { + if (info.isStable()) { + Matcher pluginEsVersionMatcher = SemanticVersion.semanticPattern.matcher(info.getElasticsearchVersion()); + if (pluginEsVersionMatcher.matches() == false) { + throw new IllegalArgumentException( + "Expected semantic version for plugin [" + info.getName() + "] but was [" + info.getElasticsearchVersion() + "]" + ); + } + SemanticVersion pluginElasticsearchSemanticVersion; + try { + pluginElasticsearchSemanticVersion = new SemanticVersion( + Integer.parseInt(pluginEsVersionMatcher.group(1)), + Integer.parseInt(pluginEsVersionMatcher.group(2)), + Integer.parseInt(pluginEsVersionMatcher.group(3)) + ); + } catch (NumberFormatException e) { + throw new IllegalArgumentException( + "Expected 
integer version for plugin [" + info.getName() + "] but found [" + info.getElasticsearchVersion() + "]", + e + ); + } + + // case: Major version mismatch + if (pluginElasticsearchSemanticVersion.major != currentElasticsearchSemanticVersion.major) { + throw new IllegalArgumentException( + "Stable Plugin [" + + info.getName() + + "] was built for Elasticsearch major version " + + pluginElasticsearchSemanticVersion.major + + " but version " + + currentVersion + + " is running" + ); + } + + // case: stable plugin from the future + if (pluginElasticsearchSemanticVersion.after(currentElasticsearchSemanticVersion)) { + throw new IllegalArgumentException( + "Stable Plugin [" + + info.getName() + + "] was built for Elasticsearch version " + + info.getElasticsearchVersion() + + " but earlier version " + + currentVersion + + " is running" + ); + } + } else if (info.getElasticsearchVersion().equals(currentVersion) == false) { throw new IllegalArgumentException( - "Stable Plugin [" + "Plugin [" + info.getName() + "] was built for Elasticsearch version " + info.getElasticsearchVersion() - + " but earlier version " - + Version.CURRENT + + " but version " + + currentVersion + " is running" ); } - } else if (info.getElasticsearchVersion().equals(Version.CURRENT) == false) { - throw new IllegalArgumentException( - "Plugin [" - + info.getName() - + "] was built for Elasticsearch version " - + info.getElasticsearchVersion() - + " but version " - + Version.CURRENT - + " is running" - ); } JarHell.checkJavaVersion(info.getName(), info.getJavaVersion()); } + private record SemanticVersion(int major, int minor, int bugfix) { + + static final Pattern semanticPattern = Pattern.compile("^(\\d+)\\.(\\d+)\\.(\\d+)$"); + + // does not compare anything after the semantic version + boolean after(SemanticVersion other) { + // major + if (this.major < other.major) { + return false; + } + if (this.major > other.major) { + return true; + } + // minor + if (this.minor < other.minor) { + return false; 
+ } + if (this.minor > other.minor) { + return true; + } + // bugfix + return this.bugfix > other.bugfix; + } + + @Override + public String toString() { + return Strings.format("%d.%d.%d", this.major, this.minor, this.bugfix); + } + } + /** * Check for the existence of a marker file that indicates any plugins are in a garbage state from a failed attempt to remove the * plugin. diff --git a/server/src/main/java/org/elasticsearch/reservedstate/service/FileSettingsService.java b/server/src/main/java/org/elasticsearch/reservedstate/service/FileSettingsService.java index 2d13af0248a73..83676925a3ae7 100644 --- a/server/src/main/java/org/elasticsearch/reservedstate/service/FileSettingsService.java +++ b/server/src/main/java/org/elasticsearch/reservedstate/service/FileSettingsService.java @@ -16,6 +16,7 @@ import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.metadata.ReservedStateMetadata; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.file.MasterNodeFileWatchingService; import org.elasticsearch.env.Environment; import org.elasticsearch.xcontent.XContentParserConfiguration; diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAliasesAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAliasesAction.java index 661ecf38c8523..b6e1240a3f85a 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAliasesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAliasesAction.java @@ -16,8 +16,11 @@ import org.elasticsearch.cluster.metadata.DataStreamAlias; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.logging.DeprecationCategory; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.xcontent.XContentHelper; +import 
org.elasticsearch.core.RestApiVersion; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestResponse; @@ -25,6 +28,7 @@ import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestBuilderListener; +import org.elasticsearch.rest.action.RestCancellableNodeClient; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; @@ -46,6 +50,8 @@ @ServerlessScope(Scope.PUBLIC) public class RestGetAliasesAction extends BaseRestHandler { + private static final DeprecationLogger DEPRECATION_LOGGER = DeprecationLogger.getLogger(RestGetAliasesAction.class); + @Override public List routes() { return List.of( @@ -201,16 +207,30 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC final String[] indices = Strings.splitStringByCommaToArray(request.param("index")); getAliasesRequest.indices(indices); getAliasesRequest.indicesOptions(IndicesOptions.fromRequest(request, getAliasesRequest.indicesOptions())); - getAliasesRequest.local(request.paramAsBoolean("local", getAliasesRequest.local())); + + if (request.hasParam("local")) { + // consume this param just for validation + final var localParam = request.paramAsBoolean("local", false); + if (request.getRestApiVersion() != RestApiVersion.V_7) { + DEPRECATION_LOGGER.critical( + DeprecationCategory.API, + "get-aliases-local", + "the [?local={}] query parameter to get-aliases requests has no effect and will be removed in a future version", + localParam + ); + } + } // we may want to move this logic to TransportGetAliasesAction but it is based on the original provided aliases, which will // not always be available there (they may get replaced so retrieving request.aliases is not quite the same). 
- return channel -> client.admin().indices().getAliases(getAliasesRequest, new RestBuilderListener<>(channel) { - @Override - public RestResponse buildResponse(GetAliasesResponse response, XContentBuilder builder) throws Exception { - return buildRestResponse(namesProvided, aliases, response.getAliases(), response.getDataStreamAliases(), builder); - } - }); + return channel -> new RestCancellableNodeClient(client, request.getHttpChannel()).admin() + .indices() + .getAliases(getAliasesRequest, new RestBuilderListener<>(channel) { + @Override + public RestResponse buildResponse(GetAliasesResponse response, XContentBuilder builder) throws Exception { + return buildRestResponse(namesProvided, aliases, response.getAliases(), response.getDataStreamAliases(), builder); + } + }); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestAliasAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestAliasAction.java index b8f083115614f..dc99b970864b2 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestAliasAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestAliasAction.java @@ -14,10 +14,14 @@ import org.elasticsearch.cluster.metadata.AliasMetadata; import org.elasticsearch.common.Strings; import org.elasticsearch.common.Table; +import org.elasticsearch.common.logging.DeprecationCategory; +import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.core.RestApiVersion; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; +import org.elasticsearch.rest.action.RestCancellableNodeClient; import org.elasticsearch.rest.action.RestResponseListener; import java.util.List; @@ -28,6 +32,8 @@ @ServerlessScope(Scope.PUBLIC) public class RestAliasAction extends AbstractCatAction { + private static final DeprecationLogger DEPRECATION_LOGGER = 
DeprecationLogger.getLogger(RestAliasAction.class); + @Override public List routes() { return List.of(new Route(GET, "/_cat/aliases"), new Route(GET, "/_cat/aliases/{alias}")); @@ -49,15 +55,29 @@ protected RestChannelConsumer doCatRequest(final RestRequest request, final Node ? new GetAliasesRequest(Strings.commaDelimitedListToStringArray(request.param("alias"))) : new GetAliasesRequest(); getAliasesRequest.indicesOptions(IndicesOptions.fromRequest(request, getAliasesRequest.indicesOptions())); - getAliasesRequest.local(request.paramAsBoolean("local", getAliasesRequest.local())); - return channel -> client.admin().indices().getAliases(getAliasesRequest, new RestResponseListener(channel) { - @Override - public RestResponse buildResponse(GetAliasesResponse response) throws Exception { - Table tab = buildTable(request, response); - return RestTable.buildResponse(tab, channel); + if (request.hasParam("local")) { + // consume this param just for validation + final var localParam = request.paramAsBoolean("local", false); + if (request.getRestApiVersion() != RestApiVersion.V_7) { + DEPRECATION_LOGGER.critical( + DeprecationCategory.API, + "cat-aliases-local", + "the [?local={}] query parameter to cat-aliases requests has no effect and will be removed in a future version", + localParam + ); } - }); + } + + return channel -> new RestCancellableNodeClient(client, request.getHttpChannel()).admin() + .indices() + .getAliases(getAliasesRequest, new RestResponseListener<>(channel) { + @Override + public RestResponse buildResponse(GetAliasesResponse response) throws Exception { + Table tab = buildTable(request, response); + return RestTable.buildResponse(tab, channel); + } + }); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/AdaptingAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/AdaptingAggregator.java index 0be4e7f729bbf..f96b732b9464f 100644 --- 
a/server/src/main/java/org/elasticsearch/search/aggregations/AdaptingAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/AdaptingAggregator.java @@ -130,10 +130,6 @@ public void collectDebugInfo(BiConsumer add) { add.accept("delegate_debug", delegateDebug); } - public Aggregator delegate() { - return delegate; - } - @Override public String toString() { return name(); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/AggregationBuilders.java b/server/src/main/java/org/elasticsearch/search/aggregations/AggregationBuilders.java index 3f998bffd1860..d78567f3effdb 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/AggregationBuilders.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/AggregationBuilders.java @@ -75,7 +75,6 @@ import org.elasticsearch.search.aggregations.metrics.TopHitsAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.ValueCount; import org.elasticsearch.search.aggregations.metrics.ValueCountAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.WeightedAvgAggregationBuilder; import java.util.List; @@ -100,13 +99,6 @@ public static AvgAggregationBuilder avg(String name) { return new AvgAggregationBuilder(name); } - /** - * Create a new {@link Avg} aggregation with the given name. - */ - public static WeightedAvgAggregationBuilder weightedAvg(String name) { - return new WeightedAvgAggregationBuilder(name); - } - /** * Create a new {@link Max} aggregation with the given name. 
*/ diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactory.java index bf74494e872bb..15b5c29589227 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactory.java @@ -64,8 +64,6 @@ public String name() { return name; } - public void doValidate() {} - protected abstract Aggregator createInternal(Aggregator parent, CardinalityUpperBound cardinality, Map metadata) throws IOException; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/PipelineAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/PipelineAggregationBuilder.java index c43eb252a5364..153a54bf890e4 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/PipelineAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/PipelineAggregationBuilder.java @@ -112,11 +112,6 @@ public Collection getSiblingAggregations() { return siblingAggregations; } - @Override - public Collection getSiblingPipelineAggregations() { - return siblingPipelineAggregations; - } - @Override public void validateHasParent(String type, String name) { addValidationError(type + " aggregation [" + name + "] must be declared inside of another aggregation"); @@ -155,11 +150,6 @@ public Collection getSiblingAggregations() { return parent.getSubAggregations(); } - @Override - public Collection getSiblingPipelineAggregations() { - return parent.getPipelineAggregations(); - } - @Override public void validateHasParent(String type, String name) { // There is a parent inside the tree. 
@@ -181,11 +171,6 @@ public void validateParentAggSequentiallyOrderedWithoutSkips(String type, String */ public abstract Collection getSiblingAggregations(); - /** - * Pipeline aggregations that are siblings to the aggregation being validated. - */ - public abstract Collection getSiblingPipelineAggregations(); - /** * Add a validation error to this context. All validation errors * are accumulated in a list and, if there are any, the request diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/PipelineAggregatorBuilders.java b/server/src/main/java/org/elasticsearch/search/aggregations/PipelineAggregatorBuilders.java index f280eb4de61bb..61da00241a4ea 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/PipelineAggregatorBuilders.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/PipelineAggregatorBuilders.java @@ -11,7 +11,6 @@ import org.elasticsearch.script.Script; import org.elasticsearch.search.aggregations.pipeline.AvgBucketPipelineAggregationBuilder; import org.elasticsearch.search.aggregations.pipeline.BucketScriptPipelineAggregationBuilder; -import org.elasticsearch.search.aggregations.pipeline.CumulativeSumPipelineAggregationBuilder; import org.elasticsearch.search.aggregations.pipeline.ExtendedStatsBucketPipelineAggregationBuilder; import org.elasticsearch.search.aggregations.pipeline.MaxBucketPipelineAggregationBuilder; import org.elasticsearch.search.aggregations.pipeline.MinBucketPipelineAggregationBuilder; @@ -62,10 +61,6 @@ public static BucketScriptPipelineAggregationBuilder bucketScript(String name, S return new BucketScriptPipelineAggregationBuilder(name, script, bucketsPaths); } - public static CumulativeSumPipelineAggregationBuilder cumulativeSum(String name, String bucketsPath) { - return new CumulativeSumPipelineAggregationBuilder(name, bucketsPath); - } - public static SerialDiffPipelineAggregationBuilder diff(String name, String bucketsPath) { return new 
SerialDiffPipelineAggregationBuilder(name, bucketsPath); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/IteratorAndCurrent.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/IteratorAndCurrent.java index af8757e10ccf7..37c5f49dc55fe 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/IteratorAndCurrent.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/IteratorAndCurrent.java @@ -21,10 +21,6 @@ public IteratorAndCurrent(Iterator iterator) { this.current = iterator.next(); } - public Iterator getIterator() { - return iterator; - } - public B current() { return current; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/BinaryValuesSource.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/BinaryValuesSource.java index 58fd7f85f6076..62f587f5249d1 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/BinaryValuesSource.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/BinaryValuesSource.java @@ -48,7 +48,7 @@ class BinaryValuesSource extends SingleDimensionValuesSource { int size, int reverseMul ) { - super(bigArrays, format, fieldType, missingBucket, missingOrder, size, reverseMul); + super(bigArrays, format, fieldType, missingBucket, missingOrder, reverseMul); this.breakerConsumer = breakerConsumer; this.docValuesFunc = docValuesFunc; this.values = bigArrays.newObjectArray(Math.min(size, 100)); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java index 2cd33e470e3e5..d31d3a18b3567 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java +++ 
b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java @@ -212,13 +212,6 @@ public ZoneId timeZone() { return timeZone; } - /** - * Get the offset to use when rounding, which is a number of milliseconds. - */ - public long offset() { - return offset; - } - /** * Set the offset on this builder, which is a number of milliseconds. * @return this for chaining diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DoubleValuesSource.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DoubleValuesSource.java index 2dc48b7ce0e2d..752c4ecf97401 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DoubleValuesSource.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DoubleValuesSource.java @@ -43,7 +43,7 @@ class DoubleValuesSource extends SingleDimensionValuesSource { int size, int reverseMul ) { - super(bigArrays, format, fieldType, missingBucket, missingOrder, size, reverseMul); + super(bigArrays, format, fieldType, missingBucket, missingOrder, reverseMul); this.docValuesFunc = docValuesFunc; this.bits = this.missingBucket ? 
new BitArray(100, bigArrays) : null; boolean success = false; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/GlobalOrdinalValuesSource.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/GlobalOrdinalValuesSource.java index 855b456546314..f833bb39b3b56 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/GlobalOrdinalValuesSource.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/GlobalOrdinalValuesSource.java @@ -76,7 +76,7 @@ class GlobalOrdinalValuesSource extends SingleDimensionValuesSource { int size, int reverseMul ) { - super(bigArrays, format, type, missingBucket, missingOrder, size, reverseMul); + super(bigArrays, format, type, missingBucket, missingOrder, reverseMul); this.uniqueValueCount = uniqueValueCount; this.docValuesFunc = docValuesFunc; this.values = bigArrays.newLongArray(Math.min(size, 100), false); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/HistogramValuesSourceBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/HistogramValuesSourceBuilder.java index e3ca337ef8a8c..9d17db7a77864 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/HistogramValuesSourceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/HistogramValuesSourceBuilder.java @@ -147,13 +147,6 @@ public String type() { return TYPE; } - /** - * Returns the interval that is set on this source - **/ - public double interval() { - return interval; - } - /** * Sets the interval on this source. 
**/ diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/LongValuesSource.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/LongValuesSource.java index 7613d926d3c6d..3d79509ad9377 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/LongValuesSource.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/LongValuesSource.java @@ -59,7 +59,7 @@ class LongValuesSource extends SingleDimensionValuesSource { int size, int reverseMul ) { - super(bigArrays, format, fieldType, missingBucket, missingOrder, size, reverseMul); + super(bigArrays, format, fieldType, missingBucket, missingOrder, reverseMul); this.bigArrays = bigArrays; this.docValuesFunc = docValuesFunc; this.rounding = rounding; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/SingleDimensionValuesSource.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/SingleDimensionValuesSource.java index bd3c8b7eb322c..6376c5334d7b5 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/SingleDimensionValuesSource.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/SingleDimensionValuesSource.java @@ -31,8 +31,6 @@ abstract class SingleDimensionValuesSource> implements R protected final MappedFieldType fieldType; protected final boolean missingBucket; protected final MissingOrder missingOrder; - - protected final int size; protected final int reverseMul; protected T afterValue; @@ -45,7 +43,6 @@ abstract class SingleDimensionValuesSource> implements R * @param fieldType The field type or null if the source is a script. * @param missingBucket If true, an explicit `null bucket represents documents with missing values. * @param missingOrder How to order missing buckets if missingBucket is true. - * @param size The number of values to record. 
* @param reverseMul -1 if the natural order ({@link SortOrder#ASC} should be reversed. */ SingleDimensionValuesSource( @@ -54,7 +51,6 @@ abstract class SingleDimensionValuesSource> implements R @Nullable MappedFieldType fieldType, boolean missingBucket, MissingOrder missingOrder, - int size, int reverseMul ) { this.bigArrays = bigArrays; @@ -62,7 +58,6 @@ abstract class SingleDimensionValuesSource> implements R this.fieldType = fieldType; this.missingBucket = missingBucket; this.missingOrder = missingOrder; - this.size = size; this.reverseMul = reverseMul; this.afterValue = null; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregationBuilder.java index 81678404d1dab..c96fe5ad550f4 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregationBuilder.java @@ -198,13 +198,6 @@ public FiltersAggregationBuilder keyedBucket(boolean keyedBucket) { return this; } - /** - * Get whether to return keyed bucket in array - */ - public boolean keyedBucket() { - return keyedBucket; - } - @Override public BucketCardinality bucketCardinality() { return BucketCardinality.MANY; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java index 882b4960dd36c..ff9495ca4d825 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java @@ -151,10 +151,6 @@ public GeoGridAggregationBuilder size(int size) { return this; } - public int size() { - return 
requiredSize; - } - public GeoGridAggregationBuilder shardSize(int shardSize) { if (shardSize <= 0) { throw new IllegalArgumentException("[shardSize] must be greater than 0. Found [" + shardSize + "] in [" + name + "]"); @@ -163,10 +159,6 @@ public GeoGridAggregationBuilder shardSize(int shardSize) { return this; } - public int shardSize() { - return shardSize; - } - public GeoGridAggregationBuilder setGeoBoundingBox(GeoBoundingBox geoBoundingBox) { this.geoBoundingBox = geoBoundingBox; // no validation done here, similar to geo_bounding_box query behavior. diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java index ebbb290e1db9c..2653f9ac53553 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java @@ -290,11 +290,6 @@ public DateHistogramAggregationBuilder extendedBounds(LongBounds extendedBounds) return this; } - /** Return hard bounds for this histogram, or {@code null} if none are set. */ - public LongBounds hardBounds() { - return hardBounds; - } - /** Set hard bounds on this histogram, specifying boundaries outside which buckets cannot be created. 
*/ public DateHistogramAggregationBuilder hardBounds(LongBounds hardBounds) { if (hardBounds == null) { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java index b62e3c9e91f6f..1a75766c40a6b 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java @@ -117,10 +117,6 @@ public DateHistogramAggregatorFactory( this.rounding = rounding; } - public long minDocCount() { - return minDocCount; - } - @Override protected Aggregator doCreateInternal(Aggregator parent, CardinalityUpperBound cardinality, Map metadata) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateIntervalWrapper.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateIntervalWrapper.java index 7218bef0b9d9b..2371506082f1b 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateIntervalWrapper.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateIntervalWrapper.java @@ -26,7 +26,6 @@ import java.io.IOException; import java.time.ZoneId; -import java.util.Locale; import java.util.Objects; import static org.elasticsearch.core.RestApiVersion.equalTo; @@ -59,10 +58,6 @@ public enum IntervalTypeEnum implements Writeable { @Deprecated LEGACY_DATE_HISTO(null); - public static IntervalTypeEnum fromString(String name) { - return valueOf(name.trim().toUpperCase(Locale.ROOT)); - } - public static IntervalTypeEnum fromStream(StreamInput in) throws IOException { return in.readEnum(IntervalTypeEnum.class); } @@ -72,10 +67,6 @@ public void writeTo(StreamOutput out) throws IOException { 
out.writeEnum(this); } - public String value() { - return name().toLowerCase(Locale.ROOT); - } - public boolean isValid() { // I'm being a little cheeky here and just reusing the name for signaling invlaid choices too return this.preferredName != null; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregationBuilder.java index 0740557a526d3..f5fb2d128f75f 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregationBuilder.java @@ -216,10 +216,6 @@ public double maxBound() { return DoubleBounds.getEffectiveMax(extendedBounds); } - protected DoubleBounds extendedBounds() { - return extendedBounds; - } - /** * Set extended bounds on this builder: buckets between {@code minBound} and * {@code maxBound} will be created even if no documents fell into these diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregatorFactory.java index d0ec504f0aa2b..ca61a5f4ddcf6 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregatorFactory.java @@ -84,10 +84,6 @@ public HistogramAggregatorFactory( this.hardBounds = hardBounds; } - public long minDocCount() { - return minDocCount; - } - @Override protected Aggregator doCreateInternal(Aggregator parent, CardinalityUpperBound cardinality, Map metadata) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java 
b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java index 4eaec7034b7f4..ed883a4b04d6b 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java @@ -269,10 +269,6 @@ public List getBuckets() { return Collections.unmodifiableList(buckets); } - DocValueFormat getFormatter() { - return format; - } - long getMinDocCount() { return minDocCount; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalVariableWidthHistogram.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalVariableWidthHistogram.java index 9f464fa1b23cb..5686c0ea11dfa 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalVariableWidthHistogram.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalVariableWidthHistogram.java @@ -187,10 +187,6 @@ public int compareKey(InternalVariableWidthHistogram.Bucket other) { return Double.compare(centroid, other.centroid); // Use centroid for bucket ordering } - public DocValueFormat getFormatter() { - return format; - } - Bucket finalizeSampling(SamplingContext samplingContext) { return new Bucket( centroid, @@ -282,10 +278,6 @@ public List getBuckets() { return Collections.unmodifiableList(buckets); } - DocValueFormat getFormatter() { - return format; - } - public int getTargetBuckets() { return targetNumBuckets; } @@ -525,7 +517,7 @@ private void mergeBucketsWithSameMin(List buckets, AggregationReduceCont * * After this adjustment, A will contain more values than indicated and B will have less. 
*/ - private static void adjustBoundsForOverlappingBuckets(List buckets, AggregationReduceContext reduceContext) { + private static void adjustBoundsForOverlappingBuckets(List buckets) { for (int i = 1; i < buckets.size(); i++) { Bucket curBucket = buckets.get(i); Bucket prevBucket = buckets.get(i - 1); @@ -545,7 +537,7 @@ public InternalAggregation reduce(List aggregations, Aggreg if (reduceContext.isFinalReduce()) { buckets.sort(Comparator.comparing(Bucket::min)); mergeBucketsWithSameMin(reducedBuckets, reduceContext); - adjustBoundsForOverlappingBuckets(reducedBuckets, reduceContext); + adjustBoundsForOverlappingBuckets(reducedBuckets); } return new InternalVariableWidthHistogram(getName(), reducedBuckets, emptyBucketInfo, targetNumBuckets, format, metadata); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/ParsedVariableWidthHistogram.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/ParsedVariableWidthHistogram.java index cc29ce21c2507..ba33373354f3e 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/ParsedVariableWidthHistogram.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/ParsedVariableWidthHistogram.java @@ -93,16 +93,6 @@ public double getMin() { return min; } - public String getMinAsString() { - if (minAsString != null) { - return minAsString; - } - if (min != null) { - return Double.toString(min); - } - return null; - } - public void setMax(Double max) { this.max = max; } @@ -115,16 +105,6 @@ public double getMax() { return max; } - public String getMaxAsString() { - if (maxAsString != null) { - return maxAsString; - } - if (max != null) { - return Double.toString(max); - } - return null; - } - static ParsedBucket fromXContent(XContentParser parser, boolean keyed) throws IOException { final ParsedBucket bucket = new ParsedBucket(); bucket.setKeyed(keyed); diff --git 
a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregationBuilder.java index f7870df45648e..04e73691979f8 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregationBuilder.java @@ -114,10 +114,6 @@ public VariableWidthHistogramAggregationBuilder setInitialBuffer(int initialBuff return this; } - public int getNumBuckets() { - return numBuckets; - } - public int getShardSize() { if (shardSize == -1) { return numBuckets * 50; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregationBuilder.java index f4ba3db383586..9df53a0cfe826 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregationBuilder.java @@ -69,13 +69,6 @@ protected void doWriteTo(StreamOutput out) throws IOException { out.writeString(path); } - /** - * Get the path to use for this nested aggregation. 
- */ - public String path() { - return path; - } - @Override public BucketCardinality bucketCardinality() { return BucketCardinality.ONE; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregationBuilder.java index e6c4e59bf3f93..71e6c6ace203d 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregationBuilder.java @@ -72,13 +72,6 @@ public ReverseNestedAggregationBuilder path(String path) { return this; } - /** - * Get the path to use for this nested aggregation. - */ - public String path() { - return path; - } - @Override public BucketCardinality bucketCardinality() { return BucketCardinality.ONE; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregator.java index d64e016be7351..789f936359dfa 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregator.java @@ -33,7 +33,6 @@ public class ReverseNestedAggregator extends BucketsAggregator implements Single static final ParseField PATH_FIELD = new ParseField("path"); - private final Query parentFilter; private final BitSetProducer parentBitsetProducer; public ReverseNestedAggregator( @@ -46,6 +45,7 @@ public ReverseNestedAggregator( Map metadata ) throws IOException { super(name, factories, context, parent, cardinality, metadata); + Query parentFilter; if (objectMapper == null) { parentFilter = Queries.newNonNestedFilter(context.getIndexSettings().getIndexVersionCreated()); } else { 
@@ -102,7 +102,4 @@ public InternalAggregation buildEmptyAggregation() { return new InternalReverseNested(name, 0, buildEmptySubAggregations(), metadata()); } - Query getParentFilter() { - return parentFilter; - } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/prefix/IpPrefixAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/prefix/IpPrefixAggregator.java index d7e807cc7cc6b..eb8b0f95047b9 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/prefix/IpPrefixAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/prefix/IpPrefixAggregator.java @@ -53,22 +53,6 @@ public IpPrefix(boolean isIpv6, int prefixLength, boolean appendPrefixLength, By this.netmask = netmask; } - public boolean isIpv6() { - return isIpv6; - } - - public int getPrefixLength() { - return prefixLength; - } - - public boolean appendPrefixLength() { - return appendPrefixLength; - } - - public BytesRef getNetmask() { - return netmask; - } - @Override public boolean equals(Object o) { if (this == o) return true; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/AbstractRangeAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/AbstractRangeAggregatorFactory.java index a084f251693a4..a8476071ee52d 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/AbstractRangeAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/AbstractRangeAggregatorFactory.java @@ -17,7 +17,6 @@ import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory; import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; -import org.elasticsearch.search.aggregations.support.ValuesSourceRegistry; import java.io.IOException; import java.util.Map; @@ 
-27,12 +26,10 @@ public class AbstractRangeAggregatorFactory extends ValuesSourc private final InternalRange.Factory rangeFactory; private final R[] ranges; private final boolean keyed; - private final ValuesSourceRegistry.RegistryKey registryKey; private final RangeAggregatorSupplier aggregatorSupplier; public AbstractRangeAggregatorFactory( String name, - ValuesSourceRegistry.RegistryKey registryKey, ValuesSourceConfig config, R[] ranges, boolean keyed, @@ -47,7 +44,6 @@ public AbstractRangeAggregatorFactory( this.ranges = ranges; this.keyed = keyed; this.rangeFactory = rangeFactory; - this.registryKey = registryKey; this.aggregatorSupplier = aggregatorSupplier; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/DateRangeAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/DateRangeAggregationBuilder.java index 1dfb7a8dac2f5..59baf14f988f6 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/DateRangeAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/DateRangeAggregationBuilder.java @@ -240,14 +240,6 @@ public DateRangeAggregationBuilder addUnboundedTo(String key, double to) { return this; } - /** - * Same as {@link #addUnboundedTo(String, double)} but the key will be - * computed automatically. - */ - public DateRangeAggregationBuilder addUnboundedTo(double to) { - return addUnboundedTo(null, to); - } - /** * Add a new range with no upper bound. 
* diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/DateRangeAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/DateRangeAggregatorFactory.java index b0661811c5932..393c732409a91 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/DateRangeAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/DateRangeAggregatorFactory.java @@ -30,19 +30,7 @@ public DateRangeAggregatorFactory( RangeAggregatorSupplier aggregatorSupplier ) throws IOException { - super( - name, - DateRangeAggregationBuilder.REGISTRY_KEY, - config, - ranges, - keyed, - rangeFactory, - context, - parent, - subFactoriesBuilder, - metadata, - aggregatorSupplier - ); + super(name, config, ranges, keyed, rangeFactory, context, parent, subFactoriesBuilder, metadata, aggregatorSupplier); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/GeoDistanceAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/GeoDistanceAggregationBuilder.java index a44d92f024e46..ffc6d68f21a05 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/GeoDistanceAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/GeoDistanceAggregationBuilder.java @@ -414,10 +414,6 @@ public GeoDistanceAggregationBuilder unit(DistanceUnit unit) { return this; } - public DistanceUnit unit() { - return unit; - } - public GeoDistanceAggregationBuilder distanceType(GeoDistance distanceType) { if (distanceType == null) { throw new IllegalArgumentException("[distanceType] must not be null: [" + name + "]"); @@ -426,19 +422,11 @@ public GeoDistanceAggregationBuilder distanceType(GeoDistance distanceType) { return this; } - public GeoDistance distanceType() { - return distanceType; - } - public GeoDistanceAggregationBuilder keyed(boolean keyed) 
{ this.keyed = keyed; return this; } - public boolean keyed() { - return keyed; - } - @Override public BucketCardinality bucketCardinality() { return BucketCardinality.MANY; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalDateRange.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalDateRange.java index 999d37e1fe65a..c99abc4eb904b 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalDateRange.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalDateRange.java @@ -11,7 +11,6 @@ import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.InternalAggregations; -import org.elasticsearch.search.aggregations.support.ValueType; import java.io.IOException; import java.time.Instant; @@ -71,25 +70,9 @@ private Double internalGetTo() { return to; } - @Override - protected InternalRange.Factory getFactory() { - return FACTORY; - } - - boolean keyed() { - return keyed; - } - - DocValueFormat format() { - return format; - } } public static class Factory extends InternalRange.Factory { - @Override - public ValueType getValueType() { - return ValueType.DATE; - } @Override public InternalDateRange create( diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalGeoDistance.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalGeoDistance.java index 028fce1b4c567..31306d81220d6 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalGeoDistance.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalGeoDistance.java @@ -11,7 +11,6 @@ import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.InternalAggregations; import 
org.elasticsearch.search.aggregations.support.CoreValuesSourceType; -import org.elasticsearch.search.aggregations.support.ValueType; import org.elasticsearch.search.aggregations.support.ValuesSourceType; import java.io.IOException; @@ -27,14 +26,6 @@ static class Bucket extends InternalRange.Bucket { super(key, from, to, docCount, aggregations, keyed, DocValueFormat.RAW); } - @Override - protected InternalRange.Factory getFactory() { - return FACTORY; - } - - boolean keyed() { - return keyed; - } } public static class Factory extends InternalRange.Factory { @@ -43,11 +34,6 @@ public ValuesSourceType getValueSourceType() { return CoreValuesSourceType.GEOPOINT; } - @Override - public ValueType getValueType() { - return ValueType.GEOPOINT; - } - @Override public InternalGeoDistance create( String name, diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalRange.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalRange.java index c6f3cbaf740f0..cb970fc87fd33 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalRange.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalRange.java @@ -17,7 +17,6 @@ import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation; import org.elasticsearch.search.aggregations.support.CoreValuesSourceType; import org.elasticsearch.search.aggregations.support.SamplingContext; -import org.elasticsearch.search.aggregations.support.ValueType; import org.elasticsearch.search.aggregations.support.ValuesSourceType; import org.elasticsearch.xcontent.XContentBuilder; @@ -117,11 +116,6 @@ public InternalAggregations getAggregations() { return aggregations; } - @SuppressWarnings("unchecked") - protected Factory getFactory() { - return FACTORY; - } - @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { final String key = getKeyAsString(); @@ 
-206,10 +200,6 @@ public ValuesSourceType getValueSourceType() { return CoreValuesSourceType.NUMERIC; } - public ValueType getValueType() { - return ValueType.NUMERIC; - } - @SuppressWarnings("unchecked") public R create(String name, List ranges, DocValueFormat format, boolean keyed, Map metadata) { return (R) new InternalRange(name, ranges, format, keyed, metadata); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/IpRangeAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/IpRangeAggregationBuilder.java index ef580929521d0..42f6d9957c329 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/IpRangeAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/IpRangeAggregationBuilder.java @@ -39,7 +39,6 @@ import java.net.InetAddress; import java.net.UnknownHostException; import java.util.ArrayList; -import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Objects; @@ -164,18 +163,6 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(to); } - public String getKey() { - return key; - } - - public String getFrom() { - return from; - } - - public String getTo() { - return to; - } - @Override public boolean equals(Object obj) { if (obj == null || getClass() != obj.getClass()) { @@ -253,11 +240,6 @@ public boolean keyed() { return keyed; } - /** Get the current list or ranges that are configured on this aggregation. */ - public List getRanges() { - return Collections.unmodifiableList(ranges); - } - /** Add a new {@link Range} to this aggregation. 
*/ public IpRangeAggregationBuilder addRange(Range range) { ranges.add(range); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregationBuilder.java index 9fdbaa10509e6..f9fc993c3f347 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregationBuilder.java @@ -203,13 +203,6 @@ protected ValuesSourceRegistry.RegistryKey getRegistryKey() { return REGISTRY_KEY; } - private static String generateKey(double from, double to, DocValueFormat format) { - StringBuilder builder = new StringBuilder().append(Double.isInfinite(from) ? "*" : format.format(from)) - .append("-") - .append(Double.isInfinite(to) ? "*" : format.format(to)); - return builder.toString(); - } - @Override public TransportVersion getMinimalSupportedVersion() { return TransportVersions.ZERO; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregator.java index 98237d19f0f33..7c89061ea32f2 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregator.java @@ -199,14 +199,6 @@ public double getTo() { return this.originalTo; } - public Double getOriginalFrom() { - return originalFrom; - } - - public Double getOriginalTo() { - return originalTo; - } - public String getFromAsString() { return this.fromAsStr; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregatorFactory.java index 
6e9781b0d9531..42d0f55e14a8d 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregatorFactory.java @@ -32,18 +32,6 @@ public RangeAggregatorFactory( Map metadata, RangeAggregatorSupplier aggregatorSupplier ) throws IOException { - super( - name, - RangeAggregationBuilder.REGISTRY_KEY, - config, - ranges, - keyed, - rangeFactory, - context, - parent, - subFactoriesBuilder, - metadata, - aggregatorSupplier - ); + super(name, config, ranges, keyed, rangeFactory, context, parent, subFactoriesBuilder, metadata, aggregatorSupplier); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedAggregationBuilder.java index 6dd998c0db043..90c29a8e3556f 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedAggregationBuilder.java @@ -106,13 +106,6 @@ public DiversifiedAggregationBuilder shardSize(int shardSize) { return this; } - /** - * Get the max num docs to be returned from each shard. - */ - public int shardSize() { - return shardSize; - } - /** * Set the max num docs to be returned per value. */ @@ -126,13 +119,6 @@ public DiversifiedAggregationBuilder maxDocsPerValue(int maxDocsPerValue) { return this; } - /** - * Get the max num docs to be returned per value. - */ - public int maxDocsPerValue() { - return maxDocsPerValue; - } - /** * Set the execution hint. */ @@ -141,13 +127,6 @@ public DiversifiedAggregationBuilder executionHint(String executionHint) { return this; } - /** - * Get the execution hint. 
- */ - public String executionHint() { - return executionHint; - } - @Override public BucketCardinality bucketCardinality() { return BucketCardinality.ONE; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregationBuilder.java index 9795097f308da..5c3208418df08 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregationBuilder.java @@ -67,13 +67,6 @@ public SamplerAggregationBuilder shardSize(int shardSize) { return this; } - /** - * Get the max num docs to be returned from each shard. - */ - public int shardSize() { - return shardSize; - } - @Override public BucketCardinality bucketCardinality() { return BucketCardinality.ONE; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/random/RandomSamplerAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/random/RandomSamplerAggregationBuilder.java index 80d396d9aff7d..240f016c66954 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/random/RandomSamplerAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/random/RandomSamplerAggregationBuilder.java @@ -21,7 +21,6 @@ import org.elasticsearch.xcontent.ObjectParser; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; import java.util.Collection; @@ -45,10 +44,6 @@ public class RandomSamplerAggregationBuilder extends AbstractAggregationBuilder< PARSER.declareDouble(RandomSamplerAggregationBuilder::setProbability, PROBABILITY); } - public static RandomSamplerAggregationBuilder 
parse(String aggregationName, XContentParser parser) throws IOException { - return PARSER.parse(parser, new RandomSamplerAggregationBuilder(aggregationName), null); - } - private int seed = Randomness.get().nextInt(); private double p; @@ -78,10 +73,6 @@ public RandomSamplerAggregationBuilder(StreamInput in) throws IOException { this.seed = in.readInt(); } - public double getProbability() { - return p; - } - protected RandomSamplerAggregationBuilder( RandomSamplerAggregationBuilder clone, AggregatorFactories.Builder factoriesBuilder, @@ -140,10 +131,6 @@ protected AggregatorFactory doBuild( return new RandomSamplerAggregatorFactory(name, seed, p, context, parent, subfactoriesBuilder, metadata); } - public int getSeed() { - return seed; - } - @Override protected XContentBuilder internalXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalMappedRareTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalMappedRareTerms.java index fd6ecb0b36252..f1b9608c9c2cf 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalMappedRareTerms.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalMappedRareTerms.java @@ -7,8 +7,6 @@ */ package org.elasticsearch.search.aggregations.bucket.terms; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.common.Randomness; import org.elasticsearch.common.io.stream.StreamInput; @@ -42,8 +40,6 @@ public abstract class InternalMappedRareTerms, final SetBackedScalingCuckooFilter filter; - protected final Logger logger = LogManager.getLogger(getClass()); - InternalMappedRareTerms( String name, BucketOrder order, @@ -59,10 +55,6 @@ public abstract class InternalMappedRareTerms, this.filter = filter; } - 
public long getMaxDocCount() { - return maxDocCount; - } - SetBackedScalingCuckooFilter getFilter() { return filter; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalRareTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalRareTerms.java index 2b2f6a19d46a0..a9870d113ae3a 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalRareTerms.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalRareTerms.java @@ -166,11 +166,6 @@ protected B reduceBucket(List buckets, AggregationReduceContext context) { protected abstract A createWithFilter(String name, List buckets, SetBackedScalingCuckooFilter filter); - /** - * Create an array to hold some buckets. Used in collecting the results. - */ - protected abstract B[] createBucketsArray(int size); - @Override public boolean equals(Object obj) { if (this == obj) return true; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongRareTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongRareTerms.java index 8a27a6929f0ba..2f8b685d4d623 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongRareTerms.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongRareTerms.java @@ -126,11 +126,6 @@ protected LongRareTerms createWithFilter(String name, List return new LongRareTerms(name, order, getMetadata(), format, buckets, maxDocCount, filter); } - @Override - protected LongRareTerms.Bucket[] createBucketsArray(int size) { - return new LongRareTerms.Bucket[size]; - } - @Override public boolean containsTerm(SetBackedScalingCuckooFilter filter, LongRareTerms.Bucket bucket) { return filter.mightContain((long) bucket.getKey()); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/RareTermsAggregationBuilder.java 
b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/RareTermsAggregationBuilder.java index 768c962d13db9..0422428e6b728 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/RareTermsAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/RareTermsAggregationBuilder.java @@ -162,13 +162,6 @@ public IncludeExclude includeExclude() { return includeExclude; } - /** - * Get the current false positive rate for individual cuckoo filters. - */ - public double getPrecision() { - return precision; - } - /** * Set's the false-positive rate for individual cuckoo filters. Does not dictate the overall fpp rate * since we use a "scaling" cuckoo filter which adds more filters as required, and the overall diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/RareTermsAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/RareTermsAggregatorFactory.java index 088d575a98ea8..2cc49816d3c25 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/RareTermsAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/RareTermsAggregatorFactory.java @@ -8,7 +8,6 @@ package org.elasticsearch.search.aggregations.bucket.terms; -import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; @@ -210,20 +209,8 @@ Aggregator create( ); } - @Override - boolean needsGlobalOrdinals() { - return false; - } - }; - public static ExecutionMode fromString(String value, final DeprecationLogger deprecationLogger) { - return switch (value) { - case "map" -> MAP; - default -> throw new IllegalArgumentException("Unknown `execution_hint`: [" + value + "], expected any of [map]"); - }; - } - private final 
ParseField parseField; ExecutionMode(ParseField parseField) { @@ -244,8 +231,6 @@ abstract Aggregator create( CardinalityUpperBound cardinality ) throws IOException; - abstract boolean needsGlobalOrdinals(); - @Override public String toString() { return parseField.getPreferredName(); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificanceLookup.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificanceLookup.java index 9ac9c0e241566..cf8a1df3e0079 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificanceLookup.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificanceLookup.java @@ -223,7 +223,7 @@ private long getBackgroundFrequency(Query query) throws IOException { // for types that use the inverted index, we prefer using a terms // enum that will do a better job at reusing index inputs Term term = ((TermQuery) query).getTerm(); - TermsEnum termsEnum = getTermsEnum(term.field()); + TermsEnum termsEnum = getTermsEnum(); if (termsEnum.seekExact(term.bytes())) { return termsEnum.docFreq(); } @@ -237,7 +237,7 @@ private long getBackgroundFrequency(Query query) throws IOException { return new IndexSearcher(context.searcher().getIndexReader()).count(query); } - private TermsEnum getTermsEnum(String field) throws IOException { + private TermsEnum getTermsEnum() throws IOException { // TODO this method helps because of asMultiBucketAggregator. Once we remove it we can move this logic into the aggregators. 
if (termsEnum != null) { return termsEnum; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTermsAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTermsAggregationBuilder.java index 5311688ceee54..bb89e7d54bcb6 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTermsAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTermsAggregationBuilder.java @@ -186,14 +186,6 @@ public TermsAggregator.BucketCountThresholds bucketCountThresholds() { return bucketCountThresholds; } - public SignificantTermsAggregationBuilder bucketCountThresholds(TermsAggregator.BucketCountThresholds bucketCountThresholds) { - if (bucketCountThresholds == null) { - throw new IllegalArgumentException("[bucketCountThresholds] must not be null: [" + name + "]"); - } - this.bucketCountThresholds = bucketCountThresholds; - return this; - } - /** * Sets the size - indicating how many term buckets should be returned * (defaults to 10) @@ -256,13 +248,6 @@ public SignificantTermsAggregationBuilder executionHint(String executionHint) { return this; } - /** - * Expert: gets an execution hint to the aggregation. 
- */ - public String executionHint() { - return executionHint; - } - public SignificantTermsAggregationBuilder backgroundFilter(QueryBuilder backgroundFilter) { if (backgroundFilter == null) { throw new IllegalArgumentException("[backgroundFilter] must not be null: [" + name + "]"); @@ -271,10 +256,6 @@ public SignificantTermsAggregationBuilder backgroundFilter(QueryBuilder backgrou return this; } - public QueryBuilder backgroundFilter() { - return backgroundFilter; - } - /** * Set terms to include and exclude from the aggregation results */ diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTextAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTextAggregationBuilder.java index a73d12c23a378..99dc93a175f7b 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTextAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTextAggregationBuilder.java @@ -123,10 +123,6 @@ protected AggregationBuilder shallowCopy(Builder factoriesBuilder, Map> sorts) { return this; } - /** - * Gets the bytes representing the sort builders for this request. - */ - public List> sorts() { - return sorts; - } - /** * Adds highlight to perform as part of the search. */ @@ -296,23 +289,6 @@ public TopHitsAggregationBuilder highlighter(HighlightBuilder highlightBuilder) return this; } - /** - * Gets the highlighter builder for this request. - */ - public HighlightBuilder highlighter() { - return highlightBuilder; - } - - /** - * Indicates whether the response should contain the stored _source for - * every hit - */ - public TopHitsAggregationBuilder fetchSource(boolean fetch) { - FetchSourceContext fetchSourceContext = this.fetchSourceContext != null ? 
this.fetchSourceContext : FetchSourceContext.FETCH_SOURCE; - this.fetchSourceContext = FetchSourceContext.of(fetch, fetchSourceContext.includes(), fetchSourceContext.excludes()); - return this; - } - /** * Indicate that _source should be returned with every hit, with an * "include" and/or "exclude" set which can include simple wildcard @@ -362,14 +338,6 @@ public TopHitsAggregationBuilder fetchSource(@Nullable FetchSourceContext fetchS return this; } - /** - * Gets the {@link FetchSourceContext} which defines how the _source - * should be fetched. - */ - public FetchSourceContext fetchSource() { - return fetchSourceContext; - } - /** * Adds a stored field to load and return (note, it must be stored) as part of the search request. * To disable the stored fields entirely (source and metadata fields) use {@code storedField("_none_")}. @@ -394,13 +362,6 @@ public TopHitsAggregationBuilder storedFields(List fields) { return this; } - /** - * Gets the stored fields context - */ - public StoredFieldsContext storedFields() { - return storedFieldsContext; - } - /** * Adds a field to load from doc values and return as part of * the search request. @@ -424,13 +385,6 @@ public TopHitsAggregationBuilder docValueField(String docValueField) { return docValueField(docValueField, null); } - /** - * Gets the field-data fields. - */ - public List docValueFields() { - return docValueFields; - } - /** * Adds a field to load and return as part of the search request. */ @@ -452,13 +406,6 @@ public TopHitsAggregationBuilder fetchField(String field) { return fetchField(new FieldAndFormat(field, null, null)); } - /** - * Gets the fields to load and return as part of the search request. - */ - public List fetchFields() { - return fetchFields; - } - /** * Adds a script field under the given name with the provided script. * @@ -511,13 +458,6 @@ public TopHitsAggregationBuilder scriptFields(List scriptFields) { return this; } - /** - * Gets the script fields. 
- */ - public Set scriptFields() { - return scriptFields; - } - /** * Should each {@link org.elasticsearch.search.SearchHit} be returned * with an explanation of the hit (ranking). @@ -527,14 +467,6 @@ public TopHitsAggregationBuilder explain(boolean explain) { return this; } - /** - * Indicates whether each search hit will be returned with an - * explanation of the hit (ranking) - */ - public boolean explain() { - return explain; - } - /** * Should each {@link org.elasticsearch.search.SearchHit} be returned * with a version associated with it. @@ -544,14 +476,6 @@ public TopHitsAggregationBuilder version(boolean version) { return this; } - /** - * Indicates whether the document's version will be included in the - * search hits. - */ - public boolean version() { - return version; - } - /** * Should each {@link org.elasticsearch.search.SearchHit} be returned with the * sequence number and primary term of the last modification of the document. @@ -561,14 +485,6 @@ public TopHitsAggregationBuilder seqNoAndPrimaryTerm(Boolean seqNoAndPrimaryTerm return this; } - /** - * Indicates whether {@link org.elasticsearch.search.SearchHit}s should be returned with the - * sequence number and primary term of the last modification of the document. - */ - public Boolean seqNoAndPrimaryTerm() { - return seqNoAndPrimaryTerm; - } - /** * Applies when sorting, and controls if scores will be tracked as well. * Defaults to {@code false}. @@ -578,13 +494,6 @@ public TopHitsAggregationBuilder trackScores(boolean trackScores) { return this; } - /** - * Indicates whether scores will be tracked for this request. 
- */ - public boolean trackScores() { - return trackScores; - } - @Override public TopHitsAggregationBuilder subAggregations(Builder subFactories) { throw new AggregationInitializationException( diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/AbstractPipelineAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/AbstractPipelineAggregationBuilder.java index 74bb9a8881d79..b3335dcbd5be5 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/AbstractPipelineAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/AbstractPipelineAggregationBuilder.java @@ -58,10 +58,6 @@ public void writeTo(StreamOutput out) throws IOException { protected abstract void doWriteTo(StreamOutput out) throws IOException; - public String type() { - return type; - } - protected abstract PipelineAggregator createInternal(Map metadata); /** diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/BucketMetricsPipelineAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/BucketMetricsPipelineAggregationBuilder.java index b5a9da37c60f6..c93d8f1c41874 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/BucketMetricsPipelineAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/BucketMetricsPipelineAggregationBuilder.java @@ -65,13 +65,6 @@ public AF format(String format) { return (AF) this; } - /** - * Gets the format to use on the output of this aggregation. 
- */ - public String format() { - return format; - } - protected DocValueFormat formatter() { if (format != null) { return new DocValueFormat.Decimal(format); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptPipelineAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptPipelineAggregationBuilder.java index 4993c8ec25d83..1cf49af421466 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptPipelineAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptPipelineAggregationBuilder.java @@ -142,13 +142,6 @@ public BucketScriptPipelineAggregationBuilder format(String format) { return this; } - /** - * Gets the format to use on the output of this aggregation. - */ - public String format() { - return format; - } - protected DocValueFormat formatter() { if (format != null) { return new DocValueFormat.Decimal(format); @@ -168,13 +161,6 @@ public BucketScriptPipelineAggregationBuilder gapPolicy(GapPolicy gapPolicy) { return this; } - /** - * Gets the gap policy to use for this aggregation. 
- */ - public GapPolicy gapPolicy() { - return gapPolicy; - } - @Override protected PipelineAggregator createInternal(Map metadata) { return new BucketScriptPipelineAggregator(name, bucketsPathsMap, script, formatter(), gapPolicy, metadata); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/CumulativeSumPipelineAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/CumulativeSumPipelineAggregationBuilder.java index c2816629b653f..944c4e8a88d08 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/CumulativeSumPipelineAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/CumulativeSumPipelineAggregationBuilder.java @@ -73,13 +73,6 @@ public CumulativeSumPipelineAggregationBuilder format(String format) { return this; } - /** - * Gets the format to use on the output of this aggregation. - */ - public String format() { - return format; - } - protected DocValueFormat formatter() { if (format != null) { return new DocValueFormat.Decimal(format); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/ParsedDerivative.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/ParsedDerivative.java index 66d2c0621b410..e7751230334d5 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/ParsedDerivative.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/ParsedDerivative.java @@ -24,16 +24,6 @@ public class ParsedDerivative extends ParsedSimpleValue { private static final ParseField NORMALIZED_AS_STRING = new ParseField("normalized_value_as_string"); private static final ParseField NORMALIZED = new ParseField("normalized_value"); - /** - * Returns the normalized value. 
If no normalised factor has been specified - * this method will return {@link #value()} - * - * @return the normalized value - */ - public double normalizedValue() { - return this.normalizedValue; - } - @Override public String getType() { return "derivative"; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/PercentilesBucketPipelineAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/PercentilesBucketPipelineAggregationBuilder.java index a47f1255e0fe9..4edecb3c8b480 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/PercentilesBucketPipelineAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/PercentilesBucketPipelineAggregationBuilder.java @@ -84,13 +84,6 @@ public PercentilesBucketPipelineAggregationBuilder setKeyed(boolean keyed) { return this; } - /** - * Get whether the XContent should be keyed - */ - public boolean getKeyed() { - return keyed; - } - @Override protected PipelineAggregator createInternal(Map metadata) { return new PercentilesBucketPipelineAggregator(name, percents, keyed, bucketsPaths, gapPolicy(), formatter(), metadata); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/SerialDiffPipelineAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/SerialDiffPipelineAggregationBuilder.java index 1143594e98d16..935104bcacd51 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/SerialDiffPipelineAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/SerialDiffPipelineAggregationBuilder.java @@ -70,13 +70,6 @@ public SerialDiffPipelineAggregationBuilder lag(int lag) { return this; } - /** - * Gets the lag to use when calculating the serial difference. - */ - public int lag() { - return lag; - } - /** * Sets the format to use on the output of this aggregation. 
*/ @@ -88,13 +81,6 @@ public SerialDiffPipelineAggregationBuilder format(String format) { return this; } - /** - * Gets the format to use on the output of this aggregation. - */ - public String format() { - return format; - } - /** * Sets the GapPolicy to use on the output of this aggregation. */ @@ -106,13 +92,6 @@ public SerialDiffPipelineAggregationBuilder gapPolicy(GapPolicy gapPolicy) { return this; } - /** - * Gets the GapPolicy to use on the output of this aggregation. - */ - public GapPolicy gapPolicy() { - return gapPolicy; - } - protected DocValueFormat formatter() { if (format != null) { return new DocValueFormat.Decimal(format); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/AggregationInfo.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/AggregationInfo.java index e3662b150270c..11b2da7c82e24 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/AggregationInfo.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/AggregationInfo.java @@ -55,10 +55,6 @@ public void writeTo(StreamOutput out) throws IOException { out.writeMap(aggs, StreamOutput::writeStringCollection); } - public Map> getAggregations() { - return aggs; - } - @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject("aggregations"); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/TimeSeriesValuesSourceType.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/TimeSeriesValuesSourceType.java index e1e249466aea6..6529c3f565a33 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/TimeSeriesValuesSourceType.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/TimeSeriesValuesSourceType.java @@ -84,10 +84,6 @@ public ValuesSource replaceMissing( } }; - public static ValuesSourceType fromString(String name) { - return 
valueOf(name.trim().toUpperCase(Locale.ROOT)); - } - public String value() { return name().toLowerCase(Locale.ROOT); } diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterAwareClient.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterAwareClient.java index 23a3857397f90..d85cc1d67a8b9 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterAwareClient.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterAwareClient.java @@ -84,11 +84,6 @@ private void maybeEnsureConnected(ActionListener ensureConnectedListener) } } - @Override - public void close() { - // do nothing - } - @Override public Client getRemoteClusterClient(String remoteClusterAlias, Executor responseExecutor) { return remoteClusterService.getRemoteClusterClient(threadPool(), remoteClusterAlias, responseExecutor); diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/remote/RemoteClusterNodesActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/remote/RemoteClusterNodesActionTests.java index b593c947fa725..91af3383f0670 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/remote/RemoteClusterNodesActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/remote/RemoteClusterNodesActionTests.java @@ -123,9 +123,6 @@ protected void ); listener.onResponse((Response) nodesInfoResponse); } - - @Override - public void close() {} } ); @@ -201,9 +198,6 @@ protected void assertThat(asInstanceOf(NodesInfoRequest.class, request).requestedMetrics(), empty()); listener.onResponse((Response) nodesInfoResponse); } - - @Override - public void close() {} } ); diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesResponseTests.java index 420bde60bc168..6fde4bed97a17 100644 --- 
a/server/src/test/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesResponseTests.java @@ -10,7 +10,9 @@ import org.elasticsearch.cluster.metadata.AliasMetadata; import org.elasticsearch.cluster.metadata.AliasMetadata.Builder; +import org.elasticsearch.cluster.metadata.DataStreamAlias; import org.elasticsearch.cluster.metadata.DataStreamTestHelper; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.core.Tuple; import org.elasticsearch.test.AbstractWireSerializingTestCase; @@ -29,9 +31,18 @@ protected GetAliasesResponse createTestInstance() { return createTestItem(); } + /** + * NB prior to 8.12 get-aliases was a TransportMasterNodeReadAction so for BwC we must remain able to write these responses so that + * older nodes can read them until we no longer need to support {@link org.elasticsearch.TransportVersions#CLUSTER_FEATURES_ADDED} and + * earlier. The reader implementation below is the production implementation from earlier versions, but moved here because it is unused + * in production now. 
+ */ @Override protected Writeable.Reader instanceReader() { - return GetAliasesResponse::new; + return in -> new GetAliasesResponse( + in.readImmutableOpenMap(StreamInput::readString, i -> i.readCollectionAsList(AliasMetadata::new)), + in.readMap(in1 -> in1.readCollectionAsList(DataStreamAlias::new)) + ); } @Override diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestBuilderTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestBuilderTests.java index fa2546a98697b..9941f84da7b9a 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequestBuilderTests.java @@ -13,6 +13,7 @@ import org.elasticsearch.core.Strings; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.client.NoOpClient; +import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentType; @@ -28,19 +29,21 @@ public class CreateIndexRequestBuilderTests extends ESTestCase { private static final String KEY = "my.settings.key"; private static final String VALUE = "my.settings.value"; + private TestThreadPool threadPool; private NoOpClient testClient; @Override @Before public void setUp() throws Exception { super.setUp(); - this.testClient = new NoOpClient(getTestName()); + this.threadPool = createThreadPool(); + this.testClient = new NoOpClient(threadPool); } @Override @After public void tearDown() throws Exception { - this.testClient.close(); + this.threadPool.close(); super.tearDown(); } diff --git a/server/src/test/java/org/elasticsearch/action/bulk/Retry2Tests.java b/server/src/test/java/org/elasticsearch/action/bulk/Retry2Tests.java index a33fadc13f4e4..5075c98421af0 100644 --- 
a/server/src/test/java/org/elasticsearch/action/bulk/Retry2Tests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/Retry2Tests.java @@ -18,6 +18,8 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.client.NoOpClient; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; import org.junit.After; import org.junit.Before; @@ -39,6 +41,7 @@ public class Retry2Tests extends ESTestCase { private static final int CALLS_TO_FAIL = 5; + private TestThreadPool threadPool; private MockBulkClient bulkClient; /** * Headers that are expected to be sent with all bulk requests. @@ -49,7 +52,8 @@ public class Retry2Tests extends ESTestCase { @Before public void setUp() throws Exception { super.setUp(); - this.bulkClient = new MockBulkClient(getTestName(), CALLS_TO_FAIL); + this.threadPool = createThreadPool(); + this.bulkClient = new MockBulkClient(threadPool, CALLS_TO_FAIL); // Stash some random headers so we can assert that we preserve them bulkClient.threadPool().getThreadContext().stashContext(); expectedHeaders.clear(); @@ -60,8 +64,8 @@ public void setUp() throws Exception { @Override @After public void tearDown() throws Exception { + this.threadPool.close(); super.tearDown(); - this.bulkClient.close(); } private BulkRequest createBulkRequest() { @@ -267,8 +271,8 @@ public void assertOnFailureNeverCalled() { private class MockBulkClient extends NoOpClient { private int numberOfCallsToFail; - private MockBulkClient(String testName, int numberOfCallsToFail) { - super(testName); + private MockBulkClient(ThreadPool threadPool, int numberOfCallsToFail) { + super(threadPool); this.numberOfCallsToFail = numberOfCallsToFail; } diff --git a/server/src/test/java/org/elasticsearch/action/bulk/RetryTests.java b/server/src/test/java/org/elasticsearch/action/bulk/RetryTests.java index 65931846bd366..c780c436e78aa 100644 --- 
a/server/src/test/java/org/elasticsearch/action/bulk/RetryTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/RetryTests.java @@ -18,6 +18,8 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.client.NoOpClient; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; import org.junit.After; import org.junit.Before; @@ -36,6 +38,7 @@ public class RetryTests extends ESTestCase { private static final TimeValue DELAY = TimeValue.timeValueMillis(1L); private static final int CALLS_TO_FAIL = 5; + private TestThreadPool threadPool; private MockBulkClient bulkClient; /** * Headers that are expected to be sent with all bulk requests. @@ -46,7 +49,8 @@ public class RetryTests extends ESTestCase { @Before public void setUp() throws Exception { super.setUp(); - this.bulkClient = new MockBulkClient(getTestName(), CALLS_TO_FAIL); + this.threadPool = createThreadPool(); + this.bulkClient = new MockBulkClient(threadPool, CALLS_TO_FAIL); // Stash some random headers so we can assert that we preserve them bulkClient.threadPool().getThreadContext().stashContext(); expectedHeaders.clear(); @@ -57,8 +61,8 @@ public void setUp() throws Exception { @Override @After public void tearDown() throws Exception { + this.threadPool.close(); super.tearDown(); - this.bulkClient.close(); } private BulkRequest createBulkRequest() { @@ -195,8 +199,8 @@ public void assertOnFailureNeverCalled() { private class MockBulkClient extends NoOpClient { private int numberOfCallsToFail; - private MockBulkClient(String testName, int numberOfCallsToFail) { - super(testName); + private MockBulkClient(ThreadPool threadPool, int numberOfCallsToFail) { + super(threadPool); this.numberOfCallsToFail = numberOfCallsToFail; } diff --git a/server/src/test/java/org/elasticsearch/action/index/IndexRequestBuilderTests.java 
b/server/src/test/java/org/elasticsearch/action/index/IndexRequestBuilderTests.java index b06f833806059..2f66b9d3b70f8 100644 --- a/server/src/test/java/org/elasticsearch/action/index/IndexRequestBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/action/index/IndexRequestBuilderTests.java @@ -11,6 +11,7 @@ import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.client.NoOpClient; +import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentType; @@ -24,19 +25,21 @@ public class IndexRequestBuilderTests extends ESTestCase { private static final String EXPECTED_SOURCE = "{\"SomeKey\":\"SomeValue\"}"; + private TestThreadPool threadPool; private NoOpClient testClient; @Override @Before public void setUp() throws Exception { super.setUp(); - this.testClient = new NoOpClient(getTestName()); + this.threadPool = createThreadPool(); + this.testClient = new NoOpClient(threadPool); } @Override @After public void tearDown() throws Exception { - this.testClient.close(); + this.threadPool.close(); super.tearDown(); } diff --git a/server/src/test/java/org/elasticsearch/action/search/ParsedScrollIdTests.java b/server/src/test/java/org/elasticsearch/action/search/ParsedScrollIdTests.java index 6130435b4b181..a92cfdb1d02be 100644 --- a/server/src/test/java/org/elasticsearch/action/search/ParsedScrollIdTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/ParsedScrollIdTests.java @@ -26,7 +26,7 @@ public void testHasLocalIndices() { new ShardSearchContextId(randomAlphaOfLength(8), randomLong()) ); } - final ParsedScrollId parsedScrollId = new ParsedScrollId(randomAlphaOfLength(8), randomAlphaOfLength(8), searchContextIdForNodes); + final ParsedScrollId parsedScrollId = new ParsedScrollId(randomAlphaOfLength(8), searchContextIdForNodes); 
assertEquals(hasLocal, parsedScrollId.hasLocalIndices()); } diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchScrollAsyncActionTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchScrollAsyncActionTests.java index df33a5e18fce6..41e7a5c8ad1e1 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchScrollAsyncActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchScrollAsyncActionTests.java @@ -458,7 +458,7 @@ protected void onFirstPhaseResult(int shardId, SearchAsyncActionTests.TestSearch private static ParsedScrollId getParsedScrollId(SearchContextIdForNode... idsForNodes) { List searchContextIdForNodes = Arrays.asList(idsForNodes); Collections.shuffle(searchContextIdForNodes, random()); - return new ParsedScrollId("", "test", searchContextIdForNodes.toArray(new SearchContextIdForNode[0])); + return new ParsedScrollId("test", searchContextIdForNodes.toArray(new SearchContextIdForNode[0])); } private ActionListener dummyListener() { diff --git a/server/src/test/java/org/elasticsearch/action/synonyms/PutSynonymRuleActionTests.java b/server/src/test/java/org/elasticsearch/action/synonyms/PutSynonymRuleActionTests.java index b05dd1e0abbd6..7d73281e7c86b 100644 --- a/server/src/test/java/org/elasticsearch/action/synonyms/PutSynonymRuleActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/synonyms/PutSynonymRuleActionTests.java @@ -8,7 +8,6 @@ package org.elasticsearch.action.synonyms; -import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.synonyms.RestPutSynonymRuleAction; import org.elasticsearch.test.ESTestCase; @@ -27,7 +26,8 @@ public void testEmptyRequestBody() throws Exception { .build(); FakeRestChannel channel = new FakeRestChannel(request, false, 0); - try (NodeClient nodeClient = new NoOpNodeClient(this.getTestName())) { + try (var threadPool = createThreadPool()) { + final var 
nodeClient = new NoOpNodeClient(threadPool); expectThrows(IllegalArgumentException.class, () -> action.handleRequest(request, channel, nodeClient)); } } diff --git a/server/src/test/java/org/elasticsearch/action/synonyms/PutSynonymsActionTests.java b/server/src/test/java/org/elasticsearch/action/synonyms/PutSynonymsActionTests.java index 94674134e30ad..df596469c4e1b 100644 --- a/server/src/test/java/org/elasticsearch/action/synonyms/PutSynonymsActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/synonyms/PutSynonymsActionTests.java @@ -8,7 +8,6 @@ package org.elasticsearch.action.synonyms; -import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.synonyms.RestPutSynonymsAction; import org.elasticsearch.test.ESTestCase; @@ -27,7 +26,8 @@ public void testEmptyRequestBody() throws Exception { .build(); FakeRestChannel channel = new FakeRestChannel(request, false, 0); - try (NodeClient nodeClient = new NoOpNodeClient(this.getTestName())) { + try (var threadPool = createThreadPool()) { + final var nodeClient = new NoOpNodeClient(threadPool); expectThrows(IllegalArgumentException.class, () -> action.handleRequest(request, channel, nodeClient)); } } diff --git a/server/src/test/java/org/elasticsearch/client/internal/AbstractClientHeadersTestCase.java b/server/src/test/java/org/elasticsearch/client/internal/AbstractClientHeadersTestCase.java index deec1ec10c5a8..32e9b214ab530 100644 --- a/server/src/test/java/org/elasticsearch/client/internal/AbstractClientHeadersTestCase.java +++ b/server/src/test/java/org/elasticsearch/client/internal/AbstractClientHeadersTestCase.java @@ -83,7 +83,6 @@ public void setUp() throws Exception { @Override public void tearDown() throws Exception { super.tearDown(); - client.close(); terminate(threadPool); } diff --git a/server/src/test/java/org/elasticsearch/client/internal/OriginSettingClientTests.java 
b/server/src/test/java/org/elasticsearch/client/internal/OriginSettingClientTests.java index cf0bd108e327d..3a93f559284ca 100644 --- a/server/src/test/java/org/elasticsearch/client/internal/OriginSettingClientTests.java +++ b/server/src/test/java/org/elasticsearch/client/internal/OriginSettingClientTests.java @@ -24,23 +24,24 @@ public class OriginSettingClientTests extends ESTestCase { public void testSetsParentId() { String origin = randomAlphaOfLength(7); - /* - * This mock will do nothing but verify that origin is set in the - * thread context before executing the action. - */ - NoOpClient mock = new NoOpClient(getTestName()) { - @Override - protected void doExecute( - ActionType action, - Request request, - ActionListener listener - ) { - assertEquals(origin, threadPool().getThreadContext().getTransient(ThreadContext.ACTION_ORIGIN_TRANSIENT_NAME)); - super.doExecute(action, request, listener); - } - }; + try (var threadPool = createThreadPool()) { + /* + * This mock will do nothing but verify that origin is set in the + * thread context before executing the action. 
+ */ + final var mock = new NoOpClient(threadPool) { + @Override + protected void doExecute( + ActionType action, + Request request, + ActionListener listener + ) { + assertEquals(origin, threadPool().getThreadContext().getTransient(ThreadContext.ACTION_ORIGIN_TRANSIENT_NAME)); + super.doExecute(action, request, listener); + } + }; - try (OriginSettingClient client = new OriginSettingClient(mock, origin)) { + final var client = new OriginSettingClient(mock, origin); // All of these should have the origin set client.bulk(new BulkRequest()); client.search(new SearchRequest()); diff --git a/server/src/test/java/org/elasticsearch/client/internal/ParentTaskAssigningClientTests.java b/server/src/test/java/org/elasticsearch/client/internal/ParentTaskAssigningClientTests.java index f140e624cc674..0f12076dd53b6 100644 --- a/server/src/test/java/org/elasticsearch/client/internal/ParentTaskAssigningClientTests.java +++ b/server/src/test/java/org/elasticsearch/client/internal/ParentTaskAssigningClientTests.java @@ -23,19 +23,21 @@ public class ParentTaskAssigningClientTests extends ESTestCase { public void testSetsParentId() { TaskId[] parentTaskId = new TaskId[] { new TaskId(randomAlphaOfLength(3), randomLong()) }; - // This mock will do nothing but verify that parentTaskId is set on all requests sent to it. - NoOpClient mock = new NoOpClient(getTestName()) { - @Override - protected void doExecute( - ActionType action, - Request request, - ActionListener listener - ) { - assertEquals(parentTaskId[0], request.getParentTask()); - super.doExecute(action, request, listener); - } - }; - try (ParentTaskAssigningClient client = new ParentTaskAssigningClient(mock, parentTaskId[0])) { + try (var threadPool = createThreadPool()) { + // This mock will do nothing but verify that parentTaskId is set on all requests sent to it. 
+ final var mock = new NoOpClient(threadPool) { + @Override + protected void doExecute( + ActionType action, + Request request, + ActionListener listener + ) { + assertEquals(parentTaskId[0], request.getParentTask()); + super.doExecute(action, request, listener); + } + }; + + final var client = new ParentTaskAssigningClient(mock, parentTaskId[0]); assertEquals(parentTaskId[0], client.getParentTask()); // All of these should have the parentTaskId set diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsAvailabilityActionGuideTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsAvailabilityActionGuideTests.java index e059f96474db5..4777b0eb357da 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsAvailabilityActionGuideTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsAvailabilityActionGuideTests.java @@ -10,27 +10,27 @@ import org.elasticsearch.test.ESTestCase; -import static org.elasticsearch.cluster.routing.allocation.ShardsAvailabilityHealthIndicatorService.ACTION_CHECK_ALLOCATION_EXPLAIN_API; -import static org.elasticsearch.cluster.routing.allocation.ShardsAvailabilityHealthIndicatorService.ACTION_ENABLE_CLUSTER_ROUTING_ALLOCATION; -import static org.elasticsearch.cluster.routing.allocation.ShardsAvailabilityHealthIndicatorService.ACTION_ENABLE_INDEX_ROUTING_ALLOCATION; -import static org.elasticsearch.cluster.routing.allocation.ShardsAvailabilityHealthIndicatorService.ACTION_ENABLE_TIERS_LOOKUP; -import static org.elasticsearch.cluster.routing.allocation.ShardsAvailabilityHealthIndicatorService.ACTION_INCREASE_NODE_CAPACITY; -import static org.elasticsearch.cluster.routing.allocation.ShardsAvailabilityHealthIndicatorService.ACTION_INCREASE_SHARD_LIMIT_CLUSTER_SETTING; -import static org.elasticsearch.cluster.routing.allocation.ShardsAvailabilityHealthIndicatorService.ACTION_INCREASE_SHARD_LIMIT_CLUSTER_SETTING_LOOKUP; -import 
static org.elasticsearch.cluster.routing.allocation.ShardsAvailabilityHealthIndicatorService.ACTION_INCREASE_SHARD_LIMIT_INDEX_SETTING; -import static org.elasticsearch.cluster.routing.allocation.ShardsAvailabilityHealthIndicatorService.ACTION_INCREASE_SHARD_LIMIT_INDEX_SETTING_LOOKUP; -import static org.elasticsearch.cluster.routing.allocation.ShardsAvailabilityHealthIndicatorService.ACTION_MIGRATE_TIERS_AWAY_FROM_INCLUDE_DATA; -import static org.elasticsearch.cluster.routing.allocation.ShardsAvailabilityHealthIndicatorService.ACTION_MIGRATE_TIERS_AWAY_FROM_REQUIRE_DATA; -import static org.elasticsearch.cluster.routing.allocation.ShardsAvailabilityHealthIndicatorService.ACTION_RESTORE_FROM_SNAPSHOT; -import static org.elasticsearch.cluster.routing.allocation.ShardsAvailabilityHealthIndicatorService.DIAGNOSE_SHARDS_ACTION_GUIDE; -import static org.elasticsearch.cluster.routing.allocation.ShardsAvailabilityHealthIndicatorService.ENABLE_CLUSTER_ALLOCATION_ACTION_GUIDE; -import static org.elasticsearch.cluster.routing.allocation.ShardsAvailabilityHealthIndicatorService.ENABLE_INDEX_ALLOCATION_GUIDE; -import static org.elasticsearch.cluster.routing.allocation.ShardsAvailabilityHealthIndicatorService.ENABLE_TIER_ACTION_GUIDE; -import static org.elasticsearch.cluster.routing.allocation.ShardsAvailabilityHealthIndicatorService.INCREASE_CLUSTER_SHARD_LIMIT_ACTION_GUIDE; -import static org.elasticsearch.cluster.routing.allocation.ShardsAvailabilityHealthIndicatorService.INCREASE_SHARD_LIMIT_ACTION_GUIDE; -import static org.elasticsearch.cluster.routing.allocation.ShardsAvailabilityHealthIndicatorService.MIGRATE_TO_TIERS_ACTION_GUIDE; -import static org.elasticsearch.cluster.routing.allocation.ShardsAvailabilityHealthIndicatorService.RESTORE_FROM_SNAPSHOT_ACTION_GUIDE; -import static org.elasticsearch.cluster.routing.allocation.ShardsAvailabilityHealthIndicatorService.TIER_CAPACITY_ACTION_GUIDE; +import static 
org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.ACTION_CHECK_ALLOCATION_EXPLAIN_API; +import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.ACTION_ENABLE_CLUSTER_ROUTING_ALLOCATION; +import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.ACTION_ENABLE_INDEX_ROUTING_ALLOCATION; +import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.ACTION_ENABLE_TIERS_LOOKUP; +import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.ACTION_INCREASE_NODE_CAPACITY; +import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.ACTION_INCREASE_SHARD_LIMIT_CLUSTER_SETTING; +import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.ACTION_INCREASE_SHARD_LIMIT_CLUSTER_SETTING_LOOKUP; +import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.ACTION_INCREASE_SHARD_LIMIT_INDEX_SETTING; +import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.ACTION_INCREASE_SHARD_LIMIT_INDEX_SETTING_LOOKUP; +import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.ACTION_MIGRATE_TIERS_AWAY_FROM_INCLUDE_DATA; +import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.ACTION_MIGRATE_TIERS_AWAY_FROM_REQUIRE_DATA; +import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.ACTION_RESTORE_FROM_SNAPSHOT; +import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.DIAGNOSE_SHARDS_ACTION_GUIDE; +import static 
org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.ENABLE_CLUSTER_ALLOCATION_ACTION_GUIDE; +import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.ENABLE_INDEX_ALLOCATION_GUIDE; +import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.ENABLE_TIER_ACTION_GUIDE; +import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.INCREASE_CLUSTER_SHARD_LIMIT_ACTION_GUIDE; +import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.INCREASE_SHARD_LIMIT_ACTION_GUIDE; +import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.MIGRATE_TO_TIERS_ACTION_GUIDE; +import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.RESTORE_FROM_SNAPSHOT_ACTION_GUIDE; +import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.TIER_CAPACITY_ACTION_GUIDE; import static org.hamcrest.Matchers.is; public class ShardsAvailabilityActionGuideTests extends ESTestCase { diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerTests.java index c8e6a011bc52e..b67b4ef7e5a7f 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerTests.java @@ -53,6 +53,7 @@ import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.Maps; import 
org.elasticsearch.gateway.GatewayAllocator; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.shard.ShardId; @@ -1247,44 +1248,68 @@ public void testRebalanceDoesNotCauseHotSpots() { public void testShouldLogOnTooManyUndesiredAllocations() { - var indexMetadata = IndexMetadata.builder("index-1").settings(indexSettings(IndexVersion.current(), 1, 0)).build(); - final var index = indexMetadata.getIndex(); - final var shardId = new ShardId(index, 0); + final int shardCount = 5; + + final var dataNode1Assignments = Maps.newMapWithExpectedSize(shardCount); + final var dataNode2Assignments = Maps.newMapWithExpectedSize(shardCount); + + final var metadataBuilder = Metadata.builder(); + final var routingTableBuilder = RoutingTable.builder(); + for (int i = 0; i < shardCount; i++) { + final var indexMetadata = IndexMetadata.builder("index-" + i).settings(indexSettings(IndexVersion.current(), 1, 0)).build(); + final var index = indexMetadata.getIndex(); + final var shardId = new ShardId(index, 0); + metadataBuilder.put(indexMetadata, false); + routingTableBuilder.add(IndexRoutingTable.builder(index).addShard(newShardRouting(shardId, "data-node-1", true, STARTED))); + + dataNode1Assignments.put(shardId, new ShardAssignment(Set.of("data-node-1"), 1, 0, 0)); + dataNode2Assignments.put(shardId, new ShardAssignment(Set.of("data-node-2"), 1, 0, 0)); + } final var clusterState = ClusterState.builder(ClusterName.DEFAULT) .nodes(DiscoveryNodes.builder().add(newNode("data-node-1")).add(newNode("data-node-2"))) - .metadata(Metadata.builder().put(indexMetadata, true)) - .routingTable( - RoutingTable.builder() - .add(IndexRoutingTable.builder(index).addShard(newShardRouting(shardId, "data-node-2", true, STARTED))) - ) + .metadata(metadataBuilder) + .routingTable(routingTableBuilder) .build(); - final var balance = new DesiredBalance(1, Map.of(shardId, new ShardAssignment(Set.of("data-node-1"), 1, 0, 0))); - var threadPool = mock(ThreadPool.class); - 
when(threadPool.relativeTimeInMillis()).thenReturn(1L).thenReturn(2L); + when(threadPool.relativeTimeInMillis()).thenReturn(1L).thenReturn(2L).thenReturn(3L); var reconciler = new DesiredBalanceReconciler(createBuiltInClusterSettings(), threadPool); + var expectedWarningMessage = "[100%] of assigned shards (" + + shardCount + + "/" + + shardCount + + ") are not on their desired nodes, which exceeds the warn threshold of [10%]"; + assertThatLogger( + () -> reconciler.reconcile(new DesiredBalance(1, dataNode1Assignments), createRoutingAllocationFrom(clusterState)), + DesiredBalanceReconciler.class, + new MockLogAppender.UnseenEventExpectation( + "Should not log if all shards on desired location", + DesiredBalanceReconciler.class.getCanonicalName(), + Level.WARN, + expectedWarningMessage + ) + ); assertThatLogger( - () -> reconciler.reconcile(balance, createRoutingAllocationFrom(clusterState)), + () -> reconciler.reconcile(new DesiredBalance(1, dataNode2Assignments), createRoutingAllocationFrom(clusterState)), DesiredBalanceReconciler.class, new MockLogAppender.SeenEventExpectation( "Should log first too many shards on undesired locations", DesiredBalanceReconciler.class.getCanonicalName(), Level.WARN, - "[100%] of assigned shards (1/1) are not on their desired nodes, which exceeds the warn threshold of [10%]" + expectedWarningMessage ) ); assertThatLogger( - () -> reconciler.reconcile(balance, createRoutingAllocationFrom(clusterState)), + () -> reconciler.reconcile(new DesiredBalance(1, dataNode2Assignments), createRoutingAllocationFrom(clusterState)), DesiredBalanceReconciler.class, new MockLogAppender.UnseenEventExpectation( "Should not log immediate second too many shards on undesired locations", DesiredBalanceReconciler.class.getCanonicalName(), Level.WARN, - "[100.0%] of assigned shards (1/1) are not on their desired nodes, which exceeds the warn threshold of [10.0%]" + expectedWarningMessage ) ); } diff --git 
a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsAvailabilityHealthIndicatorServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/shards/ShardsAvailabilityHealthIndicatorServiceTests.java similarity index 96% rename from server/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsAvailabilityHealthIndicatorServiceTests.java rename to server/src/test/java/org/elasticsearch/cluster/routing/allocation/shards/ShardsAvailabilityHealthIndicatorServiceTests.java index b333e1cdf6fa9..9ab44ec3fb047 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsAvailabilityHealthIndicatorServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/shards/ShardsAvailabilityHealthIndicatorServiceTests.java @@ -6,7 +6,7 @@ * Side Public License, v 1. */ -package org.elasticsearch.cluster.routing.allocation; +package org.elasticsearch.cluster.routing.allocation.shards; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; @@ -24,13 +24,19 @@ import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.UnassignedInfo; -import org.elasticsearch.cluster.routing.allocation.ShardsAvailabilityHealthIndicatorService.ShardAllocationStatus; +import org.elasticsearch.cluster.routing.allocation.AllocateUnassignedDecision; +import org.elasticsearch.cluster.routing.allocation.AllocationService; +import org.elasticsearch.cluster.routing.allocation.DataTier; +import org.elasticsearch.cluster.routing.allocation.MoveDecision; +import org.elasticsearch.cluster.routing.allocation.NodeAllocationResult; +import org.elasticsearch.cluster.routing.allocation.ShardAllocationDecision; import org.elasticsearch.cluster.routing.allocation.decider.AwarenessAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.Decision; import 
org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.SameShardAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider; +import org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.ShardAllocationStatus; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; @@ -54,6 +60,7 @@ import org.elasticsearch.snapshots.SearchableSnapshotsSettings; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; +import org.mockito.Mockito; import org.mockito.stubbing.Answer; import java.util.ArrayList; @@ -74,30 +81,30 @@ import static org.elasticsearch.cluster.metadata.IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_PREFIX; import static org.elasticsearch.cluster.metadata.SingleNodeShutdownMetadata.Type.RESTART; import static org.elasticsearch.cluster.routing.ShardRouting.newUnassigned; -import static org.elasticsearch.cluster.routing.allocation.ShardsAvailabilityHealthIndicatorService.ACTION_CHECK_ALLOCATION_EXPLAIN_API; -import static org.elasticsearch.cluster.routing.allocation.ShardsAvailabilityHealthIndicatorService.ACTION_ENABLE_CLUSTER_ROUTING_ALLOCATION; -import static org.elasticsearch.cluster.routing.allocation.ShardsAvailabilityHealthIndicatorService.ACTION_ENABLE_INDEX_ROUTING_ALLOCATION; -import static org.elasticsearch.cluster.routing.allocation.ShardsAvailabilityHealthIndicatorService.ACTION_ENABLE_TIERS_LOOKUP; -import static org.elasticsearch.cluster.routing.allocation.ShardsAvailabilityHealthIndicatorService.ACTION_INCREASE_NODE_CAPACITY; -import static 
org.elasticsearch.cluster.routing.allocation.ShardsAvailabilityHealthIndicatorService.ACTION_INCREASE_SHARD_LIMIT_CLUSTER_SETTING; -import static org.elasticsearch.cluster.routing.allocation.ShardsAvailabilityHealthIndicatorService.ACTION_INCREASE_SHARD_LIMIT_CLUSTER_SETTING_LOOKUP; -import static org.elasticsearch.cluster.routing.allocation.ShardsAvailabilityHealthIndicatorService.ACTION_INCREASE_SHARD_LIMIT_INDEX_SETTING; -import static org.elasticsearch.cluster.routing.allocation.ShardsAvailabilityHealthIndicatorService.ACTION_INCREASE_SHARD_LIMIT_INDEX_SETTING_LOOKUP; -import static org.elasticsearch.cluster.routing.allocation.ShardsAvailabilityHealthIndicatorService.ACTION_INCREASE_TIER_CAPACITY_LOOKUP; -import static org.elasticsearch.cluster.routing.allocation.ShardsAvailabilityHealthIndicatorService.ACTION_MIGRATE_TIERS_AWAY_FROM_INCLUDE_DATA; -import static org.elasticsearch.cluster.routing.allocation.ShardsAvailabilityHealthIndicatorService.ACTION_MIGRATE_TIERS_AWAY_FROM_INCLUDE_DATA_LOOKUP; -import static org.elasticsearch.cluster.routing.allocation.ShardsAvailabilityHealthIndicatorService.ACTION_MIGRATE_TIERS_AWAY_FROM_REQUIRE_DATA; -import static org.elasticsearch.cluster.routing.allocation.ShardsAvailabilityHealthIndicatorService.ACTION_MIGRATE_TIERS_AWAY_FROM_REQUIRE_DATA_LOOKUP; -import static org.elasticsearch.cluster.routing.allocation.ShardsAvailabilityHealthIndicatorService.ACTION_RESTORE_FROM_SNAPSHOT; -import static org.elasticsearch.cluster.routing.allocation.ShardsAvailabilityHealthIndicatorService.DIAGNOSIS_WAIT_FOR_INITIALIZATION; -import static org.elasticsearch.cluster.routing.allocation.ShardsAvailabilityHealthIndicatorService.DIAGNOSIS_WAIT_FOR_OR_FIX_DELAYED_SHARDS; -import static org.elasticsearch.cluster.routing.allocation.ShardsAvailabilityHealthIndicatorService.NAME; -import static org.elasticsearch.cluster.routing.allocation.ShardsAvailabilityHealthIndicatorServiceTests.ShardState.AVAILABLE; -import static 
org.elasticsearch.cluster.routing.allocation.ShardsAvailabilityHealthIndicatorServiceTests.ShardState.CREATING; -import static org.elasticsearch.cluster.routing.allocation.ShardsAvailabilityHealthIndicatorServiceTests.ShardState.INITIALIZING; -import static org.elasticsearch.cluster.routing.allocation.ShardsAvailabilityHealthIndicatorServiceTests.ShardState.RESTARTING; -import static org.elasticsearch.cluster.routing.allocation.ShardsAvailabilityHealthIndicatorServiceTests.ShardState.UNAVAILABLE; import static org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider.CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING; +import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.ACTION_CHECK_ALLOCATION_EXPLAIN_API; +import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.ACTION_ENABLE_CLUSTER_ROUTING_ALLOCATION; +import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.ACTION_ENABLE_INDEX_ROUTING_ALLOCATION; +import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.ACTION_ENABLE_TIERS_LOOKUP; +import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.ACTION_INCREASE_NODE_CAPACITY; +import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.ACTION_INCREASE_SHARD_LIMIT_CLUSTER_SETTING; +import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.ACTION_INCREASE_SHARD_LIMIT_CLUSTER_SETTING_LOOKUP; +import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.ACTION_INCREASE_SHARD_LIMIT_INDEX_SETTING; +import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.ACTION_INCREASE_SHARD_LIMIT_INDEX_SETTING_LOOKUP; +import static 
org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.ACTION_INCREASE_TIER_CAPACITY_LOOKUP; +import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.ACTION_MIGRATE_TIERS_AWAY_FROM_INCLUDE_DATA; +import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.ACTION_MIGRATE_TIERS_AWAY_FROM_INCLUDE_DATA_LOOKUP; +import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.ACTION_MIGRATE_TIERS_AWAY_FROM_REQUIRE_DATA; +import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.ACTION_MIGRATE_TIERS_AWAY_FROM_REQUIRE_DATA_LOOKUP; +import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.ACTION_RESTORE_FROM_SNAPSHOT; +import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.DIAGNOSIS_WAIT_FOR_INITIALIZATION; +import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.DIAGNOSIS_WAIT_FOR_OR_FIX_DELAYED_SHARDS; +import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService.NAME; +import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorServiceTests.ShardState.AVAILABLE; +import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorServiceTests.ShardState.CREATING; +import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorServiceTests.ShardState.INITIALIZING; +import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorServiceTests.ShardState.RESTARTING; +import static org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorServiceTests.ShardState.UNAVAILABLE; import static 
org.elasticsearch.common.util.CollectionUtils.concatLists; import static org.elasticsearch.core.TimeValue.timeValueSeconds; import static org.elasticsearch.health.Diagnosis.Resource.Type.FEATURE_STATE; @@ -2233,7 +2240,7 @@ private static ShardsAvailabilityHealthIndicatorService createAllocationHealthIn when(clusterService.state()).thenReturn(clusterState); var clusterSettings = new ClusterSettings(nodeSettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); when(clusterService.getClusterSettings()).thenReturn(clusterSettings); - var allocationService = mock(AllocationService.class); + var allocationService = Mockito.mock(AllocationService.class); when(allocationService.explainShardAllocation(any(), any())).thenAnswer((Answer) invocation -> { ShardRouting shardRouting = invocation.getArgument(0); var key = new ShardRoutingKey(shardRouting.getIndexName(), shardRouting.getId(), shardRouting.primary()); diff --git a/server/src/test/java/org/elasticsearch/health/HealthPeriodicLoggerTests.java b/server/src/test/java/org/elasticsearch/health/HealthPeriodicLoggerTests.java index 846ed3e3021ab..7e77b3a4a1d73 100644 --- a/server/src/test/java/org/elasticsearch/health/HealthPeriodicLoggerTests.java +++ b/server/src/test/java/org/elasticsearch/health/HealthPeriodicLoggerTests.java @@ -92,7 +92,6 @@ public void cleanup() { testHealthPeriodicLogger.close(); } threadPool.shutdownNow(); - client.close(); } public void testConvertToLoggedFields() { diff --git a/server/src/test/java/org/elasticsearch/index/shard/ShardGetServiceTests.java b/server/src/test/java/org/elasticsearch/index/shard/ShardGetServiceTests.java index f099fa657b89c..b32e9f4db8b77 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/ShardGetServiceTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/ShardGetServiceTests.java @@ -30,7 +30,6 @@ import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_PRIMARY_TERM; import static 
org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThan; public class ShardGetServiceTests extends IndexShardTestCase { @@ -241,7 +240,8 @@ public void testGetFromTranslog() throws IOException { .getFromTranslog("2", new String[] { "foo" }, true, 1, VersionType.INTERNAL, FetchSourceContext.FETCH_SOURCE, false); assertNull(getResult); var lastUnsafeGeneration = engine.getLastUnsafeSegmentGenerationForGets(); - assertThat(lastUnsafeGeneration, greaterThan(0L)); + // last unsafe generation is set to last committed gen after the refresh triggered by realtime get + assertThat(lastUnsafeGeneration, equalTo(engine.getLastCommittedSegmentInfos().getGeneration())); assertTrue(LiveVersionMapTestUtils.isSafeAccessRequired(map)); assertFalse(LiveVersionMapTestUtils.isUnsafe(map)); @@ -250,7 +250,7 @@ public void testGetFromTranslog() throws IOException { engine.flush(true, true, flushFuture); var flushResult = flushFuture.actionGet(); assertTrue(flushResult.flushPerformed()); - assertThat(flushResult.generation(), equalTo(lastUnsafeGeneration)); + assertThat(flushResult.generation(), equalTo(lastUnsafeGeneration + 1)); assertThat(engine.getLastUnsafeSegmentGenerationForGets(), equalTo(lastUnsafeGeneration)); // No longer in translog getResult = primary.getService() diff --git a/server/src/test/java/org/elasticsearch/plugins/PluginDescriptorTests.java b/server/src/test/java/org/elasticsearch/plugins/PluginDescriptorTests.java index 5ed02bd4b35c9..000dc1a33ed91 100644 --- a/server/src/test/java/org/elasticsearch/plugins/PluginDescriptorTests.java +++ b/server/src/test/java/org/elasticsearch/plugins/PluginDescriptorTests.java @@ -335,7 +335,7 @@ public void testPluginEqualityAndHash() { descriptor1.getName(), randomValueOtherThan(descriptor1.getDescription(), () -> randomAlphaOfLengthBetween(4, 12)), randomValueOtherThan(descriptor1.getVersion(), () -> 
randomAlphaOfLengthBetween(4, 12)), - descriptor1.getElasticsearchVersion().previousMajor().toString(), + "8.0.0", randomValueOtherThan(descriptor1.getJavaVersion(), () -> randomAlphaOfLengthBetween(4, 12)), descriptor1.isStable() ? randomAlphaOfLengthBetween(4, 12) : null, descriptor1.isStable() ? randomAlphaOfLength(6) : null, @@ -352,7 +352,7 @@ public void testPluginEqualityAndHash() { randomValueOtherThan(descriptor1.getName(), () -> randomAlphaOfLengthBetween(4, 12)), descriptor1.getDescription(), descriptor1.getVersion(), - descriptor1.getElasticsearchVersion().toString(), + descriptor1.getElasticsearchVersion(), descriptor1.getJavaVersion(), classname, descriptor1.getModuleName().orElse(null), diff --git a/server/src/test/java/org/elasticsearch/plugins/PluginsUtilsTests.java b/server/src/test/java/org/elasticsearch/plugins/PluginsUtilsTests.java index 3556c94980773..a7cc74582afdc 100644 --- a/server/src/test/java/org/elasticsearch/plugins/PluginsUtilsTests.java +++ b/server/src/test/java/org/elasticsearch/plugins/PluginsUtilsTests.java @@ -382,6 +382,18 @@ public void testJarHellSpiConflict() throws Exception { assertThat(e.getCause().getMessage(), containsString("DummyClass1")); } + public void testInternalNonSemanticVersions() throws Exception { + PluginDescriptor info = getPluginDescriptorForVersion(randomAlphaOfLengthBetween(6, 32), "1.8", false); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> PluginsUtils.verifyCompatibility(info)); + assertThat(e.getMessage(), containsString("Plugin [my_plugin] was built for Elasticsearch version")); + } + + public void testStableNonSemanticVersions() throws Exception { + PluginDescriptor info = getPluginDescriptorForVersion(randomAlphaOfLengthBetween(6, 32), "1.8", true); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> PluginsUtils.verifyCompatibility(info)); + assertThat(e.getMessage(), containsString("Expected semantic version for plugin 
[my_plugin] but was")); + } + public void testStableEarlierElasticsearchVersion() throws Exception { PluginDescriptor info = getPluginDescriptorForVersion(Version.fromId(Version.CURRENT.id + 1).toString(), "1.8", true); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> PluginsUtils.verifyCompatibility(info)); diff --git a/server/src/test/java/org/elasticsearch/rest/BaseRestHandlerTests.java b/server/src/test/java/org/elasticsearch/rest/BaseRestHandlerTests.java index 06a95c2628389..0211397fdeee8 100644 --- a/server/src/test/java/org/elasticsearch/rest/BaseRestHandlerTests.java +++ b/server/src/test/java/org/elasticsearch/rest/BaseRestHandlerTests.java @@ -47,7 +47,6 @@ public void setUp() throws Exception { public void tearDown() throws Exception { super.tearDown(); threadPool.shutdown(); - mockClient.close(); } public void testOneUnconsumedParameters() throws Exception { diff --git a/server/src/test/java/org/elasticsearch/rest/RestControllerTests.java b/server/src/test/java/org/elasticsearch/rest/RestControllerTests.java index 068e5933bee1e..06328734e394d 100644 --- a/server/src/test/java/org/elasticsearch/rest/RestControllerTests.java +++ b/server/src/test/java/org/elasticsearch/rest/RestControllerTests.java @@ -39,6 +39,7 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.client.NoOpNodeClient; import org.elasticsearch.test.rest.FakeRestRequest; +import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.transport.BytesRefRecycler; import org.elasticsearch.usage.UsageService; import org.elasticsearch.xcontent.NamedXContentRegistry; @@ -87,6 +88,7 @@ public class RestControllerTests extends ESTestCase { private RestController restController; private HierarchyCircuitBreakerService circuitBreakerService; private UsageService usageService; + private TestThreadPool threadPool; private NodeClient client; private Tracer tracer; private List methodList; @@ -107,7 +109,8 @@ public void setup() { 
inFlightRequestsBreaker = circuitBreakerService.getBreaker(CircuitBreaker.IN_FLIGHT_REQUESTS); HttpServerTransport httpServerTransport = new TestHttpServerTransport(); - client = new NoOpNodeClient(this.getTestName()); + threadPool = createThreadPool(); + client = new NoOpNodeClient(threadPool); tracer = mock(Tracer.class); restController = new RestController(null, client, circuitBreakerService, usageService, tracer); restController.registerHandler( @@ -126,7 +129,7 @@ public void setup() { @After public void teardown() throws IOException { - IOUtils.close(client); + IOUtils.close(threadPool); } public void testApplyProductSpecificResponseHeaders() { diff --git a/server/src/test/java/org/elasticsearch/rest/action/RestCancellableNodeClientTests.java b/server/src/test/java/org/elasticsearch/rest/action/RestCancellableNodeClientTests.java index a8fe1c53d129a..a21eab1d95911 100644 --- a/server/src/test/java/org/elasticsearch/rest/action/RestCancellableNodeClientTests.java +++ b/server/src/test/java/org/elasticsearch/rest/action/RestCancellableNodeClientTests.java @@ -64,30 +64,29 @@ public void stopThreadPool() { * associated with its corresponding channel. Either way, we need to make sure that no tasks are left in the map. 
*/ public void testCompletedTasks() throws Exception { - try (TestClient testClient = new TestClient(Settings.EMPTY, threadPool, false)) { - int initialHttpChannels = RestCancellableNodeClient.getNumChannels(); - int totalSearches = 0; - List> futures = new ArrayList<>(); - int numChannels = randomIntBetween(1, 30); - for (int i = 0; i < numChannels; i++) { - int numTasks = randomIntBetween(1, 30); - TestHttpChannel channel = new TestHttpChannel(); - totalSearches += numTasks; - for (int j = 0; j < numTasks; j++) { - PlainActionFuture actionFuture = new PlainActionFuture<>(); - RestCancellableNodeClient client = new RestCancellableNodeClient(testClient, channel); - threadPool.generic().submit(() -> client.execute(SearchAction.INSTANCE, new SearchRequest(), actionFuture)); - futures.add(actionFuture); - } - } - for (Future future : futures) { - future.get(); + final var testClient = new TestClient(Settings.EMPTY, threadPool, false); + int initialHttpChannels = RestCancellableNodeClient.getNumChannels(); + int totalSearches = 0; + List> futures = new ArrayList<>(); + int numChannels = randomIntBetween(1, 30); + for (int i = 0; i < numChannels; i++) { + int numTasks = randomIntBetween(1, 30); + TestHttpChannel channel = new TestHttpChannel(); + totalSearches += numTasks; + for (int j = 0; j < numTasks; j++) { + PlainActionFuture actionFuture = new PlainActionFuture<>(); + RestCancellableNodeClient client = new RestCancellableNodeClient(testClient, channel); + threadPool.generic().submit(() -> client.execute(SearchAction.INSTANCE, new SearchRequest(), actionFuture)); + futures.add(actionFuture); } - // no channels get closed in this test, hence we expect as many channels as we created in the map - assertEquals(initialHttpChannels + numChannels, RestCancellableNodeClient.getNumChannels()); - assertEquals(0, RestCancellableNodeClient.getNumTasks()); - assertEquals(totalSearches, testClient.searchRequests.get()); } + for (Future future : futures) { + future.get(); + } + 
// no channels get closed in this test, hence we expect as many channels as we created in the map + assertEquals(initialHttpChannels + numChannels, RestCancellableNodeClient.getNumChannels()); + assertEquals(0, RestCancellableNodeClient.getNumTasks()); + assertEquals(totalSearches, testClient.searchRequests.get()); } /** @@ -95,30 +94,29 @@ public void testCompletedTasks() throws Exception { * removed and all of its corresponding tasks get cancelled. */ public void testCancelledTasks() throws Exception { - try (TestClient nodeClient = new TestClient(Settings.EMPTY, threadPool, true)) { - int initialHttpChannels = RestCancellableNodeClient.getNumChannels(); - int numChannels = randomIntBetween(1, 30); - int totalSearches = 0; - List channels = new ArrayList<>(numChannels); - for (int i = 0; i < numChannels; i++) { - TestHttpChannel channel = new TestHttpChannel(); - channels.add(channel); - int numTasks = randomIntBetween(1, 30); - totalSearches += numTasks; - RestCancellableNodeClient client = new RestCancellableNodeClient(nodeClient, channel); - for (int j = 0; j < numTasks; j++) { - client.execute(SearchAction.INSTANCE, new SearchRequest(), null); - } - assertEquals(numTasks, RestCancellableNodeClient.getNumTasks(channel)); + final var nodeClient = new TestClient(Settings.EMPTY, threadPool, true); + int initialHttpChannels = RestCancellableNodeClient.getNumChannels(); + int numChannels = randomIntBetween(1, 30); + int totalSearches = 0; + List channels = new ArrayList<>(numChannels); + for (int i = 0; i < numChannels; i++) { + TestHttpChannel channel = new TestHttpChannel(); + channels.add(channel); + int numTasks = randomIntBetween(1, 30); + totalSearches += numTasks; + RestCancellableNodeClient client = new RestCancellableNodeClient(nodeClient, channel); + for (int j = 0; j < numTasks; j++) { + client.execute(SearchAction.INSTANCE, new SearchRequest(), null); } - assertEquals(initialHttpChannels + numChannels, RestCancellableNodeClient.getNumChannels()); - for 
(TestHttpChannel channel : channels) { - channel.awaitClose(); - } - assertEquals(initialHttpChannels, RestCancellableNodeClient.getNumChannels()); - assertEquals(totalSearches, nodeClient.searchRequests.get()); - assertEquals(totalSearches, nodeClient.cancelledTasks.size()); + assertEquals(numTasks, RestCancellableNodeClient.getNumTasks(channel)); + } + assertEquals(initialHttpChannels + numChannels, RestCancellableNodeClient.getNumChannels()); + for (TestHttpChannel channel : channels) { + channel.awaitClose(); } + assertEquals(initialHttpChannels, RestCancellableNodeClient.getNumChannels()); + assertEquals(totalSearches, nodeClient.searchRequests.get()); + assertEquals(totalSearches, nodeClient.cancelledTasks.size()); } /** @@ -128,26 +126,25 @@ public void testCancelledTasks() throws Exception { * the newly added listener will be invoked at registration time. */ public void testChannelAlreadyClosed() { - try (TestClient testClient = new TestClient(Settings.EMPTY, threadPool, true)) { - int initialHttpChannels = RestCancellableNodeClient.getNumChannels(); - int numChannels = randomIntBetween(1, 30); - int totalSearches = 0; - for (int i = 0; i < numChannels; i++) { - TestHttpChannel channel = new TestHttpChannel(); - // no need to wait here, there will be no close listener registered, nothing to wait for. 
- channel.close(); - int numTasks = randomIntBetween(1, 5); - totalSearches += numTasks; - RestCancellableNodeClient client = new RestCancellableNodeClient(testClient, channel); - for (int j = 0; j < numTasks; j++) { - // here the channel will be first registered, then straight-away removed from the map as the close listener is invoked - client.execute(SearchAction.INSTANCE, new SearchRequest(), null); - } + final var testClient = new TestClient(Settings.EMPTY, threadPool, true); + int initialHttpChannels = RestCancellableNodeClient.getNumChannels(); + int numChannels = randomIntBetween(1, 30); + int totalSearches = 0; + for (int i = 0; i < numChannels; i++) { + TestHttpChannel channel = new TestHttpChannel(); + // no need to wait here, there will be no close listener registered, nothing to wait for. + channel.close(); + int numTasks = randomIntBetween(1, 5); + totalSearches += numTasks; + RestCancellableNodeClient client = new RestCancellableNodeClient(testClient, channel); + for (int j = 0; j < numTasks; j++) { + // here the channel will be first registered, then straight-away removed from the map as the close listener is invoked + client.execute(SearchAction.INSTANCE, new SearchRequest(), null); } - assertEquals(initialHttpChannels, RestCancellableNodeClient.getNumChannels()); - assertEquals(totalSearches, testClient.searchRequests.get()); - assertEquals(totalSearches, testClient.cancelledTasks.size()); } + assertEquals(initialHttpChannels, RestCancellableNodeClient.getNumChannels()); + assertEquals(totalSearches, testClient.searchRequests.get()); + assertEquals(totalSearches, testClient.cancelledTasks.size()); } private static class TestClient extends NodeClient { diff --git a/server/src/test/java/org/elasticsearch/rest/action/admin/indices/RestAnalyzeActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/admin/indices/RestAnalyzeActionTests.java index 1151965a46ce6..f42c450221383 100644 --- 
a/server/src/test/java/org/elasticsearch/rest/action/admin/indices/RestAnalyzeActionTests.java +++ b/server/src/test/java/org/elasticsearch/rest/action/admin/indices/RestAnalyzeActionTests.java @@ -8,7 +8,6 @@ package org.elasticsearch.rest.action.admin.indices; import org.elasticsearch.action.admin.indices.analyze.AnalyzeAction; -import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.index.analysis.NameOrDefinition; import org.elasticsearch.rest.RestRequest; @@ -95,7 +94,8 @@ public void testParseXContentForAnalyzeRequestWithInvalidJsonThrowsException() { new BytesArray("{invalid_json}"), XContentType.JSON ).build(); - try (NodeClient client = new NoOpNodeClient(this.getClass().getSimpleName())) { + try (var threadPool = createThreadPool()) { + final var client = new NoOpNodeClient(threadPool); var e = expectThrows(XContentParseException.class, () -> action.handleRequest(request, null, client)); assertThat(e.getMessage(), containsString("expecting double-quote")); } diff --git a/server/src/test/java/org/elasticsearch/rest/action/cat/RestCatComponentTemplateActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/cat/RestCatComponentTemplateActionTests.java index 6a422a3fd97aa..33b20cfeee959 100644 --- a/server/src/test/java/org/elasticsearch/rest/action/cat/RestCatComponentTemplateActionTests.java +++ b/server/src/test/java/org/elasticsearch/rest/action/cat/RestCatComponentTemplateActionTests.java @@ -28,6 +28,7 @@ import org.elasticsearch.test.rest.FakeRestChannel; import org.elasticsearch.test.rest.FakeRestRequest; import org.elasticsearch.test.rest.RestActionTestCase; +import org.elasticsearch.threadpool.ThreadPool; import java.util.HashMap; import java.util.Map; @@ -78,7 +79,8 @@ public void testRestCatComponentAction() throws Exception { FakeRestChannel channel = new FakeRestChannel(getCatComponentTemplateRequest, true, 0); // execute action - try (NoOpNodeClient 
nodeClient = buildNodeClient()) { + try (var threadPool = createThreadPool()) { + final var nodeClient = buildNodeClient(threadPool); action.handleRequest(getCatComponentTemplateRequest, channel, nodeClient); } @@ -96,7 +98,8 @@ public void testRestCatComponentActionWithParam() throws Exception { FakeRestChannel channel = new FakeRestChannel(getCatComponentTemplateRequest, true, 0); // execute action - try (NoOpNodeClient nodeClient = buildNodeClient()) { + try (var threadPool = createThreadPool()) { + final var nodeClient = buildNodeClient(threadPool); action.handleRequest(getCatComponentTemplateRequest, channel, nodeClient); } @@ -106,10 +109,10 @@ public void testRestCatComponentActionWithParam() throws Exception { assertThat(channel.capturedResponse().content().utf8ToString(), emptyString()); } - private NoOpNodeClient buildNodeClient() { + private NoOpNodeClient buildNodeClient(ThreadPool threadPool) { ClusterStateResponse clusterStateResponse = new ClusterStateResponse(clusterName, clusterState, false); - return new NoOpNodeClient(getTestName()) { + return new NoOpNodeClient(threadPool) { @Override @SuppressWarnings("unchecked") public void doExecute( diff --git a/server/src/test/java/org/elasticsearch/rest/action/cat/RestTasksActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/cat/RestTasksActionTests.java index 38d39fda898aa..803f9c2fb1b01 100644 --- a/server/src/test/java/org/elasticsearch/rest/action/cat/RestTasksActionTests.java +++ b/server/src/test/java/org/elasticsearch/rest/action/cat/RestTasksActionTests.java @@ -18,6 +18,7 @@ import org.elasticsearch.test.client.NoOpNodeClient; import org.elasticsearch.test.rest.FakeRestChannel; import org.elasticsearch.test.rest.FakeRestRequest; +import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.NamedXContentRegistry; import java.util.List; @@ -33,7 +34,8 @@ public void testConsumesParameters() throws Exception { Map.of("parent_task_id", "the node:3", "nodes", 
"node1,node2", "actions", "*") ).build(); FakeRestChannel fakeRestChannel = new FakeRestChannel(fakeRestRequest, false, 1); - try (NoOpNodeClient nodeClient = buildNodeClient()) { + try (var threadPool = createThreadPool()) { + final var nodeClient = buildNodeClient(threadPool); action.handleRequest(fakeRestRequest, fakeRestChannel, nodeClient); } @@ -41,8 +43,8 @@ public void testConsumesParameters() throws Exception { assertThat(fakeRestChannel.responses().get(), is(1)); } - private NoOpNodeClient buildNodeClient() { - return new NoOpNodeClient(getTestName()) { + private NoOpNodeClient buildNodeClient(ThreadPool threadPool) { + return new NoOpNodeClient(threadPool) { @Override @SuppressWarnings("unchecked") public void doExecute( diff --git a/server/src/test/java/org/elasticsearch/rest/action/document/RestBulkActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/document/RestBulkActionTests.java index d7e3aaf326075..caeb0f36a1000 100644 --- a/server/src/test/java/org/elasticsearch/rest/action/document/RestBulkActionTests.java +++ b/server/src/test/java/org/elasticsearch/rest/action/document/RestBulkActionTests.java @@ -14,7 +14,6 @@ import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.update.UpdateRequest; -import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.rest.RestChannel; @@ -39,15 +38,16 @@ public class RestBulkActionTests extends ESTestCase { public void testBulkPipelineUpsert() throws Exception { SetOnce bulkCalled = new SetOnce<>(); - try (NodeClient verifyingClient = new NoOpNodeClient(this.getTestName()) { - @Override - public void bulk(BulkRequest request, ActionListener listener) { - bulkCalled.set(true); - assertThat(request.requests(), hasSize(2)); - UpdateRequest updateRequest = (UpdateRequest) request.requests().get(1); - 
assertThat(updateRequest.upsertRequest().getPipeline(), equalTo("timestamps")); - } - }) { + try (var threadPool = createThreadPool()) { + final var verifyingClient = new NoOpNodeClient(threadPool) { + @Override + public void bulk(BulkRequest request, ActionListener listener) { + bulkCalled.set(true); + assertThat(request.requests(), hasSize(2)); + UpdateRequest updateRequest = (UpdateRequest) request.requests().get(1); + assertThat(updateRequest.upsertRequest().getPipeline(), equalTo("timestamps")); + } + }; final Map params = new HashMap<>(); params.put("pipeline", "timestamps"); new RestBulkAction(settings(IndexVersion.current()).build()).handleRequest( @@ -68,17 +68,18 @@ public void testListExecutedPipelines() throws Exception { AtomicBoolean bulkCalled = new AtomicBoolean(false); AtomicBoolean listExecutedPipelinesRequest1 = new AtomicBoolean(false); AtomicBoolean listExecutedPipelinesRequest2 = new AtomicBoolean(false); - try (NodeClient verifyingClient = new NoOpNodeClient(this.getTestName()) { - @Override - public void bulk(BulkRequest request, ActionListener listener) { - bulkCalled.set(true); - assertThat(request.requests(), hasSize(2)); - IndexRequest indexRequest1 = (IndexRequest) request.requests().get(0); - listExecutedPipelinesRequest1.set(indexRequest1.getListExecutedPipelines()); - IndexRequest indexRequest2 = (IndexRequest) request.requests().get(1); - listExecutedPipelinesRequest2.set(indexRequest2.getListExecutedPipelines()); - } - }) { + try (var threadPool = createThreadPool()) { + final var verifyingClient = new NoOpNodeClient(threadPool) { + @Override + public void bulk(BulkRequest request, ActionListener listener) { + bulkCalled.set(true); + assertThat(request.requests(), hasSize(2)); + IndexRequest indexRequest1 = (IndexRequest) request.requests().get(0); + listExecutedPipelinesRequest1.set(indexRequest1.getListExecutedPipelines()); + IndexRequest indexRequest2 = (IndexRequest) request.requests().get(1); + 
listExecutedPipelinesRequest2.set(indexRequest2.getListExecutedPipelines()); + } + }; Map params = new HashMap<>(); { new RestBulkAction(settings(IndexVersion.current()).build()).handleRequest( diff --git a/server/src/test/java/org/elasticsearch/search/scroll/RestClearScrollActionTests.java b/server/src/test/java/org/elasticsearch/search/scroll/RestClearScrollActionTests.java index fbc4acc959579..a5fabe32de645 100644 --- a/server/src/test/java/org/elasticsearch/search/scroll/RestClearScrollActionTests.java +++ b/server/src/test/java/org/elasticsearch/search/scroll/RestClearScrollActionTests.java @@ -12,7 +12,6 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.search.ClearScrollRequest; import org.elasticsearch.action.search.ClearScrollResponse; -import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.search.RestClearScrollAction; @@ -41,14 +40,15 @@ public void testParseClearScrollRequestWithInvalidJsonThrowsException() throws E public void testBodyParamsOverrideQueryStringParams() throws Exception { SetOnce scrollCalled = new SetOnce<>(); - try (NodeClient nodeClient = new NoOpNodeClient(this.getTestName()) { - @Override - public void clearScroll(ClearScrollRequest request, ActionListener listener) { - scrollCalled.set(true); - assertThat(request.getScrollIds(), hasSize(1)); - assertThat(request.getScrollIds().get(0), equalTo("BODY")); - } - }) { + try (var threadPool = createThreadPool()) { + final var nodeClient = new NoOpNodeClient(threadPool) { + @Override + public void clearScroll(ClearScrollRequest request, ActionListener listener) { + scrollCalled.set(true); + assertThat(request.getScrollIds(), hasSize(1)); + assertThat(request.getScrollIds().get(0), equalTo("BODY")); + } + }; RestClearScrollAction action = new RestClearScrollAction(); RestRequest request = new 
FakeRestRequest.Builder(xContentRegistry()).withParams( Collections.singletonMap("scroll_id", "QUERY_STRING") diff --git a/server/src/test/java/org/elasticsearch/search/scroll/RestSearchScrollActionTests.java b/server/src/test/java/org/elasticsearch/search/scroll/RestSearchScrollActionTests.java index 2e4a92fa54344..9c521d15d7f74 100644 --- a/server/src/test/java/org/elasticsearch/search/scroll/RestSearchScrollActionTests.java +++ b/server/src/test/java/org/elasticsearch/search/scroll/RestSearchScrollActionTests.java @@ -12,7 +12,6 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchScrollRequest; -import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.search.RestSearchScrollAction; @@ -41,14 +40,15 @@ public void testParseSearchScrollRequestWithInvalidJsonThrowsException() throws public void testBodyParamsOverrideQueryStringParams() throws Exception { SetOnce scrollCalled = new SetOnce<>(); - try (NodeClient nodeClient = new NoOpNodeClient(this.getTestName()) { - @Override - public void searchScroll(SearchScrollRequest request, ActionListener listener) { - scrollCalled.set(true); - assertThat(request.scrollId(), equalTo("BODY")); - assertThat(request.scroll().keepAlive().getStringRep(), equalTo("1m")); - } - }) { + try (var threadPool = createThreadPool()) { + final var nodeClient = new NoOpNodeClient(threadPool) { + @Override + public void searchScroll(SearchScrollRequest request, ActionListener listener) { + scrollCalled.set(true); + assertThat(request.scrollId(), equalTo("BODY")); + assertThat(request.scroll().keepAlive().getStringRep(), equalTo("1m")); + } + }; RestSearchScrollAction action = new RestSearchScrollAction(); Map params = new HashMap<>(); params.put("scroll_id", "QUERY_STRING"); diff --git 
a/server/src/test/java/org/elasticsearch/transport/RemoteClusterAwareClientTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterAwareClientTests.java index b4e72af7a184e..e606da040bab4 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterAwareClientTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterAwareClientTests.java @@ -85,39 +85,36 @@ public void testSearchShards() throws Exception { service.start(); service.acceptIncomingRequests(); - try ( - RemoteClusterAwareClient client = new RemoteClusterAwareClient( - Settings.EMPTY, - threadPool, - service, - "cluster1", - threadPool.executor(TEST_THREAD_POOL_NAME), - randomBoolean() - ) - ) { - SearchShardsRequest searchShardsRequest = new SearchShardsRequest( - new String[] { "test-index" }, - IndicesOptions.strictExpandOpen(), - new MatchAllQueryBuilder(), - null, - null, - randomBoolean(), - null - ); - final SearchShardsResponse searchShardsResponse = PlainActionFuture.get( - future -> client.execute( - SearchShardsAction.INSTANCE, - searchShardsRequest, - ActionListener.runBefore( - future, - () -> assertTrue(Thread.currentThread().getName().contains('[' + TEST_THREAD_POOL_NAME + ']')) - ) - ), - 10, - TimeUnit.SECONDS - ); - assertThat(searchShardsResponse.getNodes(), equalTo(knownNodes)); - } + final var client = new RemoteClusterAwareClient( + Settings.EMPTY, + threadPool, + service, + "cluster1", + threadPool.executor(TEST_THREAD_POOL_NAME), + randomBoolean() + ); + SearchShardsRequest searchShardsRequest = new SearchShardsRequest( + new String[] { "test-index" }, + IndicesOptions.strictExpandOpen(), + new MatchAllQueryBuilder(), + null, + null, + randomBoolean(), + null + ); + final SearchShardsResponse searchShardsResponse = PlainActionFuture.get( + future -> client.execute( + SearchShardsAction.INSTANCE, + searchShardsRequest, + ActionListener.runBefore( + future, + () -> assertTrue(Thread.currentThread().getName().contains('[' + 
TEST_THREAD_POOL_NAME + ']')) + ) + ), + 10, + TimeUnit.SECONDS + ); + assertThat(searchShardsResponse.getNodes(), equalTo(knownNodes)); } } } @@ -145,46 +142,44 @@ public void testSearchShardsThreadContextHeader() { service.start(); service.acceptIncomingRequests(); - try ( - RemoteClusterAwareClient client = new RemoteClusterAwareClient( - Settings.EMPTY, - threadPool, - service, - "cluster1", - EsExecutors.DIRECT_EXECUTOR_SERVICE, - randomBoolean() - ) - ) { - int numThreads = 10; - ExecutorService executorService = Executors.newFixedThreadPool(numThreads); - for (int i = 0; i < numThreads; i++) { - final String threadId = Integer.toString(i); - PlainActionFuture future = new PlainActionFuture<>(); - executorService.submit(() -> { - ThreadContext threadContext = seedTransport.threadPool.getThreadContext(); - threadContext.putHeader("threadId", threadId); - var searchShardsRequest = new SearchShardsRequest( - new String[] { "test-index" }, - IndicesOptions.strictExpandOpen(), - new MatchAllQueryBuilder(), - null, - null, - randomBoolean(), - null - ); - client.execute( - SearchShardsAction.INSTANCE, - searchShardsRequest, - ActionListener.runBefore( - future, - () -> assertThat(seedTransport.threadPool.getThreadContext().getHeader("threadId"), equalTo(threadId)) - ) - ); - assertThat(future.actionGet().getNodes(), equalTo(knownNodes)); - }); - } - ThreadPool.terminate(executorService, 5, TimeUnit.SECONDS); + final var client = new RemoteClusterAwareClient( + Settings.EMPTY, + threadPool, + service, + "cluster1", + EsExecutors.DIRECT_EXECUTOR_SERVICE, + randomBoolean() + ); + + int numThreads = 10; + ExecutorService executorService = Executors.newFixedThreadPool(numThreads); + for (int i = 0; i < numThreads; i++) { + final String threadId = Integer.toString(i); + PlainActionFuture future = new PlainActionFuture<>(); + executorService.submit(() -> { + ThreadContext threadContext = seedTransport.threadPool.getThreadContext(); + threadContext.putHeader("threadId", 
threadId); + var searchShardsRequest = new SearchShardsRequest( + new String[] { "test-index" }, + IndicesOptions.strictExpandOpen(), + new MatchAllQueryBuilder(), + null, + null, + randomBoolean(), + null + ); + client.execute( + SearchShardsAction.INSTANCE, + searchShardsRequest, + ActionListener.runBefore( + future, + () -> assertThat(seedTransport.threadPool.getThreadContext().getHeader("threadId"), equalTo(threadId)) + ) + ); + assertThat(future.actionGet().getNodes(), equalTo(knownNodes)); + }); } + ThreadPool.terminate(executorService, 5, TimeUnit.SECONDS); } } } diff --git a/server/src/test/java/org/elasticsearch/usage/UsageServiceTests.java b/server/src/test/java/org/elasticsearch/usage/UsageServiceTests.java index 136a86457ba34..5cc92db89d5a2 100644 --- a/server/src/test/java/org/elasticsearch/usage/UsageServiceTests.java +++ b/server/src/test/java/org/elasticsearch/usage/UsageServiceTests.java @@ -95,7 +95,8 @@ public void testRestUsage() throws Exception { usageService.addRestHandler(handlerD); usageService.addRestHandler(handlerE); usageService.addRestHandler(handlerF); - try (NodeClient client = new NoOpNodeClient(this.getClass().getSimpleName() + "TestClient")) { + try (var threadPool = createThreadPool()) { + final var client = new NoOpNodeClient(threadPool); handlerA.handleRequest(restRequest, null, client); handlerB.handleRequest(restRequest, null, client); handlerA.handleRequest(restRequest, null, client); diff --git a/test/external-modules/apm-integration/src/javaRestTest/java/org/elasticsearch/test/apmintegration/RecordingApmServer.java b/test/external-modules/apm-integration/src/javaRestTest/java/org/elasticsearch/test/apmintegration/RecordingApmServer.java index 4be16f02f2ce4..c3a8df2c4b150 100644 --- a/test/external-modules/apm-integration/src/javaRestTest/java/org/elasticsearch/test/apmintegration/RecordingApmServer.java +++ 
b/test/external-modules/apm-integration/src/javaRestTest/java/org/elasticsearch/test/apmintegration/RecordingApmServer.java @@ -17,13 +17,14 @@ import org.elasticsearch.xcontent.spi.XContentProvider; import org.junit.rules.ExternalResource; +import java.io.BufferedReader; import java.io.IOException; import java.io.InputStream; +import java.io.InputStreamReader; import java.net.InetAddress; import java.net.InetSocketAddress; import java.nio.charset.StandardCharsets; import java.util.ArrayList; -import java.util.Arrays; import java.util.List; import java.util.concurrent.ArrayBlockingQueue; import java.util.concurrent.TimeUnit; @@ -97,7 +98,7 @@ private void handle(HttpExchange exchange) throws IOException { private List readJsonMessages(InputStream input) throws IOException { // parse NDJSON - return Arrays.stream(new String(input.readAllBytes(), StandardCharsets.UTF_8).split(System.lineSeparator())).toList(); + return new BufferedReader(new InputStreamReader(input, StandardCharsets.UTF_8)).lines().toList(); } public int getPort() { diff --git a/test/framework/src/main/java/org/elasticsearch/action/admin/indices/recovery/TransportRecoveryActionHelper.java b/test/framework/src/main/java/org/elasticsearch/action/admin/indices/recovery/TransportRecoveryActionHelper.java deleted file mode 100644 index 90786fb0e2915..0000000000000 --- a/test/framework/src/main/java/org/elasticsearch/action/admin/indices/recovery/TransportRecoveryActionHelper.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.action.admin.indices.recovery; - -/** - * Helper methods for {@link TransportRecoveryAction}. 
- */ -public class TransportRecoveryActionHelper { - - /** - * Helper method for tests to call {@link TransportRecoveryAction#setOnShardOperation}. - */ - public static void setOnShardOperation(TransportRecoveryAction transportRecoveryAction, Runnable setOnShardOperation) { - transportRecoveryAction.setOnShardOperation(setOnShardOperation); - } -} diff --git a/test/framework/src/main/java/org/elasticsearch/action/support/CancellableActionTestPlugin.java b/test/framework/src/main/java/org/elasticsearch/action/support/CancellableActionTestPlugin.java new file mode 100644 index 0000000000000..115ea63fb243e --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/action/support/CancellableActionTestPlugin.java @@ -0,0 +1,155 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.action.support; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.core.Releasable; +import org.elasticsearch.plugins.ActionPlugin; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.tasks.CancellableTask; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.tasks.TaskCancelledException; +import org.elasticsearch.tasks.TaskManager; + +import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicReference; + +import static org.elasticsearch.ExceptionsHelper.unwrapCause; +import static org.elasticsearch.action.support.ActionTestUtils.assertNoFailureListener; +import static org.elasticsearch.test.ESIntegTestCase.internalCluster; +import static org.elasticsearch.test.ESTestCase.asInstanceOf; +import static org.elasticsearch.test.ESTestCase.randomInt; +import static org.elasticsearch.test.ESTestCase.safeAwait; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.instanceOf; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertThat; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +/** + * Utility plugin that captures the invocation of an action on a node after the task has been registered with the {@link TaskManager}, + * cancels it (e.g. by closing the connection used for the original REST request), verifies that the corresponding task is cancelled, then + * lets the action execution proceed in order to verify that it fails with a {@link TaskCancelledException}. This allows to verify a few key + * aspects of the cancellability of tasks: + *
      + *
    • The task that the request creates is cancellable.
    • + *
    • The REST handler propagates cancellation to the task it starts.
    • + *
    • The action implementation checks for cancellation at least once.
    • + *
    + * However, note that this is implemented as an {@link ActionFilter} it blocks and cancels the action before it even starts executing on the + * local node, so it does not verify that the cancellation is processed promptly at all stages of the execution of the action, nor that + * cancellations are propagated correctly to subsidiary actions. + */ +public class CancellableActionTestPlugin extends Plugin implements ActionPlugin { + + public interface CapturingAction extends Releasable { + /** + * @param doCancel callback to invoke when the specified action has started which should cancel the action. + */ + void captureAndCancel(Runnable doCancel); + } + + /** + * Returns a {@link CapturingAction}, typically for use in a try-with-resources block, which can be used to capture and cancel exactly + * one invocation of the specified action on the specified node. + */ + public static CapturingAction capturingActionOnNode(String actionName, String nodeName) { + final var plugins = internalCluster().getInstance(PluginsService.class, nodeName) + .filterPlugins(CancellableActionTestPlugin.class) + .toList(); + assertThat("unique " + CancellableActionTestPlugin.class.getCanonicalName() + " plugin not found", plugins, hasSize(1)); + return plugins.get(0).capturingAction(actionName); + } + + private volatile String capturedActionName; + private final AtomicReference> capturedRef = new AtomicReference<>(); + + private record Captured(Runnable doCancel, CountDownLatch countDownLatch) {} + + private CapturingAction capturingAction(String actionName) { + final var captureListener = new SubscribableListener(); + capturedActionName = actionName; + assertTrue(capturedRef.compareAndSet(null, captureListener)); + + final var completionLatch = new CountDownLatch(1); + + return new CapturingAction() { + @Override + public void captureAndCancel(Runnable doCancel) { + assertFalse(captureListener.isDone()); + captureListener.onResponse(new Captured(doCancel, completionLatch)); + 
safeAwait(completionLatch); + } + + @Override + public void close() { + // verify that a request was indeed captured + assertNull(capturedRef.get()); + // and that it completed + assertEquals(0, completionLatch.getCount()); + } + }; + } + + @Override + public List getActionFilters() { + return List.of(new ActionFilter() { + + private final int order = randomInt(); + + @Override + public int order() { + return order; + } + + @Override + public void apply( + Task task, + String action, + Request request, + ActionListener listener, + ActionFilterChain chain + ) { + if (action.equals(capturedActionName)) { + final var capturingListener = capturedRef.getAndSet(null); + if (capturingListener != null) { + final var cancellableTask = asInstanceOf(CancellableTask.class, task); + capturingListener.addListener(assertNoFailureListener(captured -> { + cancellableTask.addListener(() -> chain.proceed(task, action, request, new ActionListener<>() { + @Override + public void onResponse(Response response) { + fail("cancelled action should not succeed, but got " + response); + } + + @Override + public void onFailure(Exception e) { + assertThat(unwrapCause(e), instanceOf(TaskCancelledException.class)); + listener.onFailure(e); + captured.countDownLatch().countDown(); + } + })); + assertFalse(cancellableTask.isCancelled()); + captured.doCancel().run(); + })); + return; + } + } + + chain.proceed(task, action, request, listener); + } + }); + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryVectorBuilderTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryVectorBuilderTestCase.java index eb4a129f09110..ab5e3a7555214 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryVectorBuilderTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryVectorBuilderTestCase.java @@ -23,6 +23,7 @@ import org.elasticsearch.search.vectors.KnnSearchBuilder; import 
org.elasticsearch.search.vectors.QueryVectorBuilder; import org.elasticsearch.test.client.NoOpClient; +import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.junit.Before; @@ -114,7 +115,8 @@ public final void testKnnSearchRewrite() throws Exception { KnnSearchBuilder::new, TransportVersion.current() ); - try (NoOpClient client = new AssertingClient(expected, queryVectorBuilder)) { + try (var threadPool = createThreadPool()) { + final var client = new AssertingClient(threadPool, expected, queryVectorBuilder); QueryRewriteContext context = new QueryRewriteContext(null, client, null); PlainActionFuture future = new PlainActionFuture<>(); Rewriteable.rewriteAndFetch(randomFrom(serialized, searchBuilder), context, future); @@ -128,7 +130,8 @@ public final void testKnnSearchRewrite() throws Exception { public final void testVectorFetch() throws Exception { float[] expected = randomVector(randomIntBetween(10, 1024)); T queryVectorBuilder = createTestInstance(expected); - try (NoOpClient client = new AssertingClient(expected, queryVectorBuilder)) { + try (var threadPool = createThreadPool()) { + final var client = new AssertingClient(threadPool, expected, queryVectorBuilder); PlainActionFuture future = new PlainActionFuture<>(); queryVectorBuilder.buildVector(client, future); assertThat(future.get(), equalTo(expected)); @@ -163,8 +166,8 @@ private class AssertingClient extends NoOpClient { private final float[] array; private final T queryVectorBuilder; - AssertingClient(float[] array, T queryVectorBuilder) { - super("query_vector_builder_tests"); + AssertingClient(ThreadPool threadPool, float[] array, T queryVectorBuilder) { + super(threadPool); this.array = array; this.queryVectorBuilder = queryVectorBuilder; } diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index 832902f52deb2..37e4176e1818d 100644 
--- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -158,9 +158,7 @@ import java.lang.annotation.Retention; import java.lang.annotation.RetentionPolicy; import java.lang.annotation.Target; -import java.net.InetAddress; import java.net.InetSocketAddress; -import java.net.URL; import java.nio.file.Files; import java.nio.file.Path; import java.util.ArrayList; @@ -1983,46 +1981,7 @@ protected Collection> nodePlugins() { return Collections.emptyList(); } - private ExternalTestCluster buildExternalCluster(String clusterAddresses, String clusterName) throws IOException { - String[] stringAddresses = clusterAddresses.split(","); - TransportAddress[] transportAddresses = new TransportAddress[stringAddresses.length]; - int i = 0; - for (String stringAddress : stringAddresses) { - URL url = new URL("http://" + stringAddress); - InetAddress inetAddress = InetAddress.getByName(url.getHost()); - transportAddresses[i++] = new TransportAddress(new InetSocketAddress(inetAddress, url.getPort())); - } - return new ExternalTestCluster( - createTempDir(), - externalClusterClientSettings(), - nodePlugins(), - getClientWrapper(), - clusterName, - transportAddresses - ); - } - - protected Settings externalClusterClientSettings() { - return Settings.EMPTY; - } - - protected boolean ignoreExternalCluster() { - return false; - } - protected TestCluster buildTestCluster(Scope scope, long seed) throws IOException { - String clusterAddresses = System.getProperty(TESTS_CLUSTER); - if (Strings.hasLength(clusterAddresses) && ignoreExternalCluster() == false) { - if (scope == Scope.TEST) { - throw new IllegalArgumentException("Cannot run TEST scope test with " + TESTS_CLUSTER); - } - String clusterName = System.getProperty(TESTS_CLUSTER_NAME); - if (Strings.isNullOrEmpty(clusterName)) { - throw new IllegalArgumentException("External test cluster name must be provided"); - } - return 
buildExternalCluster(clusterAddresses, clusterName); - } - final String nodePrefix = switch (scope) { case TEST -> TEST_CLUSTER_NODE_PREFIX; case SUITE -> SUITE_CLUSTER_NODE_PREFIX; @@ -2436,6 +2395,10 @@ protected static RestClient createRestClient() { return createRestClient(null, "http"); } + protected static RestClient createRestClient(String node) { + return createRestClient(client(node).admin().cluster().prepareNodesInfo("_local").get().getNodes(), null, "http"); + } + protected static RestClient createRestClient(RestClientBuilder.HttpClientConfigCallback httpClientConfigCallback, String protocol) { NodesInfoResponse nodesInfoResponse = clusterAdmin().prepareNodesInfo().get(); assertFalse(nodesInfoResponse.hasFailures()); diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java index d3f01f03ed61c..5589e1b94281d 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java @@ -104,6 +104,8 @@ import org.elasticsearch.search.MockSearchService; import org.elasticsearch.test.junit.listeners.LoggingListener; import org.elasticsearch.test.junit.listeners.ReproduceInfoPrinter; +import org.elasticsearch.threadpool.ExecutorBuilder; +import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.LeakTracker; import org.elasticsearch.transport.netty4.Netty4Plugin; @@ -1263,6 +1265,10 @@ public static boolean waitUntil(BooleanSupplier breakSupplier, long maxWaitTime, return breakSupplier.getAsBoolean(); } + protected TestThreadPool createThreadPool(ExecutorBuilder... executorBuilders) { + return new TestThreadPool(getTestName(), executorBuilders); + } + public static boolean terminate(ExecutorService... 
services) { boolean terminated = true; for (ExecutorService service : services) { diff --git a/test/framework/src/main/java/org/elasticsearch/test/ExternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/ExternalTestCluster.java index dcddbbbcece64..3e3759601a1c9 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ExternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ExternalTestCluster.java @@ -51,7 +51,11 @@ * External cluster to run the tests against. * It is a pure immutable test cluster that allows to send requests to a pre-existing cluster * and supports by nature all the needed test operations like wipeIndices etc. + * + * @deprecated not a realistic test setup since the removal of the transport client, use {@link ESIntegTestCase} for internal-cluster tests + * or {@link org.elasticsearch.test.rest.ESRestTestCase} otherwise. */ +@Deprecated(forRemoval = true) public final class ExternalTestCluster extends TestCluster { private static final Logger logger = LogManager.getLogger(ExternalTestCluster.class); @@ -131,14 +135,14 @@ public ExternalTestCluster( logger.info("Setup ExternalTestCluster [{}] made of [{}] nodes", nodeInfos.getClusterName().value(), size()); } catch (NodeValidationException e) { try { - IOUtils.close(wrappedClient, mockNode); + IOUtils.close(mockNode); } catch (IOException e1) { e.addSuppressed(e1); } throw new ElasticsearchException(e); } catch (Exception e) { try { - IOUtils.close(wrappedClient, mockNode); + IOUtils.close(mockNode); } catch (IOException e1) { e.addSuppressed(e1); } @@ -178,7 +182,7 @@ public InetSocketAddress[] httpAddresses() { @Override public void close() throws IOException { - IOUtils.close(client, node); + IOUtils.close(node); } @Override diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index 8abf10a773764..0ce970943cc0b 100644 --- 
a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -64,7 +64,6 @@ import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.IOUtils; import org.elasticsearch.core.Nullable; -import org.elasticsearch.core.Releasables; import org.elasticsearch.core.TimeValue; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; @@ -977,7 +976,6 @@ private Client getOrBuildNodeClient() { void resetClient() { if (closed.get() == false) { - Releasables.close(nodeClient); nodeClient = null; } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/client/NoOpClient.java b/test/framework/src/main/java/org/elasticsearch/test/client/NoOpClient.java index a9f4391975f68..55aaabf74ba71 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/client/NoOpClient.java +++ b/test/framework/src/main/java/org/elasticsearch/test/client/NoOpClient.java @@ -8,18 +8,14 @@ package org.elasticsearch.test.client; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.ActionType; import org.elasticsearch.client.internal.support.AbstractClient; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; -import java.util.concurrent.TimeUnit; - /** * Client that always responds with {@code null} to every request. Override {@link #doExecute(ActionType, ActionRequest, ActionListener)} * for testing. @@ -27,20 +23,11 @@ * See also {@link NoOpNodeClient} if you need to mock a {@link org.elasticsearch.client.internal.node.NodeClient}. */ public class NoOpClient extends AbstractClient { - /** - * Build with {@link ThreadPool}. 
This {@linkplain ThreadPool} is terminated on {@link #close()}. - */ + public NoOpClient(ThreadPool threadPool) { super(Settings.EMPTY, threadPool); } - /** - * Create a new {@link TestThreadPool} for this client. This {@linkplain TestThreadPool} is terminated on {@link #close()}. - */ - public NoOpClient(String testName) { - super(Settings.EMPTY, new TestThreadPool(testName)); - } - @Override protected void doExecute( ActionType action, @@ -49,13 +36,4 @@ protected void ) { listener.onResponse(null); } - - @Override - public void close() { - try { - ThreadPool.terminate(threadPool(), 10, TimeUnit.SECONDS); - } catch (Exception e) { - throw new ElasticsearchException(e.getMessage(), e); - } - } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/client/NoOpNodeClient.java b/test/framework/src/main/java/org/elasticsearch/test/client/NoOpNodeClient.java index 0df930048f9c7..766c9176c6846 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/client/NoOpNodeClient.java +++ b/test/framework/src/main/java/org/elasticsearch/test/client/NoOpNodeClient.java @@ -8,7 +8,6 @@ package org.elasticsearch.test.client; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionResponse; @@ -20,14 +19,12 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskManager; -import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.RemoteClusterService; import org.elasticsearch.transport.Transport; import java.util.Map; import java.util.concurrent.Executor; -import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; import java.util.function.Supplier; @@ -41,20 +38,10 @@ public class NoOpNodeClient extends NodeClient { private final AtomicLong executionCount = new 
AtomicLong(0); - /** - * Build with {@link ThreadPool}. This {@linkplain ThreadPool} is terminated on {@link #close()}. - */ public NoOpNodeClient(ThreadPool threadPool) { super(Settings.EMPTY, threadPool); } - /** - * Create a new {@link TestThreadPool} for this client. This {@linkplain TestThreadPool} is terminated on {@link #close()}. - */ - public NoOpNodeClient(String testName) { - super(Settings.EMPTY, new TestThreadPool(testName)); - } - @Override public void doExecute( ActionType action, @@ -97,17 +84,4 @@ public String getLocalNodeId() { public Client getRemoteClusterClient(String clusterAlias, Executor responseExecutor) { return null; } - - @Override - public void close() { - try { - ThreadPool.terminate(threadPool(), 10, TimeUnit.SECONDS); - } catch (Exception e) { - throw new ElasticsearchException(e.getMessage(), e); - } - } - - public long getExecutionCount() { - return executionCount.get(); - } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/RestActionTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/RestActionTestCase.java index 1229b3470775f..9e638425d5c5c 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/RestActionTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/RestActionTestCase.java @@ -20,6 +20,8 @@ import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.client.NoOpNodeClient; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.usage.UsageService; import org.junit.After; import org.junit.Before; @@ -35,17 +37,19 @@ */ public abstract class RestActionTestCase extends ESTestCase { private RestController controller; + private TestThreadPool threadPool; protected VerifyingClient verifyingClient; @Before public void setUpController() { - verifyingClient = new VerifyingClient(this.getTestName()); + threadPool = 
createThreadPool(); + verifyingClient = new VerifyingClient(threadPool); controller = new RestController(null, verifyingClient, new NoneCircuitBreakerService(), new UsageService(), Tracer.NOOP); } @After public void tearDownController() { - verifyingClient.close(); + threadPool.close(); } /** @@ -78,8 +82,8 @@ public static final class VerifyingClient extends NoOpNodeClient { AtomicReference, ActionRequest, ActionResponse>> executeVerifier = new AtomicReference<>(); AtomicReference, ActionRequest, ActionResponse>> executeLocallyVerifier = new AtomicReference<>(); - public VerifyingClient(String testName) { - super(testName); + public VerifyingClient(ThreadPool threadPool) { + super(threadPool); reset(); } diff --git a/test/framework/src/main/java/org/elasticsearch/threadpool/TestThreadPool.java b/test/framework/src/main/java/org/elasticsearch/threadpool/TestThreadPool.java index 58f06cbbe9d40..e8a853989e8e5 100644 --- a/test/framework/src/main/java/org/elasticsearch/threadpool/TestThreadPool.java +++ b/test/framework/src/main/java/org/elasticsearch/threadpool/TestThreadPool.java @@ -10,14 +10,16 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.core.Releasable; import org.elasticsearch.node.Node; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutorService; import java.util.concurrent.ThreadFactory; import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; -public class TestThreadPool extends ThreadPool { +public class TestThreadPool extends ThreadPool implements Releasable { private final CountDownLatch blockingLatch = new CountDownLatch(1); private volatile boolean returnRejectingExecutor = false; @@ -98,4 +100,9 @@ private synchronized void createRejectingExecutor() { throw new RuntimeException(e); } } + + @Override + public void close() { + ThreadPool.terminate(this, 10, TimeUnit.SECONDS); + } } diff --git 
a/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/action/GetAutoscalingCapacityRestCancellationIT.java b/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/action/GetAutoscalingCapacityRestCancellationIT.java index d3d7744bddc4c..afe8759acc7a3 100644 --- a/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/action/GetAutoscalingCapacityRestCancellationIT.java +++ b/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/action/GetAutoscalingCapacityRestCancellationIT.java @@ -60,11 +60,6 @@ protected Collection> nodePlugins() { return Collections.unmodifiableList(result); } - @Override - protected boolean ignoreExternalCluster() { - return true; - } - public void testCapacityRestCancellationAndResponse() throws Exception { internalCluster().startMasterOnlyNode(); diff --git a/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/capacity/nodeinfo/AutoscalingNodesInfoServiceTests.java b/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/capacity/nodeinfo/AutoscalingNodesInfoServiceTests.java index 8d357a5f050ca..c6fb0613b3e8e 100644 --- a/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/capacity/nodeinfo/AutoscalingNodesInfoServiceTests.java +++ b/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/capacity/nodeinfo/AutoscalingNodesInfoServiceTests.java @@ -42,6 +42,8 @@ import org.elasticsearch.monitor.os.OsInfo; import org.elasticsearch.monitor.os.OsStats; import org.elasticsearch.test.client.NoOpClient; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.autoscaling.AutoscalingMetadata; import org.elasticsearch.xpack.autoscaling.AutoscalingTestCase; import org.elasticsearch.xpack.autoscaling.policy.AutoscalingPolicy; @@ -73,6 +75,7 
@@ public class AutoscalingNodesInfoServiceTests extends AutoscalingTestCase { + private TestThreadPool threadPool; private NodeStatsClient client; private AutoscalingNodeInfoService service; private TimeValue fetchTimeout; @@ -83,7 +86,8 @@ public class AutoscalingNodesInfoServiceTests extends AutoscalingTestCase { @Override public void setUp() throws Exception { super.setUp(); - client = new NodeStatsClient(); + threadPool = createThreadPool(); + client = new NodeStatsClient(threadPool); final ClusterService clusterService = mock(ClusterService.class); Settings settings; if (randomBoolean()) { @@ -105,8 +109,8 @@ public void setUp() throws Exception { @After @Override public void tearDown() throws Exception { + threadPool.close(); super.tearDown(); - client.close(); } public void testAddRemoveNode() { @@ -470,8 +474,8 @@ private class NodeStatsClient extends NoOpClient { private BiConsumer> responderStats; private BiConsumer> responderInfo; - private NodeStatsClient() { - super(getTestName()); + private NodeStatsClient(ThreadPool threadPool) { + super(threadPool); } public void respondInfo(NodesInfoResponse response, Runnable whileFetching) { diff --git a/x-pack/plugin/build.gradle b/x-pack/plugin/build.gradle index 17495a3568923..eae3031512d4f 100644 --- a/x-pack/plugin/build.gradle +++ b/x-pack/plugin/build.gradle @@ -177,16 +177,18 @@ tasks.named("yamlRestTestV7CompatTransform").configure { task -> } tasks.register('enforceApiSpecsConvention').configure { + def mainApiSpecs = fileTree('src/test/resources/rest-api-spec/api') doLast { - if (fileTree('src/test/resources/rest-api-spec/api').files) { + if (mainApiSpecs.files) { throw new GradleException("There are REST specs in src/test source set. 
These should be moved to the :rest-api-spec project.") } } } tasks.register('enforceYamlTestConvention').configure { + def mainYamlFiles = fileTree('src/test/resources/rest-api-spec/test') doLast { - if (fileTree('src/test/resources/rest-api-spec/test').files) { + if (mainYamlFiles.files) { throw new GradleException("There are YAML tests in src/test source set. These should be moved to src/yamlRestTest.") } } diff --git a/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/cluster/routing/allocation/DataTierShardAvailabilityHealthIndicatorIT.java b/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/cluster/routing/allocation/DataTierShardAvailabilityHealthIndicatorIT.java index 7737a5b42dfae..82784d9112a1b 100644 --- a/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/cluster/routing/allocation/DataTierShardAvailabilityHealthIndicatorIT.java +++ b/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/xpack/cluster/routing/allocation/DataTierShardAvailabilityHealthIndicatorIT.java @@ -17,7 +17,7 @@ import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.cluster.routing.allocation.DataTier; -import org.elasticsearch.cluster.routing.allocation.ShardsAvailabilityHealthIndicatorService; +import org.elasticsearch.cluster.routing.allocation.shards.ShardsAvailabilityHealthIndicatorService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.TimeValue; @@ -42,7 +42,7 @@ import static org.hamcrest.Matchers.hasItem; /** - * Contains all integration tests for the {@link org.elasticsearch.cluster.routing.allocation.ShardsAvailabilityHealthIndicatorService} + * Contains all integration tests for the {@link ShardsAvailabilityHealthIndicatorService} * that require the data tiers allocation decider logic. 
*/ @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicySecurityClient.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicySecurityClient.java index c5ac78c0a330b..e8f76b655b70e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicySecurityClient.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicySecurityClient.java @@ -39,12 +39,6 @@ public LifecyclePolicySecurityClient(Client client, String origin, Map void doExecute( ActionType action, diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignment.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignment.java index f69be31939b32..d27d325a5c596 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignment.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignment.java @@ -9,6 +9,7 @@ import org.elasticsearch.ResourceAlreadyExistsException; import org.elasticsearch.ResourceNotFoundException; +import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; import org.elasticsearch.cluster.SimpleDiffable; import org.elasticsearch.common.Randomness; @@ -96,6 +97,10 @@ public final class TrainedModelAssignment implements SimpleDiffable void @Before public void setupComponents() { - clientWithBasicLicense = new MockClientLicenseCheck(getTestName(), "basic", LicenseStatus.ACTIVE); - clientWithExpiredBasicLicense = new MockClientLicenseCheck(getTestName(), "basic", LicenseStatus.EXPIRED); + clientThreadPool = createThreadPool(); + clientWithBasicLicense = new MockClientLicenseCheck(clientThreadPool, "basic", 
LicenseStatus.ACTIVE); + clientWithExpiredBasicLicense = new MockClientLicenseCheck(clientThreadPool, "basic", LicenseStatus.EXPIRED); LicensedFeature.Momentary feature = LicensedFeature.momentary(null, "feature", License.OperationMode.BASIC); platinumFeature = LicensedFeature.momentary(null, "platinum-feature", License.OperationMode.PLATINUM); remoteClusterLicenseCheckerBasic = new RemoteClusterLicenseChecker(clientWithBasicLicense, feature); - clientWithPlatinumLicense = new MockClientLicenseCheck(getTestName(), "platinum", LicenseStatus.ACTIVE); - clientWithTrialLicense = new MockClientLicenseCheck(getTestName(), "trial", LicenseStatus.ACTIVE); + clientWithPlatinumLicense = new MockClientLicenseCheck(clientThreadPool, "platinum", LicenseStatus.ACTIVE); + clientWithTrialLicense = new MockClientLicenseCheck(clientThreadPool, "trial", LicenseStatus.ACTIVE); } @After public void closeComponents() throws Exception { - clientWithBasicLicense.close(); - clientWithExpiredBasicLicense.close(); - clientWithPlatinumLicense.close(); - clientWithTrialLicense.close(); + clientThreadPool.close(); ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/CleanupShrinkIndexStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/CleanupShrinkIndexStepTests.java index 5269b3abd826f..90482be334363 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/CleanupShrinkIndexStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/CleanupShrinkIndexStepTests.java @@ -18,6 +18,7 @@ import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.test.client.NoOpClient; +import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.ilm.Step.StepKey; import java.util.Map; @@ -100,7 +101,8 @@ public void testPerformAction() { 
.metadata(Metadata.builder().put(indexMetadata, true).build()) .build(); - try (NoOpClient client = getDeleteIndexRequestAssertingClient(shrinkIndexName)) { + try (var threadPool = createThreadPool()) { + final var client = getDeleteIndexRequestAssertingClient(threadPool, shrinkIndexName); CleanupShrinkIndexStep step = new CleanupShrinkIndexStep(randomStepKey(), randomStepKey(), client); step.performAction(indexMetadata, clusterState, null, ActionListener.noop()); } @@ -126,14 +128,15 @@ public void testDeleteSkippedIfManagedIndexIsShrunkAndSourceDoesntExist() { .metadata(Metadata.builder().put(shrunkIndexMetadata, true).build()) .build(); - try (NoOpClient client = getFailingIfCalledClient()) { + try (var threadPool = createThreadPool()) { + final var client = getFailingIfCalledClient(threadPool); CleanupShrinkIndexStep step = new CleanupShrinkIndexStep(randomStepKey(), randomStepKey(), client); step.performAction(shrunkIndexMetadata, clusterState, null, ActionListener.noop()); } } - private NoOpClient getDeleteIndexRequestAssertingClient(String shrinkIndexName) { - return new NoOpClient(getTestName()) { + private NoOpClient getDeleteIndexRequestAssertingClient(ThreadPool threadPool, String shrinkIndexName) { + return new NoOpClient(threadPool) { @Override protected void doExecute( ActionType action, @@ -147,8 +150,8 @@ protected void }; } - private NoOpClient getFailingIfCalledClient() { - return new NoOpClient(getTestName()) { + private NoOpClient getFailingIfCalledClient(ThreadPool threadPool) { + return new NoOpClient(threadPool) { @Override protected void doExecute( ActionType action, diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/CleanupSnapshotStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/CleanupSnapshotStepTests.java index c279e2e82738e..5fddcd51a6614 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/CleanupSnapshotStepTests.java +++ 
b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/CleanupSnapshotStepTests.java @@ -19,6 +19,7 @@ import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.test.client.NoOpClient; +import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.ilm.Step.StepKey; import java.util.Map; @@ -108,14 +109,15 @@ public void testPerformAction() { .metadata(Metadata.builder().put(indexMetadata, true).build()) .build(); - try (NoOpClient client = getDeleteSnapshotRequestAssertingClient(snapshotName)) { + try (var threadPool = createThreadPool()) { + final var client = getDeleteSnapshotRequestAssertingClient(threadPool, snapshotName); CleanupSnapshotStep step = new CleanupSnapshotStep(randomStepKey(), randomStepKey(), client); step.performAction(indexMetadata, clusterState, null, ActionListener.noop()); } } - private NoOpClient getDeleteSnapshotRequestAssertingClient(String expectedSnapshotName) { - return new NoOpClient(getTestName()) { + private NoOpClient getDeleteSnapshotRequestAssertingClient(ThreadPool threadPool, String expectedSnapshotName) { + return new NoOpClient(threadPool) { @Override protected void doExecute( ActionType action, diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/CleanupTargetIndexStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/CleanupTargetIndexStepTests.java index cad79e1b8c2ca..dea53b2c736ac 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/CleanupTargetIndexStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/CleanupTargetIndexStepTests.java @@ -18,6 +18,7 @@ import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.test.client.NoOpClient; +import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.ilm.Step.StepKey; import 
java.util.Map; @@ -118,7 +119,8 @@ public void testPerformAction() { .metadata(Metadata.builder().put(indexMetadata, true).build()) .build(); - try (NoOpClient client = getDeleteIndexRequestAssertingClient(shrinkIndexName)) { + try (var threadPool = createThreadPool()) { + final var client = getDeleteIndexRequestAssertingClient(threadPool, shrinkIndexName); CleanupTargetIndexStep step = new CleanupTargetIndexStep( randomStepKey(), randomStepKey(), @@ -150,7 +152,8 @@ public void testDeleteSkippedIfManagedIndexIsShrunkAndSourceDoesntExist() { .metadata(Metadata.builder().put(shrunkIndexMetadata, true).build()) .build(); - try (NoOpClient client = getFailingIfCalledClient()) { + try (var threadPool = createThreadPool()) { + final var client = getFailingIfCalledClient(threadPool); CleanupTargetIndexStep step = new CleanupTargetIndexStep( randomStepKey(), randomStepKey(), @@ -162,8 +165,8 @@ public void testDeleteSkippedIfManagedIndexIsShrunkAndSourceDoesntExist() { } } - private NoOpClient getDeleteIndexRequestAssertingClient(String shrinkIndexName) { - return new NoOpClient(getTestName()) { + private NoOpClient getDeleteIndexRequestAssertingClient(ThreadPool threadPool, String shrinkIndexName) { + return new NoOpClient(threadPool) { @Override protected void doExecute( ActionType action, @@ -177,8 +180,8 @@ protected void }; } - private NoOpClient getFailingIfCalledClient() { - return new NoOpClient(getTestName()) { + private NoOpClient getFailingIfCalledClient(ThreadPool threadPool) { + return new NoOpClient(threadPool) { @Override protected void doExecute( ActionType action, diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/CreateSnapshotStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/CreateSnapshotStepTests.java index ace616d837e94..b954162aee6f2 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/CreateSnapshotStepTests.java +++ 
b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/CreateSnapshotStepTests.java @@ -20,6 +20,7 @@ import org.elasticsearch.index.IndexVersion; import org.elasticsearch.snapshots.SnapshotNameAlreadyInUseException; import org.elasticsearch.test.client.NoOpClient; +import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.ilm.Step.StepKey; import java.util.HashMap; @@ -132,7 +133,8 @@ public void testPerformAction() { .metadata(Metadata.builder().put(indexMetadata, true).build()) .build(); - try (NoOpClient client = getCreateSnapshotRequestAssertingClient(repository, snapshotName, indexName)) { + try (var threadPool = createThreadPool()) { + final var client = getCreateSnapshotRequestAssertingClient(threadPool, repository, snapshotName, indexName); CreateSnapshotStep step = new CreateSnapshotStep(randomStepKey(), randomStepKey(), randomStepKey(), client); step.performAction(indexMetadata, clusterState, null, ActionListener.noop()); } @@ -158,7 +160,8 @@ public void testNextStepKey() { .metadata(Metadata.builder().put(indexMetadata, true).build()) .build(); { - try (NoOpClient client = new NoOpClient(getTestName())) { + try (var threadPool = createThreadPool()) { + final var client = new NoOpClient(threadPool); StepKey nextKeyOnComplete = randomStepKey(); StepKey nextKeyOnIncomplete = randomStepKey(); CreateSnapshotStep completeStep = new CreateSnapshotStep(randomStepKey(), nextKeyOnComplete, nextKeyOnIncomplete, client) { @@ -173,7 +176,8 @@ void createSnapshot(IndexMetadata indexMetadata, ActionListener listene } { - try (NoOpClient client = new NoOpClient(getTestName())) { + try (var threadPool = createThreadPool()) { + final var client = new NoOpClient(threadPool); StepKey nextKeyOnComplete = randomStepKey(); StepKey nextKeyOnIncomplete = randomStepKey(); CreateSnapshotStep incompleteStep = new CreateSnapshotStep( @@ -193,7 +197,8 @@ void createSnapshot(IndexMetadata indexMetadata, ActionListener listene } { - try 
(NoOpClient client = new NoOpClient(getTestName())) { + try (var threadPool = createThreadPool()) { + final var client = new NoOpClient(threadPool); StepKey nextKeyOnComplete = randomStepKey(); StepKey nextKeyOnIncomplete = randomStepKey(); CreateSnapshotStep doubleInvocationStep = new CreateSnapshotStep( @@ -213,8 +218,13 @@ void createSnapshot(IndexMetadata indexMetadata, ActionListener listene } } - private NoOpClient getCreateSnapshotRequestAssertingClient(String expectedRepoName, String expectedSnapshotName, String indexName) { - return new NoOpClient(getTestName()) { + private NoOpClient getCreateSnapshotRequestAssertingClient( + ThreadPool threadPool, + String expectedRepoName, + String expectedSnapshotName, + String indexName + ) { + return new NoOpClient(threadPool) { @Override protected void doExecute( ActionType action, diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/DownsampleStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/DownsampleStepTests.java index 421f6bd30f432..e3731c4416491 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/DownsampleStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/DownsampleStepTests.java @@ -251,7 +251,8 @@ public void testNextStepKey() { .metadata(Metadata.builder().put(sourceIndexMetadata, true).build()) .build(); { - try (NoOpClient client = new NoOpClient(getTestName())) { + try (var threadPool = createThreadPool()) { + final var client = new NoOpClient(threadPool); StepKey nextKey = randomStepKey(); DateHistogramInterval fixedInterval = ConfigTestHelpers.randomInterval(); TimeValue timeout = DownsampleAction.DEFAULT_WAIT_TIMEOUT; @@ -265,7 +266,8 @@ void performDownsampleIndex(String indexName, String downsampleIndexName, Action } } { - try (NoOpClient client = new NoOpClient(getTestName())) { + try (var threadPool = createThreadPool()) { + final var client = new NoOpClient(threadPool); 
StepKey nextKey = randomStepKey(); DateHistogramInterval fixedInterval = ConfigTestHelpers.randomInterval(); TimeValue timeout = DownsampleAction.DEFAULT_WAIT_TIMEOUT; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyClientTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyClientTests.java index ed9a4d45b681f..162794865ba5a 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyClientTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyClientTests.java @@ -56,15 +56,8 @@ public void testExecuteWithHeadersAsyncNoHeaders() throws InterruptedException { SearchRequest request = new SearchRequest("foo"); - try ( - LifecyclePolicySecurityClient policyClient = new LifecyclePolicySecurityClient( - client, - ClientHelper.INDEX_LIFECYCLE_ORIGIN, - Collections.emptyMap() - ) - ) { - policyClient.execute(SearchAction.INSTANCE, request, listener); - } + final var policyClient = new LifecyclePolicySecurityClient(client, ClientHelper.INDEX_LIFECYCLE_ORIGIN, Collections.emptyMap()); + policyClient.execute(SearchAction.INSTANCE, request, listener); latch.await(); } @@ -95,15 +88,8 @@ public void testExecuteWithHeadersAsyncWrongHeaders() throws InterruptedExceptio headers.put("foo", "foo"); headers.put("bar", "bar"); - try ( - LifecyclePolicySecurityClient policyClient = new LifecyclePolicySecurityClient( - client, - ClientHelper.INDEX_LIFECYCLE_ORIGIN, - headers - ) - ) { - policyClient.execute(SearchAction.INSTANCE, request, listener); - } + final var policyClient = new LifecyclePolicySecurityClient(client, ClientHelper.INDEX_LIFECYCLE_ORIGIN, headers); + policyClient.execute(SearchAction.INSTANCE, request, listener); latch.await(); } @@ -136,15 +122,8 @@ public void testExecuteWithHeadersAsyncWithHeaders() throws Exception { headers.put("es-security-runas-user", "foo"); 
headers.put("_xpack_security_authentication", "bar"); - try ( - LifecyclePolicySecurityClient policyClient = new LifecyclePolicySecurityClient( - client, - ClientHelper.INDEX_LIFECYCLE_ORIGIN, - headers - ) - ) { - policyClient.execute(SearchAction.INSTANCE, request, listener); - } + final var policyClient = new LifecyclePolicySecurityClient(client, ClientHelper.INDEX_LIFECYCLE_ORIGIN, headers); + policyClient.execute(SearchAction.INSTANCE, request, listener); latch.await(); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/MountSnapshotStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/MountSnapshotStepTests.java index 4bd909fc00ca7..f905ca38e1c5c 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/MountSnapshotStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/MountSnapshotStepTests.java @@ -20,6 +20,7 @@ import org.elasticsearch.index.IndexVersion; import org.elasticsearch.snapshots.RestoreInfo; import org.elasticsearch.test.client.NoOpClient; +import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.ilm.Step.StepKey; import org.elasticsearch.xpack.core.searchablesnapshots.MountSearchableSnapshotAction; import org.elasticsearch.xpack.core.searchablesnapshots.MountSearchableSnapshotRequest; @@ -164,16 +165,16 @@ public void testPerformAction() throws Exception { .metadata(Metadata.builder().put(indexMetadata, true).build()) .build(); - try ( - NoOpClient client = getRestoreSnapshotRequestAssertingClient( + try (var threadPool = createThreadPool()) { + final var client = getRestoreSnapshotRequestAssertingClient( + threadPool, repository, snapshotName, indexName, RESTORED_INDEX_PREFIX, indexName, new String[] { LifecycleSettings.LIFECYCLE_NAME } - ) - ) { + ); MountSnapshotStep step = new MountSnapshotStep( randomStepKey(), randomStepKey(), @@ -207,7 +208,8 @@ public void testResponseStatusHandling() throws 
Exception { { RestoreSnapshotResponse responseWithOKStatus = new RestoreSnapshotResponse(new RestoreInfo("test", List.of(), 1, 1)); - try (NoOpClient clientPropagatingOKResponse = getClientTriggeringResponse(responseWithOKStatus)) { + try (var threadPool = createThreadPool()) { + final var clientPropagatingOKResponse = getClientTriggeringResponse(threadPool, responseWithOKStatus); MountSnapshotStep step = new MountSnapshotStep( randomStepKey(), randomStepKey(), @@ -221,7 +223,8 @@ public void testResponseStatusHandling() throws Exception { { RestoreSnapshotResponse responseWithACCEPTEDStatus = new RestoreSnapshotResponse((RestoreInfo) null); - try (NoOpClient clientPropagatingACCEPTEDResponse = getClientTriggeringResponse(responseWithACCEPTEDStatus)) { + try (var threadPool = createThreadPool()) { + final var clientPropagatingACCEPTEDResponse = getClientTriggeringResponse(threadPool, responseWithACCEPTEDStatus); MountSnapshotStep step = new MountSnapshotStep( randomStepKey(), randomStepKey(), @@ -286,16 +289,16 @@ public void doTestMountWithoutSnapshotIndexNameInState(String prefix) throws Exc .metadata(Metadata.builder().put(indexMetadata, true).build()) .build(); - try ( - NoOpClient client = getRestoreSnapshotRequestAssertingClient( + try (var threadPool = createThreadPool()) { + final var client = getRestoreSnapshotRequestAssertingClient( + threadPool, repository, snapshotName, indexName, RESTORED_INDEX_PREFIX, indexNameSnippet, new String[] { LifecycleSettings.LIFECYCLE_NAME } - ) - ) { + ); MountSnapshotStep step = new MountSnapshotStep( randomStepKey(), randomStepKey(), @@ -328,16 +331,16 @@ public void testIgnoreTotalShardsPerNodeInFrozenPhase() throws Exception { .metadata(Metadata.builder().put(indexMetadata, true).build()) .build(); - try ( - NoOpClient client = getRestoreSnapshotRequestAssertingClient( + try (var threadPool = createThreadPool()) { + final var client = getRestoreSnapshotRequestAssertingClient( + threadPool, repository, snapshotName, 
indexName, RESTORED_INDEX_PREFIX, indexName, new String[] { LifecycleSettings.LIFECYCLE_NAME, ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE_SETTING.getKey() } - ) - ) { + ); MountSnapshotStep step = new MountSnapshotStep( new StepKey(TimeseriesLifecycleType.FROZEN_PHASE, randomAlphaOfLength(10), randomAlphaOfLength(10)), randomStepKey(), @@ -350,8 +353,8 @@ public void testIgnoreTotalShardsPerNodeInFrozenPhase() throws Exception { } @SuppressWarnings("unchecked") - private NoOpClient getClientTriggeringResponse(RestoreSnapshotResponse response) { - return new NoOpClient(getTestName()) { + private NoOpClient getClientTriggeringResponse(ThreadPool threadPool, RestoreSnapshotResponse response) { + return new NoOpClient(threadPool) { @Override protected void doExecute( ActionType action, @@ -365,6 +368,7 @@ protected void @SuppressWarnings("unchecked") private NoOpClient getRestoreSnapshotRequestAssertingClient( + ThreadPool threadPool, String expectedRepoName, String expectedSnapshotName, String indexName, @@ -372,7 +376,7 @@ private NoOpClient getRestoreSnapshotRequestAssertingClient( String expectedSnapshotIndexName, String[] expectedIgnoredIndexSettings ) { - return new NoOpClient(getTestName()) { + return new NoOpClient(threadPool) { @Override protected void doExecute( ActionType action, diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/SwapAliasesAndDeleteSourceIndexStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/SwapAliasesAndDeleteSourceIndexStepTests.java index 4c62781bc2406..7a09b375ed53b 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/SwapAliasesAndDeleteSourceIndexStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/SwapAliasesAndDeleteSourceIndexStepTests.java @@ -20,6 +20,7 @@ import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.index.IndexVersion; import 
org.elasticsearch.test.client.NoOpClient; +import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.ilm.Step.StepKey; import java.util.Arrays; @@ -103,7 +104,8 @@ public void testPerformAction() { .isHidden(isHidden) ); - try (NoOpClient client = getIndicesAliasAssertingClient(expectedAliasActions)) { + try (var threadPool = createThreadPool()) { + final var client = getIndicesAliasAssertingClient(threadPool, expectedAliasActions); SwapAliasesAndDeleteSourceIndexStep step = new SwapAliasesAndDeleteSourceIndexStep( randomStepKey(), randomStepKey(), @@ -124,8 +126,8 @@ public void testPerformAction() { } } - private NoOpClient getIndicesAliasAssertingClient(List expectedAliasActions) { - return new NoOpClient(getTestName()) { + private NoOpClient getIndicesAliasAssertingClient(ThreadPool threadPool, List expectedAliasActions) { + return new NoOpClient(threadPool) { @Override protected void doExecute( ActionType action, diff --git a/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DataStreamLifecycleDownsampleIT.java b/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DataStreamLifecycleDownsampleIT.java index c38ed182abc64..cb945e8ffa418 100644 --- a/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DataStreamLifecycleDownsampleIT.java +++ b/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DataStreamLifecycleDownsampleIT.java @@ -43,10 +43,6 @@ protected Collection> nodePlugins() { return List.of(DataStreamsPlugin.class, LocalStateCompositeXPackPlugin.class, Downsample.class, AggregateMetricMapperPlugin.class); } - protected boolean ignoreExternalCluster() { - return true; - } - @Override protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { Settings.Builder settings = Settings.builder().put(super.nodeSettings(nodeOrdinal, otherSettings)); diff --git 
a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichProcessorFactoryTests.java b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichProcessorFactoryTests.java index caa550f3658b8..a26cab231f52c 100644 --- a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichProcessorFactoryTests.java +++ b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichProcessorFactoryTests.java @@ -14,7 +14,6 @@ import org.elasticsearch.action.ActionType; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.ShardSearchFailure; -import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.AliasMetadata; @@ -63,7 +62,8 @@ public void initializeScriptService() { public void testCreateProcessorInstance() throws Exception { List enrichValues = List.of("globalRank", "tldRank", "tld"); EnrichPolicy policy = new EnrichPolicy(EnrichPolicy.MATCH_TYPE, null, List.of("source_index"), "my_key", enrichValues); - try (Client client = new NoOpClient(this.getClass().getSimpleName() + "TestClient")) { + try (var threadPool = createThreadPool()) { + final var client = new NoOpClient(threadPool); EnrichProcessorFactory factory = new EnrichProcessorFactory(client, scriptService, enrichCache); factory.metadata = createMetadata("majestic", policy); @@ -172,7 +172,8 @@ public void testPolicyNameMissing() { public void testUnsupportedPolicy() throws Exception { List enrichValues = List.of("globalRank", "tldRank", "tld"); EnrichPolicy policy = new EnrichPolicy("unsupported", null, List.of("source_index"), "my_key", enrichValues); - try (Client client = new NoOpClient(this.getClass().getSimpleName() + "TestClient")) { + try (var threadPool = createThreadPool()) { + final var client = new NoOpClient(threadPool); EnrichProcessorFactory factory = new 
EnrichProcessorFactory(client, scriptService, enrichCache); factory.metadata = createMetadata("majestic", policy); @@ -193,7 +194,8 @@ public void testUnsupportedPolicy() throws Exception { public void testCompactEnrichValuesFormat() throws Exception { List enrichValues = List.of("globalRank", "tldRank", "tld"); EnrichPolicy policy = new EnrichPolicy(EnrichPolicy.MATCH_TYPE, null, List.of("source_index"), "host", enrichValues); - try (Client client = new NoOpClient(this.getClass().getSimpleName() + "TestClient")) { + try (var threadPool = createThreadPool()) { + final var client = new NoOpClient(threadPool); EnrichProcessorFactory factory = new EnrichProcessorFactory(client, scriptService, enrichCache); factory.metadata = createMetadata("majestic", policy); @@ -245,38 +247,38 @@ public void testCaching() throws Exception { enrichCache = new EnrichCache(100L); List enrichValues = List.of("globalRank", "tldRank", "tld"); EnrichPolicy policy = new EnrichPolicy(EnrichPolicy.MATCH_TYPE, null, List.of("source_index"), "host", enrichValues); - try (Client client = new NoOpClient(this.getClass().getSimpleName() + "testCaching") { - - @Override - @SuppressWarnings("unchecked") - protected void doExecute( - ActionType action, - Request request, - ActionListener listener - ) { - assert EnrichCoordinatorProxyAction.NAME.equals(action.name()); - var emptyResponse = new SearchResponse( - new InternalSearchResponse( - new SearchHits(new SearchHit[0], new TotalHits(0L, TotalHits.Relation.EQUAL_TO), 0.0f), - InternalAggregations.EMPTY, - new Suggest(Collections.emptyList()), - new SearchProfileResults(Collections.emptyMap()), - false, - false, - 1 - ), - "", - 1, - 1, - 0, - 0, - ShardSearchFailure.EMPTY_ARRAY, - SearchResponse.Clusters.EMPTY - ); - requestCounter[0]++; - listener.onResponse((Response) emptyResponse); - } - }) { + try (var threadPool = createThreadPool()) { + final var client = new NoOpClient(threadPool) { + @Override + @SuppressWarnings("unchecked") + protected 
void doExecute( + ActionType action, + Request request, + ActionListener listener + ) { + assert EnrichCoordinatorProxyAction.NAME.equals(action.name()); + var emptyResponse = new SearchResponse( + new InternalSearchResponse( + new SearchHits(new SearchHit[0], new TotalHits(0L, TotalHits.Relation.EQUAL_TO), 0.0f), + InternalAggregations.EMPTY, + new Suggest(Collections.emptyList()), + new SearchProfileResults(Collections.emptyMap()), + false, + false, + 1 + ), + "", + 1, + 1, + 0, + 0, + ShardSearchFailure.EMPTY_ARRAY, + SearchResponse.Clusters.EMPTY + ); + requestCounter[0]++; + listener.onResponse((Response) emptyResponse); + } + }; EnrichProcessorFactory factory = new EnrichProcessorFactory(client, scriptService, enrichCache); factory.accept(ClusterState.builder(new ClusterName("_name")).metadata(createMetadata("majestic", policy)).build()); diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/AbstractRestEnterpriseSearchActionTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/AbstractRestEnterpriseSearchActionTests.java index 8591ec821271a..259beb008dd70 100644 --- a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/AbstractRestEnterpriseSearchActionTests.java +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/AbstractRestEnterpriseSearchActionTests.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.application; -import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.ESTestCase; @@ -28,7 +27,8 @@ protected void checkLicenseForRequest(FakeRestRequest request, LicenseUtils.Prod final FakeRestChannel channel = new FakeRestChannel(request, true, 1); - try (NodeClient nodeClient = new NoOpNodeClient(this.getTestName())) { + try (var threadPool = createThreadPool()) { + final var nodeClient = new 
NoOpNodeClient(threadPool); action.handleRequest(request, channel, nodeClient); } assertThat(channel.capturedResponse(), notNullValue()); diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/EnterpriseSearchBaseRestHandlerTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/EnterpriseSearchBaseRestHandlerTests.java index 7ff8dae594e8a..6cf176e21498e 100644 --- a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/EnterpriseSearchBaseRestHandlerTests.java +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/EnterpriseSearchBaseRestHandlerTests.java @@ -59,7 +59,8 @@ public List routes() { FakeRestRequest fakeRestRequest = new FakeRestRequest(); FakeRestChannel fakeRestChannel = new FakeRestChannel(fakeRestRequest, randomBoolean(), licensedFeature ? 0 : 1); - try (NodeClient client = new NoOpNodeClient(this.getTestName())) { + try (var threadPool = createThreadPool()) { + final var client = new NoOpNodeClient(threadPool); assertFalse(consumerCalled.get()); verifyNoMoreInteractions(licenseState); handler.handleRequest(fakeRestRequest, fakeRestChannel, client); diff --git a/x-pack/plugin/eql/src/internalClusterTest/java/org/elasticsearch/xpack/eql/action/RestEqlCancellationIT.java b/x-pack/plugin/eql/src/internalClusterTest/java/org/elasticsearch/xpack/eql/action/RestEqlCancellationIT.java index 31f2a4e178c91..6ae49ea7416bb 100644 --- a/x-pack/plugin/eql/src/internalClusterTest/java/org/elasticsearch/xpack/eql/action/RestEqlCancellationIT.java +++ b/x-pack/plugin/eql/src/internalClusterTest/java/org/elasticsearch/xpack/eql/action/RestEqlCancellationIT.java @@ -139,8 +139,4 @@ public void testRestCancellation() throws Exception { expectThrows(CancellationException.class, future::actionGet); } - @Override - protected boolean ignoreExternalCluster() { - return true; - } } diff --git 
a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sample/CircuitBreakerTests.java b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sample/CircuitBreakerTests.java index 2df9452137ee4..663a0328a575b 100644 --- a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sample/CircuitBreakerTests.java +++ b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sample/CircuitBreakerTests.java @@ -34,6 +34,7 @@ import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.client.NoOpClient; +import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.eql.EqlTestUtils; import org.elasticsearch.xpack.eql.analysis.PostAnalyzer; import org.elasticsearch.xpack.eql.analysis.PreAnalyzer; @@ -103,8 +104,9 @@ private void testMemoryCleared(boolean fail) { Collections.singletonList(EqlTestUtils.circuitBreakerSettings(Settings.EMPTY)), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) ); - ESMockClient esClient = new ESMockClient(service.getBreaker(CIRCUIT_BREAKER_NAME)); + var threadPool = createThreadPool() ) { + final var esClient = new ESMockClient(threadPool, service.getBreaker(CIRCUIT_BREAKER_NAME)); CircuitBreaker eqlCircuitBreaker = service.getBreaker(CIRCUIT_BREAKER_NAME); IndexResolver indexResolver = new IndexResolver(esClient, "cluster", DefaultDataTypeRegistry.INSTANCE, () -> emptySet()); EqlSession eqlSession = new EqlSession( @@ -190,8 +192,8 @@ private class ESMockClient extends NoOpClient { protected final CircuitBreaker circuitBreaker; private final String pitId = "test_pit_id"; - ESMockClient(CircuitBreaker circuitBreaker) { - super(getTestName()); + ESMockClient(ThreadPool threadPool, CircuitBreaker circuitBreaker) { + super(threadPool); this.circuitBreaker = circuitBreaker; } diff --git 
a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sequence/CircuitBreakerTests.java b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sequence/CircuitBreakerTests.java index 51768096458f5..08d21de6d048a 100644 --- a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sequence/CircuitBreakerTests.java +++ b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sequence/CircuitBreakerTests.java @@ -25,6 +25,7 @@ import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.action.support.ActionTestUtils; import org.elasticsearch.common.ParsingException; +import org.elasticsearch.common.TriFunction; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.breaker.CircuitBreakingException; import org.elasticsearch.common.breaker.NoopCircuitBreaker; @@ -50,6 +51,7 @@ import org.elasticsearch.tasks.TaskId; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.client.NoOpClient; +import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.async.AsyncExecutionId; import org.elasticsearch.xpack.eql.action.EqlSearchAction; import org.elasticsearch.xpack.eql.action.EqlSearchTask; @@ -81,7 +83,6 @@ import java.util.Collections; import java.util.List; import java.util.concurrent.atomic.AtomicLong; -import java.util.function.BiFunction; import static java.util.Collections.emptyList; import static java.util.Collections.emptyMap; @@ -201,7 +202,10 @@ public void testMemoryClearedOnShardsException() { assertMemoryCleared(stages, FailureESMockClient::new); } - private void assertMemoryCleared(int sequenceFiltersCount, BiFunction esClientSupplier) { + private void assertMemoryCleared( + int sequenceFiltersCount, + TriFunction esClientSupplier + ) { final int searchRequestsExpectedCount = 2; try ( CircuitBreakerService service = new HierarchyCircuitBreakerService( @@ -209,8 +213,9 @@ private void 
assertMemoryCleared(int sequenceFiltersCount, BiFunction criteria = buildCriteria(sequenceFiltersCount); @@ -245,8 +250,14 @@ public void testEqlCBCleanedUp_on_ParentCBBreak() { breakerSettings(), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) ); - ESMockClient esClient = new SuccessfulESMockClient(service.getBreaker(CIRCUIT_BREAKER_NAME), searchRequestsExpectedCount); + var threadPool = createThreadPool() ) { + final var esClient = new SuccessfulESMockClient( + threadPool, + service.getBreaker(CIRCUIT_BREAKER_NAME), + searchRequestsExpectedCount + ); + CircuitBreaker eqlCircuitBreaker = service.getBreaker(CIRCUIT_BREAKER_NAME); QueryClient eqlClient = buildQueryClient(esClient, eqlCircuitBreaker); List criteria = buildCriteria(sequenceFiltersCount); @@ -359,8 +370,8 @@ private abstract class ESMockClient extends NoOpClient { private int searchRequestsRemainingCount; private final String pitId = "test_pit_id"; - ESMockClient(CircuitBreaker circuitBreaker, int searchRequestsRemainingCount) { - super(getTestName()); + ESMockClient(ThreadPool threadPool, CircuitBreaker circuitBreaker, int searchRequestsRemainingCount) { + super(threadPool); this.circuitBreaker = circuitBreaker; this.searchRequestsRemainingCount = searchRequestsRemainingCount; } @@ -404,8 +415,8 @@ int searchRequestsRemainingCount() { */ private class SuccessfulESMockClient extends ESMockClient { - SuccessfulESMockClient(CircuitBreaker circuitBreaker, int expectedSearchRequestsCount) { - super(circuitBreaker, expectedSearchRequestsCount); + SuccessfulESMockClient(ThreadPool threadPool, CircuitBreaker circuitBreaker, int expectedSearchRequestsCount) { + super(threadPool, circuitBreaker, expectedSearchRequestsCount); } @SuppressWarnings("unchecked") @@ -447,8 +458,8 @@ void handleSearchRequest(ActionListener keyExtractors = emptyList(); public void testHandlingPitFailure() { - try (ESMockClient esClient = new ESMockClient();) { + try (var threadPool = createThreadPool()) { 
+ final var esClient = new ESMockClient(threadPool); EqlConfiguration eqlConfiguration = new EqlConfiguration( new String[] { "test" }, @@ -146,8 +148,8 @@ public void testHandlingPitFailure() { */ private class ESMockClient extends NoOpClient { - ESMockClient() { - super(getTestName()); + ESMockClient(ThreadPool threadPool) { + super(threadPool); } @SuppressWarnings("unchecked") diff --git a/x-pack/plugin/esql/compute/ann/src/main/java/org/elasticsearch/compute/ann/ConvertEvaluator.java b/x-pack/plugin/esql/compute/ann/src/main/java/org/elasticsearch/compute/ann/ConvertEvaluator.java index 69a015b8d5ae9..f09d876cfb90c 100644 --- a/x-pack/plugin/esql/compute/ann/src/main/java/org/elasticsearch/compute/ann/ConvertEvaluator.java +++ b/x-pack/plugin/esql/compute/ann/src/main/java/org/elasticsearch/compute/ann/ConvertEvaluator.java @@ -25,4 +25,9 @@ */ String extraName() default ""; + /** + * Exceptions thrown by the process method to catch and convert + * into a warning and turn into a null value. 
+ */ + Class[] warnExceptions() default {}; } diff --git a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/ConvertEvaluatorImplementer.java b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/ConvertEvaluatorImplementer.java index 002806f11dd4b..f875cd7e6480e 100644 --- a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/ConvertEvaluatorImplementer.java +++ b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/ConvertEvaluatorImplementer.java @@ -20,6 +20,7 @@ import javax.lang.model.element.ExecutableElement; import javax.lang.model.element.Modifier; import javax.lang.model.element.TypeElement; +import javax.lang.model.type.TypeMirror; import javax.lang.model.util.Elements; import static org.elasticsearch.compute.gen.Methods.appendMethod; @@ -45,8 +46,14 @@ public class ConvertEvaluatorImplementer { private final ClassName implementation; private final TypeName argumentType; private final TypeName resultType; - - public ConvertEvaluatorImplementer(Elements elements, ExecutableElement processFunction, String extraName) { + private final List warnExceptions; + + public ConvertEvaluatorImplementer( + Elements elements, + ExecutableElement processFunction, + String extraName, + List warnExceptions + ) { this.declarationType = (TypeElement) processFunction.getEnclosingElement(); this.processFunction = processFunction; if (processFunction.getParameters().size() != 1) { @@ -55,6 +62,7 @@ public ConvertEvaluatorImplementer(Elements elements, ExecutableElement processF this.extraName = extraName; this.argumentType = TypeName.get(processFunction.getParameters().get(0).asType()); this.resultType = TypeName.get(processFunction.getReturnType()); + this.warnExceptions = warnExceptions; this.implementation = ClassName.get( elements.getPackageOf(declarationType).toString(), @@ -113,29 +121,21 @@ private MethodSpec evalVector() { builder.addStatement("$T vector = ($T) v", 
vectorType, vectorType); builder.addStatement("int positionCount = v.getPositionCount()"); - String scratchPadName = null; + String scratchPadName = argumentType.equals(BYTES_REF) ? "scratchPad" : null; if (argumentType.equals(BYTES_REF)) { - scratchPadName = "scratchPad"; builder.addStatement("BytesRef $N = new BytesRef()", scratchPadName); } builder.beginControlFlow("if (vector.isConstant())"); { - builder.beginControlFlow("try"); - { + catchingWarnExceptions(builder, () -> { var constVectType = blockType(resultType); builder.addStatement( "return driverContext.blockFactory().newConstant$TWith($N, positionCount)", constVectType, evalValueCall("vector", "0", scratchPadName) ); - } - builder.nextControlFlow("catch (Exception e)"); - { - builder.addStatement("registerException(e)"); - builder.addStatement("return driverContext.blockFactory().newConstantNullBlock(positionCount)"); - } - builder.endControlFlow(); + }, () -> builder.addStatement("return driverContext.blockFactory().newConstantNullBlock(positionCount)")); } builder.endControlFlow(); @@ -148,16 +148,11 @@ private MethodSpec evalVector() { { builder.beginControlFlow("for (int p = 0; p < positionCount; p++)"); { - builder.beginControlFlow("try"); - { - builder.addStatement("builder.$L($N)", appendMethod(resultType), evalValueCall("vector", "p", scratchPadName)); - } - builder.nextControlFlow("catch (Exception e)"); - { - builder.addStatement("registerException(e)"); - builder.addStatement("builder.appendNull()"); - } - builder.endControlFlow(); + catchingWarnExceptions( + builder, + () -> builder.addStatement("builder.$L($N)", appendMethod(resultType), evalValueCall("vector", "p", scratchPadName)), + () -> builder.addStatement("builder.appendNull()") + ); } builder.endControlFlow(); builder.addStatement("return builder.build()"); @@ -167,6 +162,22 @@ private MethodSpec evalVector() { return builder.build(); } + private void catchingWarnExceptions(MethodSpec.Builder builder, Runnable whileCatching, Runnable 
ifCaught) { + if (warnExceptions.isEmpty()) { + whileCatching.run(); + return; + } + builder.beginControlFlow("try"); + whileCatching.run(); + builder.nextControlFlow( + "catch (" + warnExceptions.stream().map(m -> "$T").collect(Collectors.joining(" | ")) + " e)", + warnExceptions.stream().map(m -> TypeName.get(m)).toArray() + ); + builder.addStatement("registerException(e)"); + ifCaught.run(); + builder.endControlFlow(); + } + private MethodSpec evalBlock() { MethodSpec.Builder builder = MethodSpec.methodBuilder("evalBlock").addAnnotation(Override.class).addModifiers(Modifier.PUBLIC); builder.addParameter(BLOCK, "b").returns(BLOCK); @@ -180,9 +191,8 @@ private MethodSpec evalBlock() { resultBuilderType, buildFromFactory(resultBuilderType) ); - String scratchPadName = null; + String scratchPadName = argumentType.equals(BYTES_REF) ? "scratchPad" : null; if (argumentType.equals(BYTES_REF)) { - scratchPadName = "scratchPad"; builder.addStatement("BytesRef $N = new BytesRef()", scratchPadName); } @@ -197,8 +207,7 @@ private MethodSpec evalBlock() { // builder.addStatement("builder.beginPositionEntry()"); builder.beginControlFlow("for (int i = start; i < end; i++)"); { - builder.beginControlFlow("try"); - { + catchingWarnExceptions(builder, () -> { builder.addStatement("$T value = $N", resultType, evalValueCall("block", "i", scratchPadName)); builder.beginControlFlow("if (positionOpened == false && valueCount > 1)"); { @@ -208,12 +217,7 @@ private MethodSpec evalBlock() { builder.endControlFlow(); builder.addStatement("builder.$N(value)", appendMethod); builder.addStatement("valuesAppended = true"); - } - builder.nextControlFlow("catch (Exception e)"); - { - builder.addStatement("registerException(e)"); - } - builder.endControlFlow(); + }, () -> {}); } builder.endControlFlow(); builder.beginControlFlow("if (valuesAppended == false)"); diff --git a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/EvaluatorProcessor.java 
b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/EvaluatorProcessor.java index a6668795c592a..ea3ee938298de 100644 --- a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/EvaluatorProcessor.java +++ b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/EvaluatorProcessor.java @@ -120,7 +120,8 @@ public boolean process(Set set, RoundEnvironment roundEnv new ConvertEvaluatorImplementer( env.getElementUtils(), (ExecutableElement) evaluatorMethod, - convertEvaluatorAnn.extraName() + convertEvaluatorAnn.extraName(), + warnExceptions(evaluatorMethod) ).sourceFile(), env ); @@ -138,7 +139,10 @@ private static List warnExceptions(Element evaluatorMethod) { List result = new ArrayList<>(); for (var mirror : evaluatorMethod.getAnnotationMirrors()) { String annotationType = mirror.getAnnotationType().toString(); - if (annotationType.equals(Evaluator.class.getName()) || annotationType.equals(MvEvaluator.class.getName())) { + if (annotationType.equals(Evaluator.class.getName()) + || annotationType.equals(MvEvaluator.class.getName()) + || annotationType.equals(ConvertEvaluator.class.getName())) { + for (var e : mirror.getElementValues().entrySet()) { if (false == e.getKey().getSimpleName().toString().equals("warnExceptions")) { continue; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/docs.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/docs.csv-spec index 02d530a2ae835..f2052462f4d8b 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/docs.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/docs.csv-spec @@ -278,7 +278,7 @@ docsWhereFunction // tag::whereFunction[] FROM employees | KEEP first_name, last_name, height -| WHERE length(first_name) < 4 +| WHERE LENGTH(first_name) < 4 // end::whereFunction[] | SORT first_name ; @@ -431,7 +431,7 @@ Hello Universe docsCase // tag::case[] FROM employees -| EVAL type = case( +| EVAL type = CASE( 
languages <= 1, "monolingual", languages <= 2, "bilingual", "polyglot") diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ints.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ints.csv-spec index cdc25587793cc..9485bf800dd18 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ints.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ints.csv-spec @@ -362,7 +362,7 @@ autoBucket // tag::auto_bucket[] FROM employees | WHERE hire_date >= "1985-01-01T00:00:00Z" AND hire_date < "1986-01-01T00:00:00Z" -| EVAL bs = auto_bucket(salary, 20, 25324, 74999) +| EVAL bs = AUTO_BUCKET(salary, 20, 25324, 74999) | SORT hire_date, salary | KEEP hire_date, salary, bs // end::auto_bucket[] diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/null.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/null.csv-spec index 3aa2746266da6..0d7fed9028fe4 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/null.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/null.csv-spec @@ -46,12 +46,12 @@ isNotNullForDocs // tag::is-not-null[] FROM employees | WHERE is_rehired IS NOT NULL -| STATS count(emp_no) +| STATS COUNT(emp_no) // end::is-not-null[] ; // tag::is-not-null-result[] -count(emp_no):long +COUNT(emp_no):long 84 // end::is-not-null-result[] ; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/show.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/show.csv-spec index 117bb9646bc5d..0b45f9ac5aea4 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/show.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/show.csv-spec @@ -184,10 +184,16 @@ synopsis:keyword showFunctionsFiltered -show functions | where starts_with(name, "is_"); +// tag::showFunctionsFiltered[] +SHOW functions +| WHERE STARTS_WITH(name, "is_") +// end::showFunctionsFiltered[] +; +// tag::showFunctionsFiltered-result[] name:keyword | 
synopsis:keyword | argNames:keyword | argTypes:keyword | argDescriptions:keyword | returnType:keyword | description:keyword | optionalArgs:boolean | variadic:boolean is_finite |? is_finite(arg1:?) |arg1 |? | "" |? | "" | false | false is_infinite |? is_infinite(arg1:?) |arg1 |? | "" |? | "" | false | false is_nan |? is_nan(arg1:?) |arg1 |? | "" |? | "" | false | false +// end::showFunctionsFiltered-result[] ; diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToBooleanFromDoubleEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToBooleanFromDoubleEvaluator.java index 4463883b611d4..c64568251feec 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToBooleanFromDoubleEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToBooleanFromDoubleEvaluator.java @@ -35,21 +35,11 @@ public Block evalVector(Vector v) { DoubleVector vector = (DoubleVector) v; int positionCount = v.getPositionCount(); if (vector.isConstant()) { - try { - return driverContext.blockFactory().newConstantBooleanBlockWith(evalValue(vector, 0), positionCount); - } catch (Exception e) { - registerException(e); - return driverContext.blockFactory().newConstantNullBlock(positionCount); - } + return driverContext.blockFactory().newConstantBooleanBlockWith(evalValue(vector, 0), positionCount); } try (BooleanBlock.Builder builder = driverContext.blockFactory().newBooleanBlockBuilder(positionCount)) { for (int p = 0; p < positionCount; p++) { - try { - builder.appendBoolean(evalValue(vector, p)); - } catch (Exception e) { - registerException(e); - builder.appendNull(); - } + builder.appendBoolean(evalValue(vector, p)); } return builder.build(); } @@ -72,17 +62,13 @@ public Block evalBlock(Block b) { boolean positionOpened = false; 
boolean valuesAppended = false; for (int i = start; i < end; i++) { - try { - boolean value = evalValue(block, i); - if (positionOpened == false && valueCount > 1) { - builder.beginPositionEntry(); - positionOpened = true; - } - builder.appendBoolean(value); - valuesAppended = true; - } catch (Exception e) { - registerException(e); + boolean value = evalValue(block, i); + if (positionOpened == false && valueCount > 1) { + builder.beginPositionEntry(); + positionOpened = true; } + builder.appendBoolean(value); + valuesAppended = true; } if (valuesAppended == false) { builder.appendNull(); diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToBooleanFromIntEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToBooleanFromIntEvaluator.java index 22c07e6e10f21..daac34639c66a 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToBooleanFromIntEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToBooleanFromIntEvaluator.java @@ -35,21 +35,11 @@ public Block evalVector(Vector v) { IntVector vector = (IntVector) v; int positionCount = v.getPositionCount(); if (vector.isConstant()) { - try { - return driverContext.blockFactory().newConstantBooleanBlockWith(evalValue(vector, 0), positionCount); - } catch (Exception e) { - registerException(e); - return driverContext.blockFactory().newConstantNullBlock(positionCount); - } + return driverContext.blockFactory().newConstantBooleanBlockWith(evalValue(vector, 0), positionCount); } try (BooleanBlock.Builder builder = driverContext.blockFactory().newBooleanBlockBuilder(positionCount)) { for (int p = 0; p < positionCount; p++) { - try { - builder.appendBoolean(evalValue(vector, p)); - } catch (Exception e) { - registerException(e); - builder.appendNull(); 
- } + builder.appendBoolean(evalValue(vector, p)); } return builder.build(); } @@ -72,17 +62,13 @@ public Block evalBlock(Block b) { boolean positionOpened = false; boolean valuesAppended = false; for (int i = start; i < end; i++) { - try { - boolean value = evalValue(block, i); - if (positionOpened == false && valueCount > 1) { - builder.beginPositionEntry(); - positionOpened = true; - } - builder.appendBoolean(value); - valuesAppended = true; - } catch (Exception e) { - registerException(e); + boolean value = evalValue(block, i); + if (positionOpened == false && valueCount > 1) { + builder.beginPositionEntry(); + positionOpened = true; } + builder.appendBoolean(value); + valuesAppended = true; } if (valuesAppended == false) { builder.appendNull(); diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToBooleanFromLongEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToBooleanFromLongEvaluator.java index c9264d1ec5017..1e6b2aefce9f3 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToBooleanFromLongEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToBooleanFromLongEvaluator.java @@ -35,21 +35,11 @@ public Block evalVector(Vector v) { LongVector vector = (LongVector) v; int positionCount = v.getPositionCount(); if (vector.isConstant()) { - try { - return driverContext.blockFactory().newConstantBooleanBlockWith(evalValue(vector, 0), positionCount); - } catch (Exception e) { - registerException(e); - return driverContext.blockFactory().newConstantNullBlock(positionCount); - } + return driverContext.blockFactory().newConstantBooleanBlockWith(evalValue(vector, 0), positionCount); } try (BooleanBlock.Builder builder = driverContext.blockFactory().newBooleanBlockBuilder(positionCount)) { 
for (int p = 0; p < positionCount; p++) { - try { - builder.appendBoolean(evalValue(vector, p)); - } catch (Exception e) { - registerException(e); - builder.appendNull(); - } + builder.appendBoolean(evalValue(vector, p)); } return builder.build(); } @@ -72,17 +62,13 @@ public Block evalBlock(Block b) { boolean positionOpened = false; boolean valuesAppended = false; for (int i = start; i < end; i++) { - try { - boolean value = evalValue(block, i); - if (positionOpened == false && valueCount > 1) { - builder.beginPositionEntry(); - positionOpened = true; - } - builder.appendBoolean(value); - valuesAppended = true; - } catch (Exception e) { - registerException(e); + boolean value = evalValue(block, i); + if (positionOpened == false && valueCount > 1) { + builder.beginPositionEntry(); + positionOpened = true; } + builder.appendBoolean(value); + valuesAppended = true; } if (valuesAppended == false) { builder.appendNull(); diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToBooleanFromStringEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToBooleanFromStringEvaluator.java index 5e68501d13418..ce573a3b8d2d3 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToBooleanFromStringEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToBooleanFromStringEvaluator.java @@ -37,21 +37,11 @@ public Block evalVector(Vector v) { int positionCount = v.getPositionCount(); BytesRef scratchPad = new BytesRef(); if (vector.isConstant()) { - try { - return driverContext.blockFactory().newConstantBooleanBlockWith(evalValue(vector, 0, scratchPad), positionCount); - } catch (Exception e) { - registerException(e); - return driverContext.blockFactory().newConstantNullBlock(positionCount); - } + return 
driverContext.blockFactory().newConstantBooleanBlockWith(evalValue(vector, 0, scratchPad), positionCount); } try (BooleanBlock.Builder builder = driverContext.blockFactory().newBooleanBlockBuilder(positionCount)) { for (int p = 0; p < positionCount; p++) { - try { - builder.appendBoolean(evalValue(vector, p, scratchPad)); - } catch (Exception e) { - registerException(e); - builder.appendNull(); - } + builder.appendBoolean(evalValue(vector, p, scratchPad)); } return builder.build(); } @@ -75,17 +65,13 @@ public Block evalBlock(Block b) { boolean positionOpened = false; boolean valuesAppended = false; for (int i = start; i < end; i++) { - try { - boolean value = evalValue(block, i, scratchPad); - if (positionOpened == false && valueCount > 1) { - builder.beginPositionEntry(); - positionOpened = true; - } - builder.appendBoolean(value); - valuesAppended = true; - } catch (Exception e) { - registerException(e); + boolean value = evalValue(block, i, scratchPad); + if (positionOpened == false && valueCount > 1) { + builder.beginPositionEntry(); + positionOpened = true; } + builder.appendBoolean(value); + valuesAppended = true; } if (valuesAppended == false) { builder.appendNull(); diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToBooleanFromUnsignedLongEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToBooleanFromUnsignedLongEvaluator.java index 2924994f894b9..5ec75f10c2ecb 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToBooleanFromUnsignedLongEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToBooleanFromUnsignedLongEvaluator.java @@ -35,21 +35,11 @@ public Block evalVector(Vector v) { LongVector vector = (LongVector) v; int positionCount = v.getPositionCount(); if 
(vector.isConstant()) { - try { - return driverContext.blockFactory().newConstantBooleanBlockWith(evalValue(vector, 0), positionCount); - } catch (Exception e) { - registerException(e); - return driverContext.blockFactory().newConstantNullBlock(positionCount); - } + return driverContext.blockFactory().newConstantBooleanBlockWith(evalValue(vector, 0), positionCount); } try (BooleanBlock.Builder builder = driverContext.blockFactory().newBooleanBlockBuilder(positionCount)) { for (int p = 0; p < positionCount; p++) { - try { - builder.appendBoolean(evalValue(vector, p)); - } catch (Exception e) { - registerException(e); - builder.appendNull(); - } + builder.appendBoolean(evalValue(vector, p)); } return builder.build(); } @@ -72,17 +62,13 @@ public Block evalBlock(Block b) { boolean positionOpened = false; boolean valuesAppended = false; for (int i = start; i < end; i++) { - try { - boolean value = evalValue(block, i); - if (positionOpened == false && valueCount > 1) { - builder.beginPositionEntry(); - positionOpened = true; - } - builder.appendBoolean(value); - valuesAppended = true; - } catch (Exception e) { - registerException(e); + boolean value = evalValue(block, i); + if (positionOpened == false && valueCount > 1) { + builder.beginPositionEntry(); + positionOpened = true; } + builder.appendBoolean(value); + valuesAppended = true; } if (valuesAppended == false) { builder.appendNull(); diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDatetimeFromStringEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDatetimeFromStringEvaluator.java index fb663b0f20b4d..b868fe9b950c8 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDatetimeFromStringEvaluator.java +++ 
b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDatetimeFromStringEvaluator.java @@ -4,6 +4,7 @@ // 2.0. package org.elasticsearch.xpack.esql.expression.function.scalar.convert; +import java.lang.IllegalArgumentException; import java.lang.Override; import java.lang.String; import org.apache.lucene.util.BytesRef; @@ -39,7 +40,7 @@ public Block evalVector(Vector v) { if (vector.isConstant()) { try { return driverContext.blockFactory().newConstantLongBlockWith(evalValue(vector, 0, scratchPad), positionCount); - } catch (Exception e) { + } catch (IllegalArgumentException e) { registerException(e); return driverContext.blockFactory().newConstantNullBlock(positionCount); } @@ -48,7 +49,7 @@ public Block evalVector(Vector v) { for (int p = 0; p < positionCount; p++) { try { builder.appendLong(evalValue(vector, p, scratchPad)); - } catch (Exception e) { + } catch (IllegalArgumentException e) { registerException(e); builder.appendNull(); } @@ -83,7 +84,7 @@ public Block evalBlock(Block b) { } builder.appendLong(value); valuesAppended = true; - } catch (Exception e) { + } catch (IllegalArgumentException e) { registerException(e); } } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDegreesEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDegreesEvaluator.java index 8185f2a6e89b9..bdf1fd8616559 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDegreesEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDegreesEvaluator.java @@ -34,21 +34,11 @@ public Block evalVector(Vector v) { DoubleVector vector = (DoubleVector) v; int positionCount = v.getPositionCount(); if (vector.isConstant()) { - try { - return 
driverContext.blockFactory().newConstantDoubleBlockWith(evalValue(vector, 0), positionCount); - } catch (Exception e) { - registerException(e); - return driverContext.blockFactory().newConstantNullBlock(positionCount); - } + return driverContext.blockFactory().newConstantDoubleBlockWith(evalValue(vector, 0), positionCount); } try (DoubleBlock.Builder builder = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) { for (int p = 0; p < positionCount; p++) { - try { - builder.appendDouble(evalValue(vector, p)); - } catch (Exception e) { - registerException(e); - builder.appendNull(); - } + builder.appendDouble(evalValue(vector, p)); } return builder.build(); } @@ -71,17 +61,13 @@ public Block evalBlock(Block b) { boolean positionOpened = false; boolean valuesAppended = false; for (int i = start; i < end; i++) { - try { - double value = evalValue(block, i); - if (positionOpened == false && valueCount > 1) { - builder.beginPositionEntry(); - positionOpened = true; - } - builder.appendDouble(value); - valuesAppended = true; - } catch (Exception e) { - registerException(e); + double value = evalValue(block, i); + if (positionOpened == false && valueCount > 1) { + builder.beginPositionEntry(); + positionOpened = true; } + builder.appendDouble(value); + valuesAppended = true; } if (valuesAppended == false) { builder.appendNull(); diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDoubleFromBooleanEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDoubleFromBooleanEvaluator.java index 163cd82c5943d..c831e1b0a314a 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDoubleFromBooleanEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDoubleFromBooleanEvaluator.java @@ -35,21 
+35,11 @@ public Block evalVector(Vector v) { BooleanVector vector = (BooleanVector) v; int positionCount = v.getPositionCount(); if (vector.isConstant()) { - try { - return driverContext.blockFactory().newConstantDoubleBlockWith(evalValue(vector, 0), positionCount); - } catch (Exception e) { - registerException(e); - return driverContext.blockFactory().newConstantNullBlock(positionCount); - } + return driverContext.blockFactory().newConstantDoubleBlockWith(evalValue(vector, 0), positionCount); } try (DoubleBlock.Builder builder = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) { for (int p = 0; p < positionCount; p++) { - try { - builder.appendDouble(evalValue(vector, p)); - } catch (Exception e) { - registerException(e); - builder.appendNull(); - } + builder.appendDouble(evalValue(vector, p)); } return builder.build(); } @@ -72,17 +62,13 @@ public Block evalBlock(Block b) { boolean positionOpened = false; boolean valuesAppended = false; for (int i = start; i < end; i++) { - try { - double value = evalValue(block, i); - if (positionOpened == false && valueCount > 1) { - builder.beginPositionEntry(); - positionOpened = true; - } - builder.appendDouble(value); - valuesAppended = true; - } catch (Exception e) { - registerException(e); + double value = evalValue(block, i); + if (positionOpened == false && valueCount > 1) { + builder.beginPositionEntry(); + positionOpened = true; } + builder.appendDouble(value); + valuesAppended = true; } if (valuesAppended == false) { builder.appendNull(); diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDoubleFromIntEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDoubleFromIntEvaluator.java index 4493b2ffabe50..ef1081f4ebd6a 100644 --- 
a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDoubleFromIntEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDoubleFromIntEvaluator.java @@ -35,21 +35,11 @@ public Block evalVector(Vector v) { IntVector vector = (IntVector) v; int positionCount = v.getPositionCount(); if (vector.isConstant()) { - try { - return driverContext.blockFactory().newConstantDoubleBlockWith(evalValue(vector, 0), positionCount); - } catch (Exception e) { - registerException(e); - return driverContext.blockFactory().newConstantNullBlock(positionCount); - } + return driverContext.blockFactory().newConstantDoubleBlockWith(evalValue(vector, 0), positionCount); } try (DoubleBlock.Builder builder = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) { for (int p = 0; p < positionCount; p++) { - try { - builder.appendDouble(evalValue(vector, p)); - } catch (Exception e) { - registerException(e); - builder.appendNull(); - } + builder.appendDouble(evalValue(vector, p)); } return builder.build(); } @@ -72,17 +62,13 @@ public Block evalBlock(Block b) { boolean positionOpened = false; boolean valuesAppended = false; for (int i = start; i < end; i++) { - try { - double value = evalValue(block, i); - if (positionOpened == false && valueCount > 1) { - builder.beginPositionEntry(); - positionOpened = true; - } - builder.appendDouble(value); - valuesAppended = true; - } catch (Exception e) { - registerException(e); + double value = evalValue(block, i); + if (positionOpened == false && valueCount > 1) { + builder.beginPositionEntry(); + positionOpened = true; } + builder.appendDouble(value); + valuesAppended = true; } if (valuesAppended == false) { builder.appendNull(); diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDoubleFromLongEvaluator.java 
b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDoubleFromLongEvaluator.java index d8d6a6499a623..fc78d9cfebc01 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDoubleFromLongEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDoubleFromLongEvaluator.java @@ -35,21 +35,11 @@ public Block evalVector(Vector v) { LongVector vector = (LongVector) v; int positionCount = v.getPositionCount(); if (vector.isConstant()) { - try { - return driverContext.blockFactory().newConstantDoubleBlockWith(evalValue(vector, 0), positionCount); - } catch (Exception e) { - registerException(e); - return driverContext.blockFactory().newConstantNullBlock(positionCount); - } + return driverContext.blockFactory().newConstantDoubleBlockWith(evalValue(vector, 0), positionCount); } try (DoubleBlock.Builder builder = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) { for (int p = 0; p < positionCount; p++) { - try { - builder.appendDouble(evalValue(vector, p)); - } catch (Exception e) { - registerException(e); - builder.appendNull(); - } + builder.appendDouble(evalValue(vector, p)); } return builder.build(); } @@ -72,17 +62,13 @@ public Block evalBlock(Block b) { boolean positionOpened = false; boolean valuesAppended = false; for (int i = start; i < end; i++) { - try { - double value = evalValue(block, i); - if (positionOpened == false && valueCount > 1) { - builder.beginPositionEntry(); - positionOpened = true; - } - builder.appendDouble(value); - valuesAppended = true; - } catch (Exception e) { - registerException(e); + double value = evalValue(block, i); + if (positionOpened == false && valueCount > 1) { + builder.beginPositionEntry(); + positionOpened = true; } + builder.appendDouble(value); + valuesAppended = true; } if (valuesAppended == false) { 
builder.appendNull(); diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDoubleFromStringEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDoubleFromStringEvaluator.java index 39e8d505bb4d7..b1fc80b9260ad 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDoubleFromStringEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDoubleFromStringEvaluator.java @@ -4,6 +4,7 @@ // 2.0. package org.elasticsearch.xpack.esql.expression.function.scalar.convert; +import java.lang.NumberFormatException; import java.lang.Override; import java.lang.String; import org.apache.lucene.util.BytesRef; @@ -39,7 +40,7 @@ public Block evalVector(Vector v) { if (vector.isConstant()) { try { return driverContext.blockFactory().newConstantDoubleBlockWith(evalValue(vector, 0, scratchPad), positionCount); - } catch (Exception e) { + } catch (NumberFormatException e) { registerException(e); return driverContext.blockFactory().newConstantNullBlock(positionCount); } @@ -48,7 +49,7 @@ public Block evalVector(Vector v) { for (int p = 0; p < positionCount; p++) { try { builder.appendDouble(evalValue(vector, p, scratchPad)); - } catch (Exception e) { + } catch (NumberFormatException e) { registerException(e); builder.appendNull(); } @@ -83,7 +84,7 @@ public Block evalBlock(Block b) { } builder.appendDouble(value); valuesAppended = true; - } catch (Exception e) { + } catch (NumberFormatException e) { registerException(e); } } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDoubleFromUnsignedLongEvaluator.java 
b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDoubleFromUnsignedLongEvaluator.java index a5ff443f7e7f6..b2e4e5137543a 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDoubleFromUnsignedLongEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDoubleFromUnsignedLongEvaluator.java @@ -35,21 +35,11 @@ public Block evalVector(Vector v) { LongVector vector = (LongVector) v; int positionCount = v.getPositionCount(); if (vector.isConstant()) { - try { - return driverContext.blockFactory().newConstantDoubleBlockWith(evalValue(vector, 0), positionCount); - } catch (Exception e) { - registerException(e); - return driverContext.blockFactory().newConstantNullBlock(positionCount); - } + return driverContext.blockFactory().newConstantDoubleBlockWith(evalValue(vector, 0), positionCount); } try (DoubleBlock.Builder builder = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) { for (int p = 0; p < positionCount; p++) { - try { - builder.appendDouble(evalValue(vector, p)); - } catch (Exception e) { - registerException(e); - builder.appendNull(); - } + builder.appendDouble(evalValue(vector, p)); } return builder.build(); } @@ -72,17 +62,13 @@ public Block evalBlock(Block b) { boolean positionOpened = false; boolean valuesAppended = false; for (int i = start; i < end; i++) { - try { - double value = evalValue(block, i); - if (positionOpened == false && valueCount > 1) { - builder.beginPositionEntry(); - positionOpened = true; - } - builder.appendDouble(value); - valuesAppended = true; - } catch (Exception e) { - registerException(e); + double value = evalValue(block, i); + if (positionOpened == false && valueCount > 1) { + builder.beginPositionEntry(); + positionOpened = true; } + builder.appendDouble(value); + valuesAppended = true; } if (valuesAppended == 
false) { builder.appendNull(); diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToIPFromStringEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToIPFromStringEvaluator.java index 8ed666bacc190..bd6e883a6e89e 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToIPFromStringEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToIPFromStringEvaluator.java @@ -4,6 +4,7 @@ // 2.0. package org.elasticsearch.xpack.esql.expression.function.scalar.convert; +import java.lang.IllegalArgumentException; import java.lang.Override; import java.lang.String; import org.apache.lucene.util.BytesRef; @@ -38,7 +39,7 @@ public Block evalVector(Vector v) { if (vector.isConstant()) { try { return driverContext.blockFactory().newConstantBytesRefBlockWith(evalValue(vector, 0, scratchPad), positionCount); - } catch (Exception e) { + } catch (IllegalArgumentException e) { registerException(e); return driverContext.blockFactory().newConstantNullBlock(positionCount); } @@ -47,7 +48,7 @@ public Block evalVector(Vector v) { for (int p = 0; p < positionCount; p++) { try { builder.appendBytesRef(evalValue(vector, p, scratchPad)); - } catch (Exception e) { + } catch (IllegalArgumentException e) { registerException(e); builder.appendNull(); } @@ -82,7 +83,7 @@ public Block evalBlock(Block b) { } builder.appendBytesRef(value); valuesAppended = true; - } catch (Exception e) { + } catch (IllegalArgumentException e) { registerException(e); } } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToIntegerFromBooleanEvaluator.java 
b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToIntegerFromBooleanEvaluator.java index b4e36bd28a51b..f778deb32865f 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToIntegerFromBooleanEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToIntegerFromBooleanEvaluator.java @@ -35,21 +35,11 @@ public Block evalVector(Vector v) { BooleanVector vector = (BooleanVector) v; int positionCount = v.getPositionCount(); if (vector.isConstant()) { - try { - return driverContext.blockFactory().newConstantIntBlockWith(evalValue(vector, 0), positionCount); - } catch (Exception e) { - registerException(e); - return driverContext.blockFactory().newConstantNullBlock(positionCount); - } + return driverContext.blockFactory().newConstantIntBlockWith(evalValue(vector, 0), positionCount); } try (IntBlock.Builder builder = driverContext.blockFactory().newIntBlockBuilder(positionCount)) { for (int p = 0; p < positionCount; p++) { - try { - builder.appendInt(evalValue(vector, p)); - } catch (Exception e) { - registerException(e); - builder.appendNull(); - } + builder.appendInt(evalValue(vector, p)); } return builder.build(); } @@ -72,17 +62,13 @@ public Block evalBlock(Block b) { boolean positionOpened = false; boolean valuesAppended = false; for (int i = start; i < end; i++) { - try { - int value = evalValue(block, i); - if (positionOpened == false && valueCount > 1) { - builder.beginPositionEntry(); - positionOpened = true; - } - builder.appendInt(value); - valuesAppended = true; - } catch (Exception e) { - registerException(e); + int value = evalValue(block, i); + if (positionOpened == false && valueCount > 1) { + builder.beginPositionEntry(); + positionOpened = true; } + builder.appendInt(value); + valuesAppended = true; } if (valuesAppended == false) { builder.appendNull(); diff 
--git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToIntegerFromDoubleEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToIntegerFromDoubleEvaluator.java index 10820c83ac0c8..b7ff410d07c15 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToIntegerFromDoubleEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToIntegerFromDoubleEvaluator.java @@ -13,6 +13,8 @@ import org.elasticsearch.compute.data.Vector; import org.elasticsearch.compute.operator.DriverContext; import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.xpack.ql.InvalidArgumentException; +import org.elasticsearch.xpack.ql.QlIllegalArgumentException; import org.elasticsearch.xpack.ql.tree.Source; /** @@ -37,7 +39,7 @@ public Block evalVector(Vector v) { if (vector.isConstant()) { try { return driverContext.blockFactory().newConstantIntBlockWith(evalValue(vector, 0), positionCount); - } catch (Exception e) { + } catch (InvalidArgumentException | QlIllegalArgumentException e) { registerException(e); return driverContext.blockFactory().newConstantNullBlock(positionCount); } @@ -46,7 +48,7 @@ public Block evalVector(Vector v) { for (int p = 0; p < positionCount; p++) { try { builder.appendInt(evalValue(vector, p)); - } catch (Exception e) { + } catch (InvalidArgumentException | QlIllegalArgumentException e) { registerException(e); builder.appendNull(); } @@ -80,7 +82,7 @@ public Block evalBlock(Block b) { } builder.appendInt(value); valuesAppended = true; - } catch (Exception e) { + } catch (InvalidArgumentException | QlIllegalArgumentException e) { registerException(e); } } diff --git 
a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToIntegerFromLongEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToIntegerFromLongEvaluator.java index bb713a5e7f483..742b057c06799 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToIntegerFromLongEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToIntegerFromLongEvaluator.java @@ -13,6 +13,8 @@ import org.elasticsearch.compute.data.Vector; import org.elasticsearch.compute.operator.DriverContext; import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.xpack.ql.InvalidArgumentException; +import org.elasticsearch.xpack.ql.QlIllegalArgumentException; import org.elasticsearch.xpack.ql.tree.Source; /** @@ -37,7 +39,7 @@ public Block evalVector(Vector v) { if (vector.isConstant()) { try { return driverContext.blockFactory().newConstantIntBlockWith(evalValue(vector, 0), positionCount); - } catch (Exception e) { + } catch (InvalidArgumentException | QlIllegalArgumentException e) { registerException(e); return driverContext.blockFactory().newConstantNullBlock(positionCount); } @@ -46,7 +48,7 @@ public Block evalVector(Vector v) { for (int p = 0; p < positionCount; p++) { try { builder.appendInt(evalValue(vector, p)); - } catch (Exception e) { + } catch (InvalidArgumentException | QlIllegalArgumentException e) { registerException(e); builder.appendNull(); } @@ -80,7 +82,7 @@ public Block evalBlock(Block b) { } builder.appendInt(value); valuesAppended = true; - } catch (Exception e) { + } catch (InvalidArgumentException | QlIllegalArgumentException e) { registerException(e); } } diff --git 
a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToIntegerFromStringEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToIntegerFromStringEvaluator.java index d6d577711db4e..bff4d46b09dff 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToIntegerFromStringEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToIntegerFromStringEvaluator.java @@ -4,6 +4,7 @@ // 2.0. package org.elasticsearch.xpack.esql.expression.function.scalar.convert; +import java.lang.NumberFormatException; import java.lang.Override; import java.lang.String; import org.apache.lucene.util.BytesRef; @@ -39,7 +40,7 @@ public Block evalVector(Vector v) { if (vector.isConstant()) { try { return driverContext.blockFactory().newConstantIntBlockWith(evalValue(vector, 0, scratchPad), positionCount); - } catch (Exception e) { + } catch (NumberFormatException e) { registerException(e); return driverContext.blockFactory().newConstantNullBlock(positionCount); } @@ -48,7 +49,7 @@ public Block evalVector(Vector v) { for (int p = 0; p < positionCount; p++) { try { builder.appendInt(evalValue(vector, p, scratchPad)); - } catch (Exception e) { + } catch (NumberFormatException e) { registerException(e); builder.appendNull(); } @@ -83,7 +84,7 @@ public Block evalBlock(Block b) { } builder.appendInt(value); valuesAppended = true; - } catch (Exception e) { + } catch (NumberFormatException e) { registerException(e); } } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToIntegerFromUnsignedLongEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToIntegerFromUnsignedLongEvaluator.java index 
c4f26c73c352b..ccd1edc4aa6c2 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToIntegerFromUnsignedLongEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToIntegerFromUnsignedLongEvaluator.java @@ -13,6 +13,8 @@ import org.elasticsearch.compute.data.Vector; import org.elasticsearch.compute.operator.DriverContext; import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.xpack.ql.InvalidArgumentException; +import org.elasticsearch.xpack.ql.QlIllegalArgumentException; import org.elasticsearch.xpack.ql.tree.Source; /** @@ -37,7 +39,7 @@ public Block evalVector(Vector v) { if (vector.isConstant()) { try { return driverContext.blockFactory().newConstantIntBlockWith(evalValue(vector, 0), positionCount); - } catch (Exception e) { + } catch (InvalidArgumentException | QlIllegalArgumentException e) { registerException(e); return driverContext.blockFactory().newConstantNullBlock(positionCount); } @@ -46,7 +48,7 @@ public Block evalVector(Vector v) { for (int p = 0; p < positionCount; p++) { try { builder.appendInt(evalValue(vector, p)); - } catch (Exception e) { + } catch (InvalidArgumentException | QlIllegalArgumentException e) { registerException(e); builder.appendNull(); } @@ -80,7 +82,7 @@ public Block evalBlock(Block b) { } builder.appendInt(value); valuesAppended = true; - } catch (Exception e) { + } catch (InvalidArgumentException | QlIllegalArgumentException e) { registerException(e); } } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLongFromBooleanEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLongFromBooleanEvaluator.java index 54cc1234ad737..7d6c145405e56 100644 --- 
a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLongFromBooleanEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLongFromBooleanEvaluator.java @@ -35,21 +35,11 @@ public Block evalVector(Vector v) { BooleanVector vector = (BooleanVector) v; int positionCount = v.getPositionCount(); if (vector.isConstant()) { - try { - return driverContext.blockFactory().newConstantLongBlockWith(evalValue(vector, 0), positionCount); - } catch (Exception e) { - registerException(e); - return driverContext.blockFactory().newConstantNullBlock(positionCount); - } + return driverContext.blockFactory().newConstantLongBlockWith(evalValue(vector, 0), positionCount); } try (LongBlock.Builder builder = driverContext.blockFactory().newLongBlockBuilder(positionCount)) { for (int p = 0; p < positionCount; p++) { - try { - builder.appendLong(evalValue(vector, p)); - } catch (Exception e) { - registerException(e); - builder.appendNull(); - } + builder.appendLong(evalValue(vector, p)); } return builder.build(); } @@ -72,17 +62,13 @@ public Block evalBlock(Block b) { boolean positionOpened = false; boolean valuesAppended = false; for (int i = start; i < end; i++) { - try { - long value = evalValue(block, i); - if (positionOpened == false && valueCount > 1) { - builder.beginPositionEntry(); - positionOpened = true; - } - builder.appendLong(value); - valuesAppended = true; - } catch (Exception e) { - registerException(e); + long value = evalValue(block, i); + if (positionOpened == false && valueCount > 1) { + builder.beginPositionEntry(); + positionOpened = true; } + builder.appendLong(value); + valuesAppended = true; } if (valuesAppended == false) { builder.appendNull(); diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLongFromDoubleEvaluator.java 
b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLongFromDoubleEvaluator.java index a78267bc8b4b0..b8b86f1d6cbf1 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLongFromDoubleEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLongFromDoubleEvaluator.java @@ -13,6 +13,8 @@ import org.elasticsearch.compute.data.Vector; import org.elasticsearch.compute.operator.DriverContext; import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.xpack.ql.InvalidArgumentException; +import org.elasticsearch.xpack.ql.QlIllegalArgumentException; import org.elasticsearch.xpack.ql.tree.Source; /** @@ -37,7 +39,7 @@ public Block evalVector(Vector v) { if (vector.isConstant()) { try { return driverContext.blockFactory().newConstantLongBlockWith(evalValue(vector, 0), positionCount); - } catch (Exception e) { + } catch (InvalidArgumentException | QlIllegalArgumentException e) { registerException(e); return driverContext.blockFactory().newConstantNullBlock(positionCount); } @@ -46,7 +48,7 @@ public Block evalVector(Vector v) { for (int p = 0; p < positionCount; p++) { try { builder.appendLong(evalValue(vector, p)); - } catch (Exception e) { + } catch (InvalidArgumentException | QlIllegalArgumentException e) { registerException(e); builder.appendNull(); } @@ -80,7 +82,7 @@ public Block evalBlock(Block b) { } builder.appendLong(value); valuesAppended = true; - } catch (Exception e) { + } catch (InvalidArgumentException | QlIllegalArgumentException e) { registerException(e); } } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLongFromIntEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLongFromIntEvaluator.java 
index 0009214b8ee88..dc3a9578ffd9b 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLongFromIntEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLongFromIntEvaluator.java @@ -35,21 +35,11 @@ public Block evalVector(Vector v) { IntVector vector = (IntVector) v; int positionCount = v.getPositionCount(); if (vector.isConstant()) { - try { - return driverContext.blockFactory().newConstantLongBlockWith(evalValue(vector, 0), positionCount); - } catch (Exception e) { - registerException(e); - return driverContext.blockFactory().newConstantNullBlock(positionCount); - } + return driverContext.blockFactory().newConstantLongBlockWith(evalValue(vector, 0), positionCount); } try (LongBlock.Builder builder = driverContext.blockFactory().newLongBlockBuilder(positionCount)) { for (int p = 0; p < positionCount; p++) { - try { - builder.appendLong(evalValue(vector, p)); - } catch (Exception e) { - registerException(e); - builder.appendNull(); - } + builder.appendLong(evalValue(vector, p)); } return builder.build(); } @@ -72,17 +62,13 @@ public Block evalBlock(Block b) { boolean positionOpened = false; boolean valuesAppended = false; for (int i = start; i < end; i++) { - try { - long value = evalValue(block, i); - if (positionOpened == false && valueCount > 1) { - builder.beginPositionEntry(); - positionOpened = true; - } - builder.appendLong(value); - valuesAppended = true; - } catch (Exception e) { - registerException(e); + long value = evalValue(block, i); + if (positionOpened == false && valueCount > 1) { + builder.beginPositionEntry(); + positionOpened = true; } + builder.appendLong(value); + valuesAppended = true; } if (valuesAppended == false) { builder.appendNull(); diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLongFromStringEvaluator.java 
b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLongFromStringEvaluator.java index b7007d88a4995..e0eca6b6bcbff 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLongFromStringEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLongFromStringEvaluator.java @@ -4,6 +4,7 @@ // 2.0. package org.elasticsearch.xpack.esql.expression.function.scalar.convert; +import java.lang.NumberFormatException; import java.lang.Override; import java.lang.String; import org.apache.lucene.util.BytesRef; @@ -39,7 +40,7 @@ public Block evalVector(Vector v) { if (vector.isConstant()) { try { return driverContext.blockFactory().newConstantLongBlockWith(evalValue(vector, 0, scratchPad), positionCount); - } catch (Exception e) { + } catch (NumberFormatException e) { registerException(e); return driverContext.blockFactory().newConstantNullBlock(positionCount); } @@ -48,7 +49,7 @@ public Block evalVector(Vector v) { for (int p = 0; p < positionCount; p++) { try { builder.appendLong(evalValue(vector, p, scratchPad)); - } catch (Exception e) { + } catch (NumberFormatException e) { registerException(e); builder.appendNull(); } @@ -83,7 +84,7 @@ public Block evalBlock(Block b) { } builder.appendLong(value); valuesAppended = true; - } catch (Exception e) { + } catch (NumberFormatException e) { registerException(e); } } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLongFromUnsignedLongEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLongFromUnsignedLongEvaluator.java index 0d548ff9feff9..41f8980581073 100644 --- 
a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLongFromUnsignedLongEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLongFromUnsignedLongEvaluator.java @@ -12,6 +12,8 @@ import org.elasticsearch.compute.data.Vector; import org.elasticsearch.compute.operator.DriverContext; import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.xpack.ql.InvalidArgumentException; +import org.elasticsearch.xpack.ql.QlIllegalArgumentException; import org.elasticsearch.xpack.ql.tree.Source; /** @@ -36,7 +38,7 @@ public Block evalVector(Vector v) { if (vector.isConstant()) { try { return driverContext.blockFactory().newConstantLongBlockWith(evalValue(vector, 0), positionCount); - } catch (Exception e) { + } catch (InvalidArgumentException | QlIllegalArgumentException e) { registerException(e); return driverContext.blockFactory().newConstantNullBlock(positionCount); } @@ -45,7 +47,7 @@ public Block evalVector(Vector v) { for (int p = 0; p < positionCount; p++) { try { builder.appendLong(evalValue(vector, p)); - } catch (Exception e) { + } catch (InvalidArgumentException | QlIllegalArgumentException e) { registerException(e); builder.appendNull(); } @@ -79,7 +81,7 @@ public Block evalBlock(Block b) { } builder.appendLong(value); valuesAppended = true; - } catch (Exception e) { + } catch (InvalidArgumentException | QlIllegalArgumentException e) { registerException(e); } } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToRadiansEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToRadiansEvaluator.java index 35c712a3c0b40..3bd997d0b1d38 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToRadiansEvaluator.java +++ 
b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToRadiansEvaluator.java @@ -34,21 +34,11 @@ public Block evalVector(Vector v) { DoubleVector vector = (DoubleVector) v; int positionCount = v.getPositionCount(); if (vector.isConstant()) { - try { - return driverContext.blockFactory().newConstantDoubleBlockWith(evalValue(vector, 0), positionCount); - } catch (Exception e) { - registerException(e); - return driverContext.blockFactory().newConstantNullBlock(positionCount); - } + return driverContext.blockFactory().newConstantDoubleBlockWith(evalValue(vector, 0), positionCount); } try (DoubleBlock.Builder builder = driverContext.blockFactory().newDoubleBlockBuilder(positionCount)) { for (int p = 0; p < positionCount; p++) { - try { - builder.appendDouble(evalValue(vector, p)); - } catch (Exception e) { - registerException(e); - builder.appendNull(); - } + builder.appendDouble(evalValue(vector, p)); } return builder.build(); } @@ -71,17 +61,13 @@ public Block evalBlock(Block b) { boolean positionOpened = false; boolean valuesAppended = false; for (int i = start; i < end; i++) { - try { - double value = evalValue(block, i); - if (positionOpened == false && valueCount > 1) { - builder.beginPositionEntry(); - positionOpened = true; - } - builder.appendDouble(value); - valuesAppended = true; - } catch (Exception e) { - registerException(e); + double value = evalValue(block, i); + if (positionOpened == false && valueCount > 1) { + builder.beginPositionEntry(); + positionOpened = true; } + builder.appendDouble(value); + valuesAppended = true; } if (valuesAppended == false) { builder.appendNull(); diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringFromBooleanEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringFromBooleanEvaluator.java index 
f2b423e351f8e..a68cd61a8c470 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringFromBooleanEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringFromBooleanEvaluator.java @@ -36,21 +36,11 @@ public Block evalVector(Vector v) { BooleanVector vector = (BooleanVector) v; int positionCount = v.getPositionCount(); if (vector.isConstant()) { - try { - return driverContext.blockFactory().newConstantBytesRefBlockWith(evalValue(vector, 0), positionCount); - } catch (Exception e) { - registerException(e); - return driverContext.blockFactory().newConstantNullBlock(positionCount); - } + return driverContext.blockFactory().newConstantBytesRefBlockWith(evalValue(vector, 0), positionCount); } try (BytesRefBlock.Builder builder = driverContext.blockFactory().newBytesRefBlockBuilder(positionCount)) { for (int p = 0; p < positionCount; p++) { - try { - builder.appendBytesRef(evalValue(vector, p)); - } catch (Exception e) { - registerException(e); - builder.appendNull(); - } + builder.appendBytesRef(evalValue(vector, p)); } return builder.build(); } @@ -73,17 +63,13 @@ public Block evalBlock(Block b) { boolean positionOpened = false; boolean valuesAppended = false; for (int i = start; i < end; i++) { - try { - BytesRef value = evalValue(block, i); - if (positionOpened == false && valueCount > 1) { - builder.beginPositionEntry(); - positionOpened = true; - } - builder.appendBytesRef(value); - valuesAppended = true; - } catch (Exception e) { - registerException(e); + BytesRef value = evalValue(block, i); + if (positionOpened == false && valueCount > 1) { + builder.beginPositionEntry(); + positionOpened = true; } + builder.appendBytesRef(value); + valuesAppended = true; } if (valuesAppended == false) { builder.appendNull(); diff --git 
a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringFromDatetimeEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringFromDatetimeEvaluator.java index c978ae8e1d6a4..569881ad30b61 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringFromDatetimeEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringFromDatetimeEvaluator.java @@ -36,21 +36,11 @@ public Block evalVector(Vector v) { LongVector vector = (LongVector) v; int positionCount = v.getPositionCount(); if (vector.isConstant()) { - try { - return driverContext.blockFactory().newConstantBytesRefBlockWith(evalValue(vector, 0), positionCount); - } catch (Exception e) { - registerException(e); - return driverContext.blockFactory().newConstantNullBlock(positionCount); - } + return driverContext.blockFactory().newConstantBytesRefBlockWith(evalValue(vector, 0), positionCount); } try (BytesRefBlock.Builder builder = driverContext.blockFactory().newBytesRefBlockBuilder(positionCount)) { for (int p = 0; p < positionCount; p++) { - try { - builder.appendBytesRef(evalValue(vector, p)); - } catch (Exception e) { - registerException(e); - builder.appendNull(); - } + builder.appendBytesRef(evalValue(vector, p)); } return builder.build(); } @@ -73,17 +63,13 @@ public Block evalBlock(Block b) { boolean positionOpened = false; boolean valuesAppended = false; for (int i = start; i < end; i++) { - try { - BytesRef value = evalValue(block, i); - if (positionOpened == false && valueCount > 1) { - builder.beginPositionEntry(); - positionOpened = true; - } - builder.appendBytesRef(value); - valuesAppended = true; - } catch (Exception e) { - registerException(e); + BytesRef value = evalValue(block, i); + if (positionOpened == false && 
valueCount > 1) { + builder.beginPositionEntry(); + positionOpened = true; } + builder.appendBytesRef(value); + valuesAppended = true; } if (valuesAppended == false) { builder.appendNull(); diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringFromDoubleEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringFromDoubleEvaluator.java index c6fd0435bc96f..69c33e07c1650 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringFromDoubleEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringFromDoubleEvaluator.java @@ -36,21 +36,11 @@ public Block evalVector(Vector v) { DoubleVector vector = (DoubleVector) v; int positionCount = v.getPositionCount(); if (vector.isConstant()) { - try { - return driverContext.blockFactory().newConstantBytesRefBlockWith(evalValue(vector, 0), positionCount); - } catch (Exception e) { - registerException(e); - return driverContext.blockFactory().newConstantNullBlock(positionCount); - } + return driverContext.blockFactory().newConstantBytesRefBlockWith(evalValue(vector, 0), positionCount); } try (BytesRefBlock.Builder builder = driverContext.blockFactory().newBytesRefBlockBuilder(positionCount)) { for (int p = 0; p < positionCount; p++) { - try { - builder.appendBytesRef(evalValue(vector, p)); - } catch (Exception e) { - registerException(e); - builder.appendNull(); - } + builder.appendBytesRef(evalValue(vector, p)); } return builder.build(); } @@ -73,17 +63,13 @@ public Block evalBlock(Block b) { boolean positionOpened = false; boolean valuesAppended = false; for (int i = start; i < end; i++) { - try { - BytesRef value = evalValue(block, i); - if (positionOpened == false && valueCount > 1) { - builder.beginPositionEntry(); - positionOpened = 
true; - } - builder.appendBytesRef(value); - valuesAppended = true; - } catch (Exception e) { - registerException(e); + BytesRef value = evalValue(block, i); + if (positionOpened == false && valueCount > 1) { + builder.beginPositionEntry(); + positionOpened = true; } + builder.appendBytesRef(value); + valuesAppended = true; } if (valuesAppended == false) { builder.appendNull(); diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringFromIPEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringFromIPEvaluator.java index 96d7ab6601b4c..00fb269699fe3 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringFromIPEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringFromIPEvaluator.java @@ -36,21 +36,11 @@ public Block evalVector(Vector v) { int positionCount = v.getPositionCount(); BytesRef scratchPad = new BytesRef(); if (vector.isConstant()) { - try { - return driverContext.blockFactory().newConstantBytesRefBlockWith(evalValue(vector, 0, scratchPad), positionCount); - } catch (Exception e) { - registerException(e); - return driverContext.blockFactory().newConstantNullBlock(positionCount); - } + return driverContext.blockFactory().newConstantBytesRefBlockWith(evalValue(vector, 0, scratchPad), positionCount); } try (BytesRefBlock.Builder builder = driverContext.blockFactory().newBytesRefBlockBuilder(positionCount)) { for (int p = 0; p < positionCount; p++) { - try { - builder.appendBytesRef(evalValue(vector, p, scratchPad)); - } catch (Exception e) { - registerException(e); - builder.appendNull(); - } + builder.appendBytesRef(evalValue(vector, p, scratchPad)); } return builder.build(); } @@ -74,17 +64,13 @@ public Block evalBlock(Block b) { boolean positionOpened = false; 
boolean valuesAppended = false; for (int i = start; i < end; i++) { - try { - BytesRef value = evalValue(block, i, scratchPad); - if (positionOpened == false && valueCount > 1) { - builder.beginPositionEntry(); - positionOpened = true; - } - builder.appendBytesRef(value); - valuesAppended = true; - } catch (Exception e) { - registerException(e); + BytesRef value = evalValue(block, i, scratchPad); + if (positionOpened == false && valueCount > 1) { + builder.beginPositionEntry(); + positionOpened = true; } + builder.appendBytesRef(value); + valuesAppended = true; } if (valuesAppended == false) { builder.appendNull(); diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringFromIntEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringFromIntEvaluator.java index 95b9cd70fba2b..6e371c90adb28 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringFromIntEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringFromIntEvaluator.java @@ -36,21 +36,11 @@ public Block evalVector(Vector v) { IntVector vector = (IntVector) v; int positionCount = v.getPositionCount(); if (vector.isConstant()) { - try { - return driverContext.blockFactory().newConstantBytesRefBlockWith(evalValue(vector, 0), positionCount); - } catch (Exception e) { - registerException(e); - return driverContext.blockFactory().newConstantNullBlock(positionCount); - } + return driverContext.blockFactory().newConstantBytesRefBlockWith(evalValue(vector, 0), positionCount); } try (BytesRefBlock.Builder builder = driverContext.blockFactory().newBytesRefBlockBuilder(positionCount)) { for (int p = 0; p < positionCount; p++) { - try { - builder.appendBytesRef(evalValue(vector, p)); - } catch (Exception e) { - 
registerException(e); - builder.appendNull(); - } + builder.appendBytesRef(evalValue(vector, p)); } return builder.build(); } @@ -73,17 +63,13 @@ public Block evalBlock(Block b) { boolean positionOpened = false; boolean valuesAppended = false; for (int i = start; i < end; i++) { - try { - BytesRef value = evalValue(block, i); - if (positionOpened == false && valueCount > 1) { - builder.beginPositionEntry(); - positionOpened = true; - } - builder.appendBytesRef(value); - valuesAppended = true; - } catch (Exception e) { - registerException(e); + BytesRef value = evalValue(block, i); + if (positionOpened == false && valueCount > 1) { + builder.beginPositionEntry(); + positionOpened = true; } + builder.appendBytesRef(value); + valuesAppended = true; } if (valuesAppended == false) { builder.appendNull(); diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringFromLongEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringFromLongEvaluator.java index 1a5f2fecdecf4..3dc8f738d7b1d 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringFromLongEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringFromLongEvaluator.java @@ -36,21 +36,11 @@ public Block evalVector(Vector v) { LongVector vector = (LongVector) v; int positionCount = v.getPositionCount(); if (vector.isConstant()) { - try { - return driverContext.blockFactory().newConstantBytesRefBlockWith(evalValue(vector, 0), positionCount); - } catch (Exception e) { - registerException(e); - return driverContext.blockFactory().newConstantNullBlock(positionCount); - } + return driverContext.blockFactory().newConstantBytesRefBlockWith(evalValue(vector, 0), positionCount); } try (BytesRefBlock.Builder builder = 
driverContext.blockFactory().newBytesRefBlockBuilder(positionCount)) { for (int p = 0; p < positionCount; p++) { - try { - builder.appendBytesRef(evalValue(vector, p)); - } catch (Exception e) { - registerException(e); - builder.appendNull(); - } + builder.appendBytesRef(evalValue(vector, p)); } return builder.build(); } @@ -73,17 +63,13 @@ public Block evalBlock(Block b) { boolean positionOpened = false; boolean valuesAppended = false; for (int i = start; i < end; i++) { - try { - BytesRef value = evalValue(block, i); - if (positionOpened == false && valueCount > 1) { - builder.beginPositionEntry(); - positionOpened = true; - } - builder.appendBytesRef(value); - valuesAppended = true; - } catch (Exception e) { - registerException(e); + BytesRef value = evalValue(block, i); + if (positionOpened == false && valueCount > 1) { + builder.beginPositionEntry(); + positionOpened = true; } + builder.appendBytesRef(value); + valuesAppended = true; } if (valuesAppended == false) { builder.appendNull(); diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringFromUnsignedLongEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringFromUnsignedLongEvaluator.java index 9650dbb5e47fd..4bce2c1fec40f 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringFromUnsignedLongEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringFromUnsignedLongEvaluator.java @@ -36,21 +36,11 @@ public Block evalVector(Vector v) { LongVector vector = (LongVector) v; int positionCount = v.getPositionCount(); if (vector.isConstant()) { - try { - return driverContext.blockFactory().newConstantBytesRefBlockWith(evalValue(vector, 0), positionCount); - } catch (Exception e) { - registerException(e); - return 
driverContext.blockFactory().newConstantNullBlock(positionCount); - } + return driverContext.blockFactory().newConstantBytesRefBlockWith(evalValue(vector, 0), positionCount); } try (BytesRefBlock.Builder builder = driverContext.blockFactory().newBytesRefBlockBuilder(positionCount)) { for (int p = 0; p < positionCount; p++) { - try { - builder.appendBytesRef(evalValue(vector, p)); - } catch (Exception e) { - registerException(e); - builder.appendNull(); - } + builder.appendBytesRef(evalValue(vector, p)); } return builder.build(); } @@ -73,17 +63,13 @@ public Block evalBlock(Block b) { boolean positionOpened = false; boolean valuesAppended = false; for (int i = start; i < end; i++) { - try { - BytesRef value = evalValue(block, i); - if (positionOpened == false && valueCount > 1) { - builder.beginPositionEntry(); - positionOpened = true; - } - builder.appendBytesRef(value); - valuesAppended = true; - } catch (Exception e) { - registerException(e); + BytesRef value = evalValue(block, i); + if (positionOpened == false && valueCount > 1) { + builder.beginPositionEntry(); + positionOpened = true; } + builder.appendBytesRef(value); + valuesAppended = true; } if (valuesAppended == false) { builder.appendNull(); diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringFromVersionEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringFromVersionEvaluator.java index 1c2a5a7a80b46..a37696e149d4c 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringFromVersionEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToStringFromVersionEvaluator.java @@ -36,21 +36,11 @@ public Block evalVector(Vector v) { int positionCount = v.getPositionCount(); BytesRef scratchPad = new BytesRef(); if 
(vector.isConstant()) { - try { - return driverContext.blockFactory().newConstantBytesRefBlockWith(evalValue(vector, 0, scratchPad), positionCount); - } catch (Exception e) { - registerException(e); - return driverContext.blockFactory().newConstantNullBlock(positionCount); - } + return driverContext.blockFactory().newConstantBytesRefBlockWith(evalValue(vector, 0, scratchPad), positionCount); } try (BytesRefBlock.Builder builder = driverContext.blockFactory().newBytesRefBlockBuilder(positionCount)) { for (int p = 0; p < positionCount; p++) { - try { - builder.appendBytesRef(evalValue(vector, p, scratchPad)); - } catch (Exception e) { - registerException(e); - builder.appendNull(); - } + builder.appendBytesRef(evalValue(vector, p, scratchPad)); } return builder.build(); } @@ -74,17 +64,13 @@ public Block evalBlock(Block b) { boolean positionOpened = false; boolean valuesAppended = false; for (int i = start; i < end; i++) { - try { - BytesRef value = evalValue(block, i, scratchPad); - if (positionOpened == false && valueCount > 1) { - builder.beginPositionEntry(); - positionOpened = true; - } - builder.appendBytesRef(value); - valuesAppended = true; - } catch (Exception e) { - registerException(e); + BytesRef value = evalValue(block, i, scratchPad); + if (positionOpened == false && valueCount > 1) { + builder.beginPositionEntry(); + positionOpened = true; } + builder.appendBytesRef(value); + valuesAppended = true; } if (valuesAppended == false) { builder.appendNull(); diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToUnsignedLongFromBooleanEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToUnsignedLongFromBooleanEvaluator.java index 3386bbd4d808d..619a4ec09d60b 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToUnsignedLongFromBooleanEvaluator.java +++ 
b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToUnsignedLongFromBooleanEvaluator.java @@ -35,21 +35,11 @@ public Block evalVector(Vector v) { BooleanVector vector = (BooleanVector) v; int positionCount = v.getPositionCount(); if (vector.isConstant()) { - try { - return driverContext.blockFactory().newConstantLongBlockWith(evalValue(vector, 0), positionCount); - } catch (Exception e) { - registerException(e); - return driverContext.blockFactory().newConstantNullBlock(positionCount); - } + return driverContext.blockFactory().newConstantLongBlockWith(evalValue(vector, 0), positionCount); } try (LongBlock.Builder builder = driverContext.blockFactory().newLongBlockBuilder(positionCount)) { for (int p = 0; p < positionCount; p++) { - try { - builder.appendLong(evalValue(vector, p)); - } catch (Exception e) { - registerException(e); - builder.appendNull(); - } + builder.appendLong(evalValue(vector, p)); } return builder.build(); } @@ -72,17 +62,13 @@ public Block evalBlock(Block b) { boolean positionOpened = false; boolean valuesAppended = false; for (int i = start; i < end; i++) { - try { - long value = evalValue(block, i); - if (positionOpened == false && valueCount > 1) { - builder.beginPositionEntry(); - positionOpened = true; - } - builder.appendLong(value); - valuesAppended = true; - } catch (Exception e) { - registerException(e); + long value = evalValue(block, i); + if (positionOpened == false && valueCount > 1) { + builder.beginPositionEntry(); + positionOpened = true; } + builder.appendLong(value); + valuesAppended = true; } if (valuesAppended == false) { builder.appendNull(); diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToUnsignedLongFromDoubleEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToUnsignedLongFromDoubleEvaluator.java index 
d85fc923f0e5b..6d57bbd978370 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToUnsignedLongFromDoubleEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToUnsignedLongFromDoubleEvaluator.java @@ -13,6 +13,8 @@ import org.elasticsearch.compute.data.Vector; import org.elasticsearch.compute.operator.DriverContext; import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.xpack.ql.InvalidArgumentException; +import org.elasticsearch.xpack.ql.QlIllegalArgumentException; import org.elasticsearch.xpack.ql.tree.Source; /** @@ -37,7 +39,7 @@ public Block evalVector(Vector v) { if (vector.isConstant()) { try { return driverContext.blockFactory().newConstantLongBlockWith(evalValue(vector, 0), positionCount); - } catch (Exception e) { + } catch (InvalidArgumentException | QlIllegalArgumentException e) { registerException(e); return driverContext.blockFactory().newConstantNullBlock(positionCount); } @@ -46,7 +48,7 @@ public Block evalVector(Vector v) { for (int p = 0; p < positionCount; p++) { try { builder.appendLong(evalValue(vector, p)); - } catch (Exception e) { + } catch (InvalidArgumentException | QlIllegalArgumentException e) { registerException(e); builder.appendNull(); } @@ -80,7 +82,7 @@ public Block evalBlock(Block b) { } builder.appendLong(value); valuesAppended = true; - } catch (Exception e) { + } catch (InvalidArgumentException | QlIllegalArgumentException e) { registerException(e); } } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToUnsignedLongFromIntEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToUnsignedLongFromIntEvaluator.java index 595ed38f4f90e..d3ccf82f2cb05 100644 --- 
a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToUnsignedLongFromIntEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToUnsignedLongFromIntEvaluator.java @@ -35,21 +35,11 @@ public Block evalVector(Vector v) { IntVector vector = (IntVector) v; int positionCount = v.getPositionCount(); if (vector.isConstant()) { - try { - return driverContext.blockFactory().newConstantLongBlockWith(evalValue(vector, 0), positionCount); - } catch (Exception e) { - registerException(e); - return driverContext.blockFactory().newConstantNullBlock(positionCount); - } + return driverContext.blockFactory().newConstantLongBlockWith(evalValue(vector, 0), positionCount); } try (LongBlock.Builder builder = driverContext.blockFactory().newLongBlockBuilder(positionCount)) { for (int p = 0; p < positionCount; p++) { - try { - builder.appendLong(evalValue(vector, p)); - } catch (Exception e) { - registerException(e); - builder.appendNull(); - } + builder.appendLong(evalValue(vector, p)); } return builder.build(); } @@ -72,17 +62,13 @@ public Block evalBlock(Block b) { boolean positionOpened = false; boolean valuesAppended = false; for (int i = start; i < end; i++) { - try { - long value = evalValue(block, i); - if (positionOpened == false && valueCount > 1) { - builder.beginPositionEntry(); - positionOpened = true; - } - builder.appendLong(value); - valuesAppended = true; - } catch (Exception e) { - registerException(e); + long value = evalValue(block, i); + if (positionOpened == false && valueCount > 1) { + builder.beginPositionEntry(); + positionOpened = true; } + builder.appendLong(value); + valuesAppended = true; } if (valuesAppended == false) { builder.appendNull(); diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToUnsignedLongFromLongEvaluator.java 
b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToUnsignedLongFromLongEvaluator.java index ec91840e2dbd0..2f01aef20edde 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToUnsignedLongFromLongEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToUnsignedLongFromLongEvaluator.java @@ -34,21 +34,11 @@ public Block evalVector(Vector v) { LongVector vector = (LongVector) v; int positionCount = v.getPositionCount(); if (vector.isConstant()) { - try { - return driverContext.blockFactory().newConstantLongBlockWith(evalValue(vector, 0), positionCount); - } catch (Exception e) { - registerException(e); - return driverContext.blockFactory().newConstantNullBlock(positionCount); - } + return driverContext.blockFactory().newConstantLongBlockWith(evalValue(vector, 0), positionCount); } try (LongBlock.Builder builder = driverContext.blockFactory().newLongBlockBuilder(positionCount)) { for (int p = 0; p < positionCount; p++) { - try { - builder.appendLong(evalValue(vector, p)); - } catch (Exception e) { - registerException(e); - builder.appendNull(); - } + builder.appendLong(evalValue(vector, p)); } return builder.build(); } @@ -71,17 +61,13 @@ public Block evalBlock(Block b) { boolean positionOpened = false; boolean valuesAppended = false; for (int i = start; i < end; i++) { - try { - long value = evalValue(block, i); - if (positionOpened == false && valueCount > 1) { - builder.beginPositionEntry(); - positionOpened = true; - } - builder.appendLong(value); - valuesAppended = true; - } catch (Exception e) { - registerException(e); + long value = evalValue(block, i); + if (positionOpened == false && valueCount > 1) { + builder.beginPositionEntry(); + positionOpened = true; } + builder.appendLong(value); + valuesAppended = true; } if (valuesAppended == false) { 
builder.appendNull(); diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToUnsignedLongFromStringEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToUnsignedLongFromStringEvaluator.java index 1d53f0cde4936..4552154560421 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToUnsignedLongFromStringEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToUnsignedLongFromStringEvaluator.java @@ -4,6 +4,7 @@ // 2.0. package org.elasticsearch.xpack.esql.expression.function.scalar.convert; +import java.lang.NumberFormatException; import java.lang.Override; import java.lang.String; import org.apache.lucene.util.BytesRef; @@ -39,7 +40,7 @@ public Block evalVector(Vector v) { if (vector.isConstant()) { try { return driverContext.blockFactory().newConstantLongBlockWith(evalValue(vector, 0, scratchPad), positionCount); - } catch (Exception e) { + } catch (NumberFormatException e) { registerException(e); return driverContext.blockFactory().newConstantNullBlock(positionCount); } @@ -48,7 +49,7 @@ public Block evalVector(Vector v) { for (int p = 0; p < positionCount; p++) { try { builder.appendLong(evalValue(vector, p, scratchPad)); - } catch (Exception e) { + } catch (NumberFormatException e) { registerException(e); builder.appendNull(); } @@ -83,7 +84,7 @@ public Block evalBlock(Block b) { } builder.appendLong(value); valuesAppended = true; - } catch (Exception e) { + } catch (NumberFormatException e) { registerException(e); } } diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToVersionFromStringEvaluator.java 
b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToVersionFromStringEvaluator.java index 14057b0010d93..5945129a8ae05 100644 --- a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToVersionFromStringEvaluator.java +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToVersionFromStringEvaluator.java @@ -36,21 +36,11 @@ public Block evalVector(Vector v) { int positionCount = v.getPositionCount(); BytesRef scratchPad = new BytesRef(); if (vector.isConstant()) { - try { - return driverContext.blockFactory().newConstantBytesRefBlockWith(evalValue(vector, 0, scratchPad), positionCount); - } catch (Exception e) { - registerException(e); - return driverContext.blockFactory().newConstantNullBlock(positionCount); - } + return driverContext.blockFactory().newConstantBytesRefBlockWith(evalValue(vector, 0, scratchPad), positionCount); } try (BytesRefBlock.Builder builder = driverContext.blockFactory().newBytesRefBlockBuilder(positionCount)) { for (int p = 0; p < positionCount; p++) { - try { - builder.appendBytesRef(evalValue(vector, p, scratchPad)); - } catch (Exception e) { - registerException(e); - builder.appendNull(); - } + builder.appendBytesRef(evalValue(vector, p, scratchPad)); } return builder.build(); } @@ -74,17 +64,13 @@ public Block evalBlock(Block b) { boolean positionOpened = false; boolean valuesAppended = false; for (int i = start; i < end; i++) { - try { - BytesRef value = evalValue(block, i, scratchPad); - if (positionOpened == false && valueCount > 1) { - builder.beginPositionEntry(); - positionOpened = true; - } - builder.appendBytesRef(value); - valuesAppended = true; - } catch (Exception e) { - registerException(e); + BytesRef value = evalValue(block, i, scratchPad); + if (positionOpened == false && valueCount > 1) { + builder.beginPositionEntry(); + positionOpened = true; } + 
builder.appendBytesRef(value); + valuesAppended = true; } if (valuesAppended == false) { builder.appendNull(); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDatetime.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDatetime.java index d73cb59308be7..9910447708b44 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDatetime.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDatetime.java @@ -60,7 +60,7 @@ protected NodeInfo info() { return NodeInfo.create(this, ToDatetime::new, field()); } - @ConvertEvaluator(extraName = "FromString") + @ConvertEvaluator(extraName = "FromString", warnExceptions = { IllegalArgumentException.class }) static long fromKeyword(BytesRef in) { return DateParse.process(in, DateParse.DEFAULT_FORMATTER); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDouble.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDouble.java index 9972ae1d3dd81..e83a0eae8d7a8 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDouble.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDouble.java @@ -67,7 +67,7 @@ static double fromBoolean(boolean bool) { return bool ? 
1d : 0d; } - @ConvertEvaluator(extraName = "FromString") + @ConvertEvaluator(extraName = "FromString", warnExceptions = { NumberFormatException.class }) static double fromKeyword(BytesRef in) { return Double.parseDouble(in.utf8ToString()); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToIP.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToIP.java index 07c0bfedb98c9..4829d39b09d65 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToIP.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToIP.java @@ -52,7 +52,7 @@ protected NodeInfo info() { return NodeInfo.create(this, ToIP::new, field()); } - @ConvertEvaluator(extraName = "FromString") + @ConvertEvaluator(extraName = "FromString", warnExceptions = { IllegalArgumentException.class }) static BytesRef fromKeyword(BytesRef asString) { return parseIP(asString.utf8ToString()); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToInteger.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToInteger.java index 3f3b492095949..480962ca27f86 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToInteger.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToInteger.java @@ -9,6 +9,8 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.compute.ann.ConvertEvaluator; +import org.elasticsearch.xpack.ql.InvalidArgumentException; +import org.elasticsearch.xpack.ql.QlIllegalArgumentException; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.tree.NodeInfo; import org.elasticsearch.xpack.ql.tree.Source; @@ -68,7 +70,7 @@ 
static int fromBoolean(boolean bool) { return bool ? 1 : 0; } - @ConvertEvaluator(extraName = "FromString") + @ConvertEvaluator(extraName = "FromString", warnExceptions = { NumberFormatException.class }) static int fromKeyword(BytesRef in) { String asString = in.utf8ToString(); try { @@ -82,17 +84,17 @@ static int fromKeyword(BytesRef in) { } } - @ConvertEvaluator(extraName = "FromDouble") + @ConvertEvaluator(extraName = "FromDouble", warnExceptions = { InvalidArgumentException.class, QlIllegalArgumentException.class }) static int fromDouble(double dbl) { return fromLong(safeDoubleToLong(dbl)); } - @ConvertEvaluator(extraName = "FromUnsignedLong") + @ConvertEvaluator(extraName = "FromUnsignedLong", warnExceptions = { InvalidArgumentException.class, QlIllegalArgumentException.class }) static int fromUnsignedLong(long lng) { return fromLong(ToLong.fromUnsignedLong(lng)); } - @ConvertEvaluator(extraName = "FromLong") + @ConvertEvaluator(extraName = "FromLong", warnExceptions = { InvalidArgumentException.class, QlIllegalArgumentException.class }) static int fromLong(long lng) { return safeToInt(lng); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLong.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLong.java index e7f60abc6c3d4..8907ba930024e 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLong.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToLong.java @@ -9,6 +9,8 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.compute.ann.ConvertEvaluator; +import org.elasticsearch.xpack.ql.InvalidArgumentException; +import org.elasticsearch.xpack.ql.QlIllegalArgumentException; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.tree.NodeInfo; import 
org.elasticsearch.xpack.ql.tree.Source; @@ -69,7 +71,7 @@ static long fromBoolean(boolean bool) { return bool ? 1L : 0L; } - @ConvertEvaluator(extraName = "FromString") + @ConvertEvaluator(extraName = "FromString", warnExceptions = { NumberFormatException.class }) static long fromKeyword(BytesRef in) { String asString = in.utf8ToString(); try { @@ -83,12 +85,12 @@ static long fromKeyword(BytesRef in) { } } - @ConvertEvaluator(extraName = "FromDouble") + @ConvertEvaluator(extraName = "FromDouble", warnExceptions = { InvalidArgumentException.class, QlIllegalArgumentException.class }) static long fromDouble(double dbl) { return safeDoubleToLong(dbl); } - @ConvertEvaluator(extraName = "FromUnsignedLong") + @ConvertEvaluator(extraName = "FromUnsignedLong", warnExceptions = { InvalidArgumentException.class, QlIllegalArgumentException.class }) static long fromUnsignedLong(long ul) { return safeToLong(unsignedLongAsNumber(ul)); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToUnsignedLong.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToUnsignedLong.java index be96fdb7139d1..1b7ee01e50c54 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToUnsignedLong.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToUnsignedLong.java @@ -9,6 +9,8 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.compute.ann.ConvertEvaluator; +import org.elasticsearch.xpack.ql.InvalidArgumentException; +import org.elasticsearch.xpack.ql.QlIllegalArgumentException; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.tree.NodeInfo; import org.elasticsearch.xpack.ql.tree.Source; @@ -70,13 +72,13 @@ static long fromBoolean(boolean bool) { return bool ? 
ONE_AS_UNSIGNED_LONG : ZERO_AS_UNSIGNED_LONG; } - @ConvertEvaluator(extraName = "FromString") + @ConvertEvaluator(extraName = "FromString", warnExceptions = { NumberFormatException.class }) static long fromKeyword(BytesRef in) { String asString = in.utf8ToString(); return asLongUnsigned(safeToUnsignedLong(asString)); } - @ConvertEvaluator(extraName = "FromDouble") + @ConvertEvaluator(extraName = "FromDouble", warnExceptions = { InvalidArgumentException.class, QlIllegalArgumentException.class }) static long fromDouble(double dbl) { return asLongUnsigned(safeToUnsignedLong(dbl)); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java index a4c5638ae815f..094ecc9bfe569 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java @@ -297,7 +297,6 @@ public final void testCrankyEvaluateBlockWithoutNulls() { * input pattern contained only a single value. *

    */ - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/100820") public final void testCrankyEvaluateBlockWithoutNullsFloating() { assumeTrue("sometimes the cranky breaker silences warnings, just skip these cases", testCase.getExpectedWarnings() == null); try { diff --git a/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/ClusterStateWaitThresholdBreachTests.java b/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/ClusterStateWaitThresholdBreachTests.java index 83eee53195c8c..da3323966fb94 100644 --- a/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/ClusterStateWaitThresholdBreachTests.java +++ b/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/ClusterStateWaitThresholdBreachTests.java @@ -62,11 +62,6 @@ public void refreshDataStreamAndPolicy() { managedIndex = "index-" + randomAlphaOfLengthBetween(10, 15).toLowerCase(Locale.ROOT); } - @Override - protected boolean ignoreExternalCluster() { - return true; - } - @Override protected Collection> nodePlugins() { return Arrays.asList(LocalStateCompositeXPackPlugin.class, IndexLifecycle.class, Ccr.class); diff --git a/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/DataStreamAndIndexLifecycleMixingTests.java b/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/DataStreamAndIndexLifecycleMixingTests.java index f1f1e1b967d5f..668cc4121b7b5 100644 --- a/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/DataStreamAndIndexLifecycleMixingTests.java +++ b/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/DataStreamAndIndexLifecycleMixingTests.java @@ -80,10 +80,6 @@ protected Collection> nodePlugins() { return List.of(LocalStateCompositeXPackPlugin.class, IndexLifecycle.class, DataStreamsPlugin.class); } - protected boolean ignoreExternalCluster() { - return true; - } - @Override protected Settings 
nodeSettings(int nodeOrdinal, Settings otherSettings) { Settings.Builder settings = Settings.builder().put(super.nodeSettings(nodeOrdinal, otherSettings)); diff --git a/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/DataTiersMigrationsTests.java b/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/DataTiersMigrationsTests.java index a2cdc377a20e2..9dfc3ddcda91e 100644 --- a/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/DataTiersMigrationsTests.java +++ b/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/DataTiersMigrationsTests.java @@ -55,11 +55,6 @@ public void refreshDataStreamAndPolicy() { managedIndex = "index-" + randomAlphaOfLengthBetween(10, 15).toLowerCase(Locale.ROOT); } - @Override - protected boolean ignoreExternalCluster() { - return true; - } - @Override protected Collection> nodePlugins() { return Arrays.asList(LocalStateCompositeXPackPlugin.class, IndexLifecycle.class); diff --git a/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/IndexLifecycleInitialisationTests.java b/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/IndexLifecycleInitialisationTests.java index 0a252a0d62958..069771515d1b6 100644 --- a/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/IndexLifecycleInitialisationTests.java +++ b/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/IndexLifecycleInitialisationTests.java @@ -109,11 +109,6 @@ protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { return nodeSettings.build(); } - @Override - protected boolean ignoreExternalCluster() { - return true; - } - @Override protected Collection> nodePlugins() { return Arrays.asList(LocalStateCompositeXPackPlugin.class, IndexLifecycle.class, TestILMPlugin.class); diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleRunnerTests.java 
b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleRunnerTests.java index 22376ac789b9d..d2ec684d3d1ab 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleRunnerTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleRunnerTests.java @@ -131,7 +131,6 @@ public void prepare() { @After public void shutdown() { historyStore.close(); - noopClient.close(); threadPool.shutdownNow(); } diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleTransitionTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleTransitionTests.java index b1e07729bee0a..9449e0c0574dc 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleTransitionTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleTransitionTests.java @@ -8,7 +8,6 @@ package org.elasticsearch.xpack.ilm; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetadata; @@ -592,7 +591,8 @@ public void testValidateTransitionToCachedStepMissingFromPolicy() { IndexMetadata meta = buildIndexMetadata("my-policy", executionState); - try (Client client = new NoOpClient(getTestName())) { + try (var threadPool = createThreadPool()) { + final var client = new NoOpClient(threadPool); Step.StepKey currentStepKey = new Step.StepKey("hot", RolloverAction.NAME, WaitForRolloverReadyStep.NAME); Step.StepKey nextStepKey = new Step.StepKey("hot", RolloverAction.NAME, RolloverStep.NAME); Step currentStep = new WaitForRolloverReadyStep( @@ -648,7 +648,8 @@ public void testValidateTransitionToCachedStepWhenMissingPhaseFromPolicy() { IndexMetadata meta = buildIndexMetadata("my-policy", executionState); - try (Client client = new 
NoOpClient(getTestName())) { + try (var threadPool = createThreadPool()) { + final var client = new NoOpClient(threadPool); Step.StepKey currentStepKey = new Step.StepKey("warm", MigrateAction.NAME, DataTierMigrationRoutedStep.NAME); Step.StepKey nextStepKey = new Step.StepKey("warm", PhaseCompleteStep.NAME, PhaseCompleteStep.NAME); @@ -708,7 +709,8 @@ public void testValidateTransitionToInjectedMissingStep() { IndexMetadata meta = buildIndexMetadata("my-policy", executionState); - try (Client client = new NoOpClient(getTestName())) { + try (var threadPool = createThreadPool()) { + final var client = new NoOpClient(threadPool); Step.StepKey currentStepKey = new Step.StepKey("warm", MigrateAction.NAME, MigrateAction.NAME); Step.StepKey nextStepKey = new Step.StepKey("warm", MigrateAction.NAME, DataTierMigrationRoutedStep.NAME); @@ -1198,7 +1200,8 @@ public void testMoveStateToNextActionAndUpdateCachedPhase() { 2L ); - try (Client client = new NoOpClient(getTestName())) { + try (var threadPool = createThreadPool()) { + final var client = new NoOpClient(threadPool); LifecycleExecutionState newState = moveStateToNextActionAndUpdateCachedPhase( meta, meta.getLifecycleExecutionState(), @@ -1235,7 +1238,8 @@ public void testMoveStateToNextActionAndUpdateCachedPhase() { 2L ); - try (Client client = new NoOpClient(getTestName())) { + try (var threadPool = createThreadPool()) { + final var client = new NoOpClient(threadPool); LifecycleExecutionState newState = moveStateToNextActionAndUpdateCachedPhase( meta, meta.getLifecycleExecutionState(), diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/history/ILMHistoryStoreTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/history/ILMHistoryStoreTests.java index 8f8e0451cdbb9..6ac3a4522fb3d 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/history/ILMHistoryStoreTests.java +++ 
b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/history/ILMHistoryStoreTests.java @@ -93,7 +93,6 @@ public void setup() { public void setdown() { historyStore.close(); clusterService.close(); - client.close(); threadPool.shutdownNow(); } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/huggingface/HuggingFaceElserActionTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/huggingface/HuggingFaceElserActionTests.java index 713312204e65b..9809acf536c86 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/huggingface/HuggingFaceElserActionTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/action/huggingface/HuggingFaceElserActionTests.java @@ -35,9 +35,9 @@ import java.util.concurrent.TimeUnit; import static org.elasticsearch.xpack.core.ml.inference.trainedmodel.InferenceConfig.DEFAULT_RESULTS_FIELD; -import static org.elasticsearch.xpack.inference.external.http.Utils.createThreadPool; import static org.elasticsearch.xpack.inference.external.http.Utils.entityAsMap; import static org.elasticsearch.xpack.inference.external.http.Utils.getUrl; +import static org.elasticsearch.xpack.inference.external.http.Utils.inferenceUtilityPool; import static org.elasticsearch.xpack.inference.external.http.Utils.mockClusterServiceEmpty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; @@ -55,7 +55,7 @@ public class HuggingFaceElserActionTests extends ESTestCase { @Before public void init() throws Exception { webServer.start(); - threadPool = createThreadPool(getTestName()); + threadPool = createThreadPool(inferenceUtilityPool()); clientManager = HttpClientManager.create(Settings.EMPTY, threadPool, mockClusterServiceEmpty(), mock(ThrottlerManager.class)); } diff --git 
a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/HttpClientManagerTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/HttpClientManagerTests.java index 3e07bd773c65e..246e7d6d44c5a 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/HttpClientManagerTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/HttpClientManagerTests.java @@ -26,7 +26,7 @@ import java.util.concurrent.TimeUnit; import static org.elasticsearch.xpack.inference.external.http.HttpClientTests.createHttpPost; -import static org.elasticsearch.xpack.inference.external.http.Utils.createThreadPool; +import static org.elasticsearch.xpack.inference.external.http.Utils.inferenceUtilityPool; import static org.elasticsearch.xpack.inference.external.http.Utils.mockClusterService; import static org.elasticsearch.xpack.inference.external.http.Utils.mockClusterServiceEmpty; import static org.hamcrest.Matchers.equalTo; @@ -46,7 +46,7 @@ public class HttpClientManagerTests extends ESTestCase { @Before public void init() throws Exception { webServer.start(); - threadPool = createThreadPool(getTestName()); + threadPool = createThreadPool(inferenceUtilityPool()); } @After diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/HttpClientTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/HttpClientTests.java index c72d9167a9e06..3a7ec9d1b0f55 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/HttpClientTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/HttpClientTests.java @@ -42,7 +42,7 @@ import java.util.concurrent.TimeUnit; import static org.elasticsearch.core.Strings.format; -import static 
org.elasticsearch.xpack.inference.external.http.Utils.createThreadPool; +import static org.elasticsearch.xpack.inference.external.http.Utils.inferenceUtilityPool; import static org.elasticsearch.xpack.inference.external.http.Utils.mockClusterService; import static org.elasticsearch.xpack.inference.logging.ThrottlerManagerTests.mockThrottlerManager; import static org.hamcrest.Matchers.equalTo; @@ -63,7 +63,7 @@ public class HttpClientTests extends ESTestCase { @Before public void init() throws Exception { webServer.start(); - threadPool = createThreadPool(getTestName()); + threadPool = createThreadPool(inferenceUtilityPool()); } @After diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/IdleConnectionEvictorTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/IdleConnectionEvictorTests.java index dba80923c487d..a46586fa6121b 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/IdleConnectionEvictorTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/IdleConnectionEvictorTests.java @@ -20,7 +20,7 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; -import static org.elasticsearch.xpack.inference.external.http.Utils.createThreadPool; +import static org.elasticsearch.xpack.inference.external.http.Utils.inferenceUtilityPool; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyLong; import static org.mockito.Mockito.doAnswer; @@ -36,7 +36,7 @@ public class IdleConnectionEvictorTests extends ESTestCase { @Before public void init() { - threadPool = createThreadPool(getTestName()); + threadPool = createThreadPool(inferenceUtilityPool()); } @After diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/Utils.java 
b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/Utils.java index becb0cc43e1e8..22c36fe38a25c 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/Utils.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/Utils.java @@ -13,8 +13,6 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.test.http.MockWebServer; import org.elasticsearch.threadpool.ScalingExecutorBuilder; -import org.elasticsearch.threadpool.TestThreadPool; -import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.DeprecationHandler; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.XContentParser; @@ -81,17 +79,14 @@ public static Map entityAsMap(InputStream body) throws IOExcepti } } - public static ThreadPool createThreadPool(String name) { - return new TestThreadPool( - name, - new ScalingExecutorBuilder( - UTILITY_THREAD_POOL_NAME, - 1, - 4, - TimeValue.timeValueMinutes(10), - false, - "xpack.inference.utility_thread_pool" - ) + public static ScalingExecutorBuilder inferenceUtilityPool() { + return new ScalingExecutorBuilder( + UTILITY_THREAD_POOL_NAME, + 1, + 4, + TimeValue.timeValueMinutes(10), + false, + "xpack.inference.utility_thread_pool" ); } } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/HttpRequestExecutorServiceTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/HttpRequestExecutorServiceTests.java index 245ce09848a7f..2dd31144b3bc2 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/HttpRequestExecutorServiceTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/HttpRequestExecutorServiceTests.java @@ -29,7 +29,7 @@ import static 
org.elasticsearch.core.Strings.format; import static org.elasticsearch.xpack.inference.external.http.HttpClientTests.createHttpPost; -import static org.elasticsearch.xpack.inference.external.http.Utils.createThreadPool; +import static org.elasticsearch.xpack.inference.external.http.Utils.inferenceUtilityPool; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; import static org.mockito.ArgumentMatchers.any; @@ -45,7 +45,7 @@ public class HttpRequestExecutorServiceTests extends ESTestCase { @Before public void init() { - threadPool = createThreadPool(getTestName()); + threadPool = createThreadPool(inferenceUtilityPool()); } @After diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/HttpRequestSenderFactoryTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/HttpRequestSenderFactoryTests.java index 3434b951147d7..82c41794695fd 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/HttpRequestSenderFactoryTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/HttpRequestSenderFactoryTests.java @@ -35,7 +35,7 @@ import static org.elasticsearch.core.Strings.format; import static org.elasticsearch.xpack.inference.external.http.HttpClientTests.createHttpPost; -import static org.elasticsearch.xpack.inference.external.http.Utils.createThreadPool; +import static org.elasticsearch.xpack.inference.external.http.Utils.inferenceUtilityPool; import static org.elasticsearch.xpack.inference.external.http.Utils.mockClusterServiceEmpty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; @@ -56,7 +56,7 @@ public class HttpRequestSenderFactoryTests extends ESTestCase { @Before public void init() throws Exception { webServer.start(); - threadPool = createThreadPool(getTestName()); + threadPool = 
createThreadPool(inferenceUtilityPool()); clientManager = HttpClientManager.create(Settings.EMPTY, threadPool, mockClusterServiceEmpty(), mock(ThrottlerManager.class)); threadRef.set(null); } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/RequestTaskTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/RequestTaskTests.java index f3718954d8ad9..e6c47c891f0d7 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/RequestTaskTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/http/sender/RequestTaskTests.java @@ -40,7 +40,7 @@ import static org.elasticsearch.xpack.inference.external.http.HttpClientTests.createConnectionManager; import static org.elasticsearch.xpack.inference.external.http.HttpClientTests.createHttpPost; import static org.elasticsearch.xpack.inference.external.http.HttpClientTests.emptyHttpSettings; -import static org.elasticsearch.xpack.inference.external.http.Utils.createThreadPool; +import static org.elasticsearch.xpack.inference.external.http.Utils.inferenceUtilityPool; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.instanceOf; @@ -63,7 +63,7 @@ public class RequestTaskTests extends ESTestCase { @Before public void init() throws Exception { webServer.start(); - threadPool = createThreadPool(getTestName()); + threadPool = createThreadPool(inferenceUtilityPool()); } @After diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/huggingface/HuggingFaceClientTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/huggingface/HuggingFaceClientTests.java index 3463067143994..0cc97ca38de80 100644 --- 
a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/huggingface/HuggingFaceClientTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/external/huggingface/HuggingFaceClientTests.java @@ -31,9 +31,9 @@ import static org.elasticsearch.core.Strings.format; import static org.elasticsearch.xpack.core.ml.inference.trainedmodel.InferenceConfig.DEFAULT_RESULTS_FIELD; -import static org.elasticsearch.xpack.inference.external.http.Utils.createThreadPool; import static org.elasticsearch.xpack.inference.external.http.Utils.entityAsMap; import static org.elasticsearch.xpack.inference.external.http.Utils.getUrl; +import static org.elasticsearch.xpack.inference.external.http.Utils.inferenceUtilityPool; import static org.elasticsearch.xpack.inference.external.http.Utils.mockClusterServiceEmpty; import static org.elasticsearch.xpack.inference.external.request.huggingface.HuggingFaceElserRequestTests.createRequest; import static org.elasticsearch.xpack.inference.logging.ThrottlerManagerTests.mockThrottlerManager; @@ -53,7 +53,7 @@ public class HuggingFaceClientTests extends ESTestCase { @Before public void init() throws Exception { webServer.start(); - threadPool = createThreadPool(getTestName()); + threadPool = createThreadPool(inferenceUtilityPool()); clientManager = HttpClientManager.create(Settings.EMPTY, threadPool, mockClusterServiceEmpty(), mockThrottlerManager()); } diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/logging/ThrottlerManagerTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/logging/ThrottlerManagerTests.java index 01374d02a21c3..ba9e7851c9ad4 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/logging/ThrottlerManagerTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/logging/ThrottlerManagerTests.java @@ -15,7 +15,7 @@ import org.junit.After; import 
org.junit.Before; -import static org.elasticsearch.xpack.inference.external.http.Utils.createThreadPool; +import static org.elasticsearch.xpack.inference.external.http.Utils.inferenceUtilityPool; import static org.elasticsearch.xpack.inference.external.http.Utils.mockClusterServiceEmpty; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.eq; @@ -31,7 +31,7 @@ public class ThrottlerManagerTests extends ESTestCase { @Before public void init() { - threadPool = createThreadPool(getTestName()); + threadPool = createThreadPool(inferenceUtilityPool()); } @After diff --git a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/logging/ThrottlerTests.java b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/logging/ThrottlerTests.java index df95232ff85f7..27df66c54cd1c 100644 --- a/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/logging/ThrottlerTests.java +++ b/x-pack/plugin/inference/src/test/java/org/elasticsearch/xpack/inference/logging/ThrottlerTests.java @@ -22,7 +22,7 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; -import static org.elasticsearch.xpack.inference.external.http.Utils.createThreadPool; +import static org.elasticsearch.xpack.inference.external.http.Utils.inferenceUtilityPool; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.doAnswer; @@ -40,7 +40,7 @@ public class ThrottlerTests extends ESTestCase { @Before public void init() { - threadPool = createThreadPool(getTestName()); + threadPool = createThreadPool(inferenceUtilityPool()); } @After diff --git a/x-pack/plugin/logstash/src/test/java/org/elasticsearch/xpack/logstash/action/TransportDeletePipelineActionTests.java b/x-pack/plugin/logstash/src/test/java/org/elasticsearch/xpack/logstash/action/TransportDeletePipelineActionTests.java index 56b7965be3686..159da551917c4 100644 --- 
a/x-pack/plugin/logstash/src/test/java/org/elasticsearch/xpack/logstash/action/TransportDeletePipelineActionTests.java +++ b/x-pack/plugin/logstash/src/test/java/org/elasticsearch/xpack/logstash/action/TransportDeletePipelineActionTests.java @@ -18,6 +18,7 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.MockUtils; import org.elasticsearch.test.client.NoOpClient; +import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.RemoteTransportException; import org.elasticsearch.transport.TransportService; @@ -27,7 +28,8 @@ public class TransportDeletePipelineActionTests extends ESTestCase { public void testDeletePipelineWithMissingIndex() throws Exception { - try (Client client = getFailureClient(new IndexNotFoundException("missing .logstash"))) { + try (var threadPool = createThreadPool()) { + final var client = getFailureClient(threadPool, new IndexNotFoundException("missing .logstash")); TransportService transportService = MockUtils.setupTransportServiceWithThreadpoolExecutor(); final TransportDeletePipelineAction action = new TransportDeletePipelineAction( transportService, @@ -41,8 +43,8 @@ public void testDeletePipelineWithMissingIndex() throws Exception { } } - private Client getFailureClient(Exception e) { - return new NoOpClient(getTestName()) { + private Client getFailureClient(ThreadPool threadPool, Exception e) { + return new NoOpClient(threadPool) { @Override protected void doExecute( ActionType action, diff --git a/x-pack/plugin/logstash/src/test/java/org/elasticsearch/xpack/logstash/action/TransportGetPipelineActionTests.java b/x-pack/plugin/logstash/src/test/java/org/elasticsearch/xpack/logstash/action/TransportGetPipelineActionTests.java index d8a4d048f1fe4..7f1a0f2bcc2cb 100644 --- a/x-pack/plugin/logstash/src/test/java/org/elasticsearch/xpack/logstash/action/TransportGetPipelineActionTests.java +++ 
b/x-pack/plugin/logstash/src/test/java/org/elasticsearch/xpack/logstash/action/TransportGetPipelineActionTests.java @@ -35,6 +35,7 @@ import org.elasticsearch.test.MockLogAppender; import org.elasticsearch.test.MockUtils; import org.elasticsearch.test.client.NoOpClient; +import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.RemoteTransportException; import org.elasticsearch.transport.TransportService; @@ -99,7 +100,8 @@ public void onFailure(Exception e) { } }; - try (Client client = getMockClient(multiGetResponse)) { + try (var threadPool = createThreadPool()) { + final var client = getMockClient(threadPool, multiGetResponse); Loggers.addAppender(logger, mockLogAppender); TransportService transportService = MockUtils.setupTransportServiceWithThreadpoolExecutor(); TransportGetPipelineAction action = new TransportGetPipelineAction(transportService, mock(ActionFilters.class), client); @@ -151,7 +153,8 @@ public void onFailure(Exception e) { }; TransportService transportService = MockUtils.setupTransportServiceWithThreadpoolExecutor(); - try (Client client = getMockClient(searchResponse)) { + try (var threadPool = createThreadPool()) { + final var client = getMockClient(threadPool, searchResponse); new TransportGetPipelineAction(transportService, mock(ActionFilters.class), client).doExecute( null, request, @@ -163,7 +166,8 @@ public void onFailure(Exception e) { } public void testMissingIndexHandling() throws Exception { - try (Client failureClient = getFailureClient(new IndexNotFoundException("foo"))) { + try (var threadPool = createThreadPool()) { + final var failureClient = getFailureClient(threadPool, new IndexNotFoundException("foo")); TransportService transportService = MockUtils.setupTransportServiceWithThreadpoolExecutor(); final TransportGetPipelineAction action = new TransportGetPipelineAction( transportService, @@ -178,8 +182,8 @@ public void testMissingIndexHandling() throws Exception { } } - private Client 
getMockClient(ActionResponse response) { - return new NoOpClient(getTestName()) { + private Client getMockClient(ThreadPool threadPool, ActionResponse response) { + return new NoOpClient(threadPool) { @Override @SuppressWarnings("unchecked") protected void doExecute( @@ -192,8 +196,8 @@ protected void }; } - private Client getFailureClient(Exception e) { - return new NoOpClient(getTestName()) { + private Client getFailureClient(ThreadPool threadPool, Exception e) { + return new NoOpClient(threadPool) { @Override protected void doExecute( ActionType action, diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/MlNativeIntegTestCase.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/MlNativeIntegTestCase.java index 46a4e008cc752..e88464c1ff5c4 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/MlNativeIntegTestCase.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/MlNativeIntegTestCase.java @@ -25,10 +25,12 @@ import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.metadata.Template; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.core.PathUtils; import org.elasticsearch.datastreams.DataStreamsPlugin; import org.elasticsearch.env.Environment; @@ -49,7 +51,9 @@ import org.elasticsearch.search.SearchModule; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.test.ESIntegTestCase; +import 
org.elasticsearch.test.ExternalTestCluster; import org.elasticsearch.test.SecuritySettingsSourceField; +import org.elasticsearch.test.TestCluster; import org.elasticsearch.transport.netty4.Netty4Plugin; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xpack.autoscaling.Autoscaling; @@ -96,7 +100,10 @@ import java.io.IOException; import java.io.UncheckedIOException; +import java.net.InetAddress; +import java.net.InetSocketAddress; import java.net.URISyntaxException; +import java.net.URL; import java.nio.file.Files; import java.nio.file.Path; import java.util.ArrayList; @@ -165,8 +172,7 @@ protected Function getClientWrapper() { return client -> client.filterWithHeader(headers); } - @Override - protected Settings externalClusterClientSettings() { + private Settings externalClusterClientSettings() { final Path home = createTempDir(); final Path xpackConf = home.resolve("config"); try { @@ -207,6 +213,35 @@ protected Settings externalClusterClientSettings() { return builder.build(); } + @Override + protected TestCluster buildTestCluster(Scope scope, long seed) throws IOException { + final String clusterAddresses = System.getProperty(TESTS_CLUSTER); + assertTrue(TESTS_CLUSTER + " must be set", Strings.hasLength(clusterAddresses)); + if (scope == Scope.TEST) { + throw new IllegalArgumentException("Cannot run TEST scope test with " + TESTS_CLUSTER); + } + final String clusterName = System.getProperty(TESTS_CLUSTER_NAME); + if (Strings.isNullOrEmpty(clusterName)) { + throw new IllegalArgumentException("External test cluster name must be provided"); + } + final String[] stringAddresses = clusterAddresses.split(","); + final TransportAddress[] transportAddresses = new TransportAddress[stringAddresses.length]; + int i = 0; + for (String stringAddress : stringAddresses) { + URL url = new URL("http://" + stringAddress); + InetAddress inetAddress = InetAddress.getByName(url.getHost()); + transportAddresses[i++] = new TransportAddress(new 
InetSocketAddress(inetAddress, url.getPort())); + } + return new ExternalTestCluster( + createTempDir(), + externalClusterClientSettings(), + nodePlugins(), + getClientWrapper(), + clusterName, + transportAddresses + ); + } + protected void cleanUp() { setUpgradeModeTo(false); cleanUpResources(); diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/ScheduledEventsIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/ScheduledEventsIT.java index 45d1e57a52f46..ca43638e5d038 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/ScheduledEventsIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/ScheduledEventsIT.java @@ -45,7 +45,6 @@ public void cleanUpTest() { cleanUp(); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/95096") public void testScheduledEvents() throws IOException { TimeValue bucketSpan = TimeValue.timeValueMinutes(30); diff --git a/x-pack/plugin/ml/src/main/java/module-info.java b/x-pack/plugin/ml/src/main/java/module-info.java index a73c9bdfa32b4..52dee889d15fc 100644 --- a/x-pack/plugin/ml/src/main/java/module-info.java +++ b/x-pack/plugin/ml/src/main/java/module-info.java @@ -33,6 +33,7 @@ provides org.elasticsearch.painless.spi.PainlessExtension with org.elasticsearch.xpack.ml.MachineLearningPainlessExtension; provides org.elasticsearch.xpack.autoscaling.AutoscalingExtension with org.elasticsearch.xpack.ml.autoscaling.MlAutoscalingExtension; + provides org.elasticsearch.features.FeatureSpecification with org.elasticsearch.xpack.ml.MlFeatures; exports org.elasticsearch.xpack.ml; exports org.elasticsearch.xpack.ml.action; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java 
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java index f4bce4906c0b0..b4b8084b4b328 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java @@ -44,6 +44,7 @@ import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.TimeValue; import org.elasticsearch.env.Environment; +import org.elasticsearch.features.NodeFeature; import org.elasticsearch.index.analysis.CharFilterFactory; import org.elasticsearch.index.analysis.TokenizerFactory; import org.elasticsearch.index.query.QueryBuilder; @@ -485,6 +486,8 @@ public class MachineLearning extends Plugin public static final String TRAINED_MODEL_CIRCUIT_BREAKER_NAME = "model_inference"; + public static final NodeFeature STATE_RESET_FALLBACK_ON_DISABLED = new NodeFeature("ml.state_reset_fallback_on_disabled"); + private static final long DEFAULT_MODEL_CIRCUIT_BREAKER_LIMIT = (long) ((0.50) * JvmInfo.jvmInfo().getMem().getHeapMax().getBytes()); private static final double DEFAULT_MODEL_CIRCUIT_BREAKER_OVERHEAD = 1.0D; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlFeatures.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlFeatures.java new file mode 100644 index 0000000000000..29aa189b2acd4 --- /dev/null +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlFeatures.java @@ -0,0 +1,24 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.ml; + +import org.elasticsearch.Version; +import org.elasticsearch.features.FeatureSpecification; +import org.elasticsearch.features.NodeFeature; + +import java.util.Map; + +/** + * This class specifies source code features exposed by the Shutdown plugin. + */ +public class MlFeatures implements FeatureSpecification { + @Override + public Map getHistoricalFeatures() { + return Map.of(MachineLearning.STATE_RESET_FALLBACK_ON_DISABLED, Version.V_8_7_0); + } +} diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetTrainedModelsStatsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetTrainedModelsStatsAction.java index 84372a111c0bd..1c535326b3296 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetTrainedModelsStatsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetTrainedModelsStatsAction.java @@ -45,9 +45,11 @@ import org.elasticsearch.xpack.core.ml.inference.TrainedModelConfig; import org.elasticsearch.xpack.core.ml.inference.TrainedModelType; import org.elasticsearch.xpack.core.ml.inference.assignment.AssignmentStats; +import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignment; import org.elasticsearch.xpack.core.ml.inference.persistence.InferenceIndexConstants; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.InferenceStats; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.TrainedModelSizeStats; +import org.elasticsearch.xpack.core.ml.utils.TransportVersionUtils; import org.elasticsearch.xpack.ml.inference.ModelAliasMetadata; import org.elasticsearch.xpack.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.ml.inference.persistence.TrainedModelDefinitionDoc; @@ -296,29 +298,23 @@ private void modelSizeStats( for (TrainedModelConfig model : models) { if (model.getModelType() == 
TrainedModelType.PYTORCH) { long totalDefinitionLength = pytorchTotalDefinitionLengthsByModelId.getOrDefault(model.getModelId(), 0L); + // We ensure that in the mixed cluster state trained model stats uses the same values for memory estimation + // as the rebalancer. + boolean useNewMemoryFields = TrainedModelAssignment.useNewMemoryFields( + TransportVersionUtils.getMinTransportVersion(clusterService.state()) + ); long estimatedMemoryUsageBytes = totalDefinitionLength > 0L ? StartTrainedModelDeploymentAction.estimateMemoryUsageBytes( model.getModelId(), totalDefinitionLength, - model.getPerDeploymentMemoryBytes(), - model.getPerAllocationMemoryBytes(), + useNewMemoryFields ? model.getPerDeploymentMemoryBytes() : 0, + useNewMemoryFields ? model.getPerAllocationMemoryBytes() : 0, numberOfAllocations ) : 0L; modelSizeStatsByModelId.put( model.getModelId(), - new TrainedModelSizeStats( - totalDefinitionLength, - totalDefinitionLength > 0L - ? StartTrainedModelDeploymentAction.estimateMemoryUsageBytes( - model.getModelId(), - totalDefinitionLength, - model.getPerDeploymentMemoryBytes(), - model.getPerAllocationMemoryBytes(), - numberOfAllocations - ) - : 0L - ) + new TrainedModelSizeStats(totalDefinitionLength, estimatedMemoryUsageBytes) ); } else { modelSizeStatsByModelId.put(model.getModelId(), new TrainedModelSizeStats(model.getModelSize(), 0)); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentClusterService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentClusterService.java index 2caf338d2a3c7..fe4462d6556ee 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentClusterService.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentClusterService.java @@ -47,6 +47,7 @@ import org.elasticsearch.xpack.core.ml.job.messages.Messages; 
import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import org.elasticsearch.xpack.core.ml.utils.MlPlatformArchitecturesUtil; +import org.elasticsearch.xpack.core.ml.utils.TransportVersionUtils; import org.elasticsearch.xpack.ml.MachineLearning; import org.elasticsearch.xpack.ml.autoscaling.NodeAvailabilityZoneMapper; import org.elasticsearch.xpack.ml.inference.assignment.planning.AllocationReducer; @@ -76,6 +77,8 @@ public class TrainedModelAssignmentClusterService implements ClusterStateListene private static final TransportVersion RENAME_ALLOCATION_TO_ASSIGNMENT_TRANSPORT_VERSION = TransportVersions.V_8_3_0; public static final TransportVersion DISTRIBUTED_MODEL_ALLOCATION_TRANSPORT_VERSION = TransportVersions.V_8_4_0; + private static final TransportVersion NEW_ALLOCATION_MEMORY_VERSION = TransportVersions.V_8_500_064; + private final ClusterService clusterService; private final ThreadPool threadPool; private final NodeLoadDetector nodeLoadDetector; @@ -644,12 +647,14 @@ private TrainedModelAssignmentMetadata.Builder rebalanceAssignments( Map nodeLoads = detectNodeLoads(nodes, currentState); TrainedModelAssignmentMetadata currentMetadata = TrainedModelAssignmentMetadata.fromState(currentState); + boolean useNewMemoryFields = TrainedModelAssignment.useNewMemoryFields(TransportVersionUtils.getMinTransportVersion(currentState)); TrainedModelAssignmentRebalancer rebalancer = new TrainedModelAssignmentRebalancer( currentMetadata, nodeLoads, nodeAvailabilityZoneMapper.buildMlNodesByAvailabilityZone(currentState), modelToAdd, - allocatedProcessorsScale + allocatedProcessorsScale, + useNewMemoryFields ); Set shuttingDownNodeIds = currentState.metadata().nodeShutdowns().getAllNodeIds(); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentRebalancer.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentRebalancer.java index 
e1241dc8a93c3..6e6b447fcea3d 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentRebalancer.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentRebalancer.java @@ -52,18 +52,22 @@ class TrainedModelAssignmentRebalancer { private final Optional deploymentToAdd; private final int allocatedProcessorsScale; + private final boolean useNewMemoryFields; + TrainedModelAssignmentRebalancer( TrainedModelAssignmentMetadata currentMetadata, Map nodeLoads, Map, Collection> mlNodesByZone, Optional deploymentToAdd, - int allocatedProcessorsScale + int allocatedProcessorsScale, + boolean useNewMemoryFields ) { this.currentMetadata = Objects.requireNonNull(currentMetadata); this.nodeLoads = Objects.requireNonNull(nodeLoads); this.mlNodesByZone = Objects.requireNonNull(mlNodesByZone); this.deploymentToAdd = Objects.requireNonNull(deploymentToAdd); this.allocatedProcessorsScale = allocatedProcessorsScale; + this.useNewMemoryFields = useNewMemoryFields; } TrainedModelAssignmentMetadata.Builder rebalance() { @@ -138,9 +142,11 @@ private static void copyAssignments( AssignmentPlan.Node originalNode = originalNodeById.get(assignment.getKey().id()); dest.assignModelToNode(m, originalNode, assignment.getValue()); if (m.currentAllocationsByNodeId().containsKey(originalNode.id())) { + // TODO (#101612) requiredMemory should be calculated by the AssignmentPlan.Builder // As the node has all its available memory we need to manually account memory of models with // current allocations. 
- dest.accountMemory(m, originalNode); + long requiredMemory = m.estimateMemoryUsageBytes(m.currentAllocationsByNodeId().get(originalNode.id())); + dest.accountMemory(m, originalNode, requiredMemory); } } } @@ -168,11 +174,14 @@ private AssignmentPlan computePlanForNormalPriorityModels( .collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().getTargetAllocations())); return new AssignmentPlan.Deployment( assignment.getDeploymentId(), - assignment.getTaskParams().estimateMemoryUsageBytes(), + assignment.getTaskParams().getModelBytes(), assignment.getTaskParams().getNumberOfAllocations(), assignment.getTaskParams().getThreadsPerAllocation(), currentAssignments, - assignment.getMaxAssignedAllocations() + assignment.getMaxAssignedAllocations(), + // in the mixed cluster state use old memory fields to avoid unstable assignment plans + useNewMemoryFields ? assignment.getTaskParams().getPerDeploymentMemoryBytes() : 0, + useNewMemoryFields ? assignment.getTaskParams().getPerAllocationMemoryBytes() : 0 ); }) .forEach(planDeployments::add); @@ -181,11 +190,14 @@ private AssignmentPlan computePlanForNormalPriorityModels( planDeployments.add( new AssignmentPlan.Deployment( taskParams.getDeploymentId(), - taskParams.estimateMemoryUsageBytes(), + taskParams.getModelBytes(), taskParams.getNumberOfAllocations(), taskParams.getThreadsPerAllocation(), Map.of(), - 0 + 0, + // in the mixed cluster state use old memory fields to avoid unstable assignment plans + useNewMemoryFields ? taskParams.getPerDeploymentMemoryBytes() : 0, + useNewMemoryFields ? 
taskParams.getPerAllocationMemoryBytes() : 0 ) ); } @@ -217,12 +229,14 @@ private AssignmentPlan computePlanForLowPriorityModels(Set assignableNod .map( assignment -> new AssignmentPlan.Deployment( assignment.getDeploymentId(), - assignment.getTaskParams().estimateMemoryUsageBytes(), + assignment.getTaskParams().getModelBytes(), assignment.getTaskParams().getNumberOfAllocations(), assignment.getTaskParams().getThreadsPerAllocation(), findFittingAssignments(assignment, assignableNodeIds, remainingNodeMemory), assignment.getMaxAssignedAllocations(), - Priority.LOW + Priority.LOW, + (useNewMemoryFields == false) ? assignment.getTaskParams().getPerDeploymentMemoryBytes() : 0, + (useNewMemoryFields == false) ? assignment.getTaskParams().getPerAllocationMemoryBytes() : 0 ) ) .forEach(planDeployments::add); @@ -231,12 +245,14 @@ private AssignmentPlan computePlanForLowPriorityModels(Set assignableNod planDeployments.add( new AssignmentPlan.Deployment( taskParams.getDeploymentId(), - taskParams.estimateMemoryUsageBytes(), + taskParams.getModelBytes(), taskParams.getNumberOfAllocations(), taskParams.getThreadsPerAllocation(), Map.of(), 0, - Priority.LOW + Priority.LOW, + (useNewMemoryFields == false) ? taskParams.getPerDeploymentMemoryBytes() : 0, + (useNewMemoryFields == false) ? 
taskParams.getPerAllocationMemoryBytes() : 0 ) ); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AbstractPreserveAllocations.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AbstractPreserveAllocations.java index 4843cc43d1187..026b433a8c2d4 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AbstractPreserveAllocations.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AbstractPreserveAllocations.java @@ -35,7 +35,8 @@ private Node modifyNodePreservingAllocations(Node n) { int coresUsed = 0; for (Deployment m : deployments) { if (m.currentAllocationsByNodeId().containsKey(n.id())) { - bytesUsed += m.memoryBytes(); + int allocations = m.currentAllocationsByNodeId().get(n.id()); + bytesUsed += m.estimateMemoryUsageBytes(allocations); coresUsed += calculateUsedCores(n, m); } } @@ -58,7 +59,9 @@ Deployment modifyModelPreservingPreviousAssignments(Deployment m) { m.allocations() - calculatePreservedAllocations(m), m.threadsPerAllocation(), calculateAllocationsPerNodeToPreserve(m), - m.maxAssignedAllocations() + m.maxAssignedAllocations(), + m.perDeploymentMemoryBytes(), + m.perAllocationMemoryBytes() ); } @@ -67,28 +70,37 @@ AssignmentPlan mergePreservedAllocations(AssignmentPlan assignmentPlan) { // they will not match the models/nodes members we have in this class. // Therefore, we build a lookup table based on the ids so we can merge the plan // with its preserved allocations. 
- final Map, Integer> assignmentsByModelNodeIdPair = new HashMap<>(); + final Map, Integer> plannedAssignmentsByModelNodeIdPair = new HashMap<>(); for (Deployment m : assignmentPlan.models()) { Map assignments = assignmentPlan.assignments(m).orElse(Map.of()); for (Map.Entry nodeAssignment : assignments.entrySet()) { - assignmentsByModelNodeIdPair.put(Tuple.tuple(m.id(), nodeAssignment.getKey().id()), nodeAssignment.getValue()); + plannedAssignmentsByModelNodeIdPair.put(Tuple.tuple(m.id(), nodeAssignment.getKey().id()), nodeAssignment.getValue()); } } AssignmentPlan.Builder mergedPlanBuilder = AssignmentPlan.builder(nodes, deployments); - for (Deployment m : deployments) { - for (Node n : nodes) { - int allocations = assignmentsByModelNodeIdPair.getOrDefault(Tuple.tuple(m.id(), n.id()), 0); - if (m.currentAllocationsByNodeId().containsKey(n.id())) { - if (mergedPlanBuilder.getRemainingMemory(n) >= m.memoryBytes()) { - allocations += addPreservedAllocations(n, m); - // As the node has all its available memory we need to manually account memory of models with - // current allocations. - mergedPlanBuilder.accountMemory(m, n); + for (Node n : nodes) { + // TODO (#101612) Should the first loop happen in the builder constructor? 
+ for (Deployment deploymentAllocationsToPreserve : deployments) { + + // if the model m is already allocated on the node n and I want to preserve this allocation + int preservedAllocations = addPreservedAllocations(n, deploymentAllocationsToPreserve); + if (preservedAllocations > 0) { + long requiredMemory = deploymentAllocationsToPreserve.estimateMemoryUsageBytes(preservedAllocations); + if (mergedPlanBuilder.canAssign(deploymentAllocationsToPreserve, n, preservedAllocations, requiredMemory)) { + mergedPlanBuilder.assignModelToNode(deploymentAllocationsToPreserve, n, preservedAllocations, requiredMemory); } } - if (allocations > 0) { - mergedPlanBuilder.assignModelToNode(m, n, allocations); + } + for (Deployment deploymentNewAllocations : deployments) { + int newAllocations = plannedAssignmentsByModelNodeIdPair.getOrDefault( + Tuple.tuple(deploymentNewAllocations.id(), n.id()), + 0 + ); + + long requiredMemory = mergedPlanBuilder.getDeploymentMemoryRequirement(deploymentNewAllocations, n, newAllocations); + if (newAllocations > 0 && mergedPlanBuilder.canAssign(deploymentNewAllocations, n, newAllocations, requiredMemory)) { + mergedPlanBuilder.assignModelToNode(deploymentNewAllocations, n, newAllocations); } } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlan.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlan.java index 72a83d7579463..1dce7f0bb46ba 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlan.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlan.java @@ -10,6 +10,7 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.Maps; import org.elasticsearch.core.Tuple; +import org.elasticsearch.xpack.core.ml.action.StartTrainedModelDeploymentAction; import 
org.elasticsearch.xpack.core.ml.inference.assignment.Priority; import java.util.ArrayList; @@ -36,18 +37,32 @@ public record Deployment( int threadsPerAllocation, Map currentAllocationsByNodeId, int maxAssignedAllocations, - Priority priority + Priority priority, + long perDeploymentMemoryBytes, + long perAllocationMemoryBytes ) { public Deployment( String id, - long memoryBytes, + long modelBytes, int allocations, int threadsPerAllocation, Map currentAllocationsByNodeId, - int maxAssignedAllocations + int maxAssignedAllocations, + long perDeploymentMemoryBytes, + long perAllocationMemoryBytes ) { - this(id, memoryBytes, allocations, threadsPerAllocation, currentAllocationsByNodeId, maxAssignedAllocations, Priority.NORMAL); + this( + id, + modelBytes, + allocations, + threadsPerAllocation, + currentAllocationsByNodeId, + maxAssignedAllocations, + Priority.NORMAL, + perDeploymentMemoryBytes, + perAllocationMemoryBytes + ); } int getCurrentAssignedAllocations() { @@ -58,6 +73,60 @@ boolean hasEverBeenAllocated() { return maxAssignedAllocations > 0; } + public long estimateMemoryUsageBytes(int allocations) { + return StartTrainedModelDeploymentAction.estimateMemoryUsageBytes( + id, + memoryBytes, + perDeploymentMemoryBytes, + perAllocationMemoryBytes, + allocations + ); + } + + long estimateAdditionalMemoryUsageBytes(int allocationsOld, int allocationsNew) { + return StartTrainedModelDeploymentAction.estimateMemoryUsageBytes( + id, + memoryBytes, + perDeploymentMemoryBytes, + perAllocationMemoryBytes, + allocationsNew + ) - StartTrainedModelDeploymentAction.estimateMemoryUsageBytes( + id, + memoryBytes, + perDeploymentMemoryBytes, + perAllocationMemoryBytes, + allocationsOld + ); + + } + + long minimumMemoryRequiredBytes() { + return StartTrainedModelDeploymentAction.estimateMemoryUsageBytes( + id, + memoryBytes, + perDeploymentMemoryBytes, + perAllocationMemoryBytes, + 1 + ); + } + + int findOptimalAllocations(int maxAllocations, long availableMemoryBytes) { + if 
(perDeploymentMemoryBytes > 0 && perAllocationMemoryBytes > 0) { + return (int) Math.max( + Math.min(maxAllocations, Math.floorDiv(availableMemoryBytes - estimateMemoryUsageBytes(0), perAllocationMemoryBytes)), + 0 + ); + } + return maxAllocations; + } + + int findExcessAllocations(int maxAllocations, long availableMemoryBytes) { + if (perDeploymentMemoryBytes > 0 && perAllocationMemoryBytes > 0) { + return (int) Math.min(maxAllocations, Math.floorDiv(availableMemoryBytes, perAllocationMemoryBytes)); + } + return maxAllocations; + } + @Override public String toString() { return id @@ -71,6 +140,8 @@ public String toString() { + currentAllocationsByNodeId + ") (max_assigned_allocations = " + maxAssignedAllocations + + ") (memory_usage = " + + ByteSizeValue.ofBytes(estimateMemoryUsageBytes(allocations)) + ")"; } }; @@ -304,19 +375,42 @@ int getRemainingAllocations(Deployment m) { } boolean canAssign(Deployment deployment, Node node, int allocations) { - return (isAlreadyAssigned(deployment, node) - || (deployment.memoryBytes() <= remainingNodeMemory.get(node)) - && (deployment.priority == Priority.LOW - || allocations * deployment.threadsPerAllocation() <= remainingNodeCores.get(node))); + long requiredMemory = getDeploymentMemoryRequirement(deployment, node, allocations); + return canAssign(deployment, node, allocations, requiredMemory); + } + + boolean canAssign(Deployment deployment, Node node, int allocations, long requiredMemory) { + return (requiredMemory <= remainingNodeMemory.get(node)) + && (deployment.priority == Priority.LOW || allocations * deployment.threadsPerAllocation() <= remainingNodeCores.get(node)); + } + + public long getDeploymentMemoryRequirement(Deployment deployment, Node node, int newAllocations) { + int assignedAllocations = getAssignedAllocations(deployment, node); + + if (assignedAllocations > 0) { + return deployment.estimateAdditionalMemoryUsageBytes(assignedAllocations, assignedAllocations + newAllocations); + } + return 
deployment.estimateMemoryUsageBytes(newAllocations); } public Builder assignModelToNode(Deployment deployment, Node node, int allocations) { + return assignModelToNode(deployment, node, allocations, getDeploymentMemoryRequirement(deployment, node, allocations)); + } + + public Builder assignModelToNode(Deployment deployment, Node node, int allocations, long requiredMemory) { if (allocations <= 0) { return this; } - if (isAlreadyAssigned(deployment, node) == false && deployment.memoryBytes() > remainingNodeMemory.get(node)) { + if (/*isAlreadyAssigned(deployment, node) == false + &&*/ requiredMemory > remainingNodeMemory.get(node)) { throw new IllegalArgumentException( - "not enough memory on node [" + node.id() + "] to assign model [" + deployment.id() + "]" + "not enough memory on node [" + + node.id() + + "] to assign [" + + allocations + + "] allocations to deployment [" + + deployment.id() + + "]" ); } if (deployment.priority == Priority.NORMAL && allocations * deployment.threadsPerAllocation() > remainingNodeCores.get(node)) { @@ -333,9 +427,9 @@ public Builder assignModelToNode(Deployment deployment, Node node, int allocatio ); } - long additionalModelMemory = isAlreadyAssigned(deployment, node) ? 
0 : deployment.memoryBytes; assignments.get(deployment).compute(node, (n, remAllocations) -> remAllocations + allocations); - remainingNodeMemory.compute(node, (n, remMemory) -> remMemory - additionalModelMemory); + accountMemory(deployment, node, requiredMemory); + if (deployment.priority == Priority.NORMAL) { remainingNodeCores.compute(node, (n, remCores) -> remCores - allocations * deployment.threadsPerAllocation()); } @@ -347,9 +441,26 @@ private boolean isAlreadyAssigned(Deployment deployment, Node node) { return deployment.currentAllocationsByNodeId().containsKey(node.id()) || assignments.get(deployment).get(node) > 0; } + private int getAssignedAllocations(Deployment deployment, Node node) { + int currentAllocations = getCurrentAllocations(deployment, node); + int assignmentAllocations = assignments.get(deployment).get(node); + return currentAllocations + assignmentAllocations; + } + + private static int getCurrentAllocations(Deployment m, Node n) { + return m.currentAllocationsByNodeId.containsKey(n.id()) ? 
m.currentAllocationsByNodeId.get(n.id()) : 0; + } + public void accountMemory(Deployment m, Node n) { - remainingNodeMemory.computeIfPresent(n, (k, v) -> v - m.memoryBytes()); - if (remainingNodeMemory.get(n) < 0) { + // TODO (#101612) remove or refactor unused method + long requiredMemory = getDeploymentMemoryRequirement(m, n, getCurrentAllocations(m, n)); + accountMemory(m, n, requiredMemory); + } + + public void accountMemory(Deployment m, Node n, long requiredMemory) { + // TODO (#101612) computation of required memory should be done internally + remainingNodeMemory.computeIfPresent(n, (k, v) -> v - requiredMemory); + if (remainingNodeMemory.containsKey(n) && remainingNodeMemory.get(n) < 0) { throw new IllegalArgumentException("not enough memory on node [" + n.id() + "] to assign model [" + m.id() + "]"); } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlanner.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlanner.java index 73b713cced32a..b1c017b1a784c 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlanner.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlanner.java @@ -115,8 +115,11 @@ private AssignmentPlan solveAllocatingAtLeastOnceModelsThatWerePreviouslyAllocat m.memoryBytes(), 1, m.threadsPerAllocation(), - m.currentAllocationsByNodeId(), - m.maxAssignedAllocations() + // don't rely on the current allocation + new HashMap<>(), + m.maxAssignedAllocations(), + m.perDeploymentMemoryBytes(), + m.perAllocationMemoryBytes() ) ) .toList(); @@ -145,7 +148,9 @@ private AssignmentPlan solveAllocatingAtLeastOnceModelsThatWerePreviouslyAllocat m.allocations(), m.threadsPerAllocation(), currentAllocationsByNodeId, - m.maxAssignedAllocations() + m.maxAssignedAllocations(), + m.perDeploymentMemoryBytes(), + 
m.perAllocationMemoryBytes() ); }).toList(); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/LinearProgrammingPlanSolver.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/LinearProgrammingPlanSolver.java index 90c5a2257d94d..bd97680e285cc 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/LinearProgrammingPlanSolver.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/LinearProgrammingPlanSolver.java @@ -68,6 +68,8 @@ class LinearProgrammingPlanSolver { private final Map normalizedMemoryPerNode; private final Map coresPerNode; private final Map normalizedMemoryPerModel; + private final Map normalizedMemoryPerAllocation; + private final Map normalizedMinimumDeploymentMemoryRequired; private final int maxNodeCores; private final long maxModelMemoryBytes; @@ -84,12 +86,17 @@ class LinearProgrammingPlanSolver { .filter(m -> m.threadsPerAllocation() <= maxNodeCores) .toList(); - maxModelMemoryBytes = this.deployments.stream().map(AssignmentPlan.Deployment::memoryBytes).max(Long::compareTo).orElse(1L); + // We use the maximum memory to deploy a model with one allocation as the normalization factor. 
+ maxModelMemoryBytes = this.deployments.stream().map(m -> m.minimumMemoryRequiredBytes()).max(Long::compareTo).orElse(1L); normalizedMemoryPerNode = this.nodes.stream() .collect(Collectors.toMap(Function.identity(), n -> n.availableMemoryBytes() / (double) maxModelMemoryBytes)); coresPerNode = this.nodes.stream().collect(Collectors.toMap(Function.identity(), Node::cores)); normalizedMemoryPerModel = this.deployments.stream() - .collect(Collectors.toMap(Function.identity(), m -> m.memoryBytes() / (double) maxModelMemoryBytes)); + .collect(Collectors.toMap(Function.identity(), m -> m.estimateMemoryUsageBytes(0) / (double) maxModelMemoryBytes)); + normalizedMemoryPerAllocation = this.deployments.stream() + .collect(Collectors.toMap(Function.identity(), m -> m.perAllocationMemoryBytes() / (double) maxModelMemoryBytes)); + normalizedMinimumDeploymentMemoryRequired = this.deployments.stream() + .collect(Collectors.toMap(Function.identity(), m -> m.minimumMemoryRequiredBytes() / (double) maxModelMemoryBytes)); } AssignmentPlan solvePlan(boolean useBinPackingOnly) { @@ -133,8 +140,8 @@ private double weightForAllocationVar( Node n, Map, Double> weights ) { - return (1 + weights.get(Tuple.tuple(m, n)) - (m.memoryBytes() > n.availableMemoryBytes() ? 10 : 0)) - L1 * normalizedMemoryPerModel - .get(m) / maxNodeCores; + return (1 + weights.get(Tuple.tuple(m, n)) - (m.minimumMemoryRequiredBytes() > n.availableMemoryBytes() ? 
10 : 0)) - L1 + * normalizedMemoryPerModel.get(m) / maxNodeCores; } private Tuple, Double>, AssignmentPlan> calculateWeightsAndBinPackingPlan() { @@ -156,9 +163,9 @@ private Tuple, Double>, AssignmentPlan> calculateWei .sorted(Comparator.comparingDouble(n -> descendingSizeAnyFitsNodeOrder(n, m, assignmentPlan))) .toList(); for (Node n : orderedNodes) { - int allocations = Math.min( - assignmentPlan.getRemainingCores(n) / m.threadsPerAllocation(), - assignmentPlan.getRemainingAllocations(m) + int allocations = m.findOptimalAllocations( + Math.min(assignmentPlan.getRemainingCores(n) / m.threadsPerAllocation(), assignmentPlan.getRemainingAllocations(m)), + assignmentPlan.getRemainingMemory(n) ); if (allocations > 0 && assignmentPlan.canAssign(m, n, allocations)) { assignmentPlan.assignModelToNode(m, n, allocations); @@ -185,7 +192,8 @@ private Tuple, Double>, AssignmentPlan> calculateWei } private double descendingSizeAnyFitsModelOrder(AssignmentPlan.Deployment m) { - return (m.currentAllocationsByNodeId().isEmpty() ? 1 : 2) * -normalizedMemoryPerModel.get(m) * m.threadsPerAllocation(); + return (m.currentAllocationsByNodeId().isEmpty() ? 
1 : 2) * -normalizedMinimumDeploymentMemoryRequired.get(m) * m + .threadsPerAllocation(); } private double descendingSizeAnyFitsNodeOrder(Node n, AssignmentPlan.Deployment m, AssignmentPlan.Builder assignmentPlan) { @@ -307,7 +315,10 @@ private boolean solveLinearProgram( List modelMemories = new ArrayList<>(); deployments.stream().filter(m -> m.currentAllocationsByNodeId().containsKey(n.id()) == false).forEach(m -> { allocations.add(allocationVars.get(Tuple.tuple(m, n))); - modelMemories.add(normalizedMemoryPerModel.get(m) * m.threadsPerAllocation() / (double) coresPerNode.get(n)); + modelMemories.add( + (normalizedMemoryPerModel.get(m) / (double) coresPerNode.get(n) + normalizedMemoryPerAllocation.get(m)) * m + .threadsPerAllocation() + ); }); model.addExpression("used_memory_on_node_" + n.id() + "_not_more_than_available") .upper(normalizedMemoryPerNode.get(n)) diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/PreserveAllAllocations.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/PreserveAllAllocations.java index f10ece8f5a593..72109941ad477 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/PreserveAllAllocations.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/PreserveAllAllocations.java @@ -37,6 +37,6 @@ protected int calculatePreservedAllocations(Deployment m) { @Override protected int addPreservedAllocations(Node n, Deployment m) { - return m.currentAllocationsByNodeId().get(n.id()); + return m.currentAllocationsByNodeId().containsKey(n.id()) ? 
m.currentAllocationsByNodeId().get(n.id()) : 0; } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/PreserveOneAllocation.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/PreserveOneAllocation.java index 324e1a8d69a53..43b8860803596 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/PreserveOneAllocation.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/PreserveOneAllocation.java @@ -37,6 +37,6 @@ protected int calculatePreservedAllocations(AssignmentPlan.Deployment m) { @Override protected int addPreservedAllocations(Node n, AssignmentPlan.Deployment m) { - return 1; + return m.currentAllocationsByNodeId().containsKey(n.id()) ? 1 : 0; } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/RandomizedAssignmentRounding.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/RandomizedAssignmentRounding.java index dafc07099f850..8bdc99998a0c2 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/RandomizedAssignmentRounding.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/RandomizedAssignmentRounding.java @@ -135,8 +135,9 @@ private void assignUnderSubscribedNodes(Collection nodeSelection) { for (AssignmentPlan.Deployment m : deployments) { Tuple assignment = Tuple.tuple(m, n); if (assignments.get(assignment) > 0) { - totalModelMemory += m.memoryBytes(); - maxTotalThreads += (int) Math.ceil(allocations.get(assignment)) * m.threadsPerAllocation(); + int roundedAllocations = (int) Math.ceil(allocations.get(assignment)); + totalModelMemory += m.estimateMemoryUsageBytes(roundedAllocations); + maxTotalThreads += roundedAllocations * m.threadsPerAllocation(); assignedDeployments.add(m); } } @@ 
-199,9 +200,12 @@ private void assignExcessCores(Node n) { if (resourceTracker.remainingNodeCores.get(n) <= 0) { break; } - int extraAllocations = Math.min( - resourceTracker.remainingNodeCores.get(n) / m.threadsPerAllocation(), - resourceTracker.remainingModelAllocations.get(m) + int extraAllocations = m.findExcessAllocations( + Math.min( + resourceTracker.remainingNodeCores.get(n) / m.threadsPerAllocation(), + resourceTracker.remainingModelAllocations.get(m) + ), + resourceTracker.remainingNodeMemory.get(n) ); allocations.compute(Tuple.tuple(m, n), (k, v) -> v + extraAllocations); resourceTracker.assign(m, n, extraAllocations); @@ -211,7 +215,7 @@ private void assignExcessCores(Node n) { } private static double remainingModelOrder(AssignmentPlan.Deployment m) { - return (m.currentAllocationsByNodeId().isEmpty() ? 1 : 2) * -m.memoryBytes(); + return (m.currentAllocationsByNodeId().isEmpty() ? 1 : 2) * -m.minimumMemoryRequiredBytes(); } private boolean hasSoftAssignments(Node n) { @@ -275,15 +279,17 @@ private void doRandomizedRounding(List> s int roundedAllocations = random.nextDouble() < roundUpProbability ? 
(int) Math.ceil(allocations.get(assignment)) : (int) Math.floor(allocations.get(assignment)); - - if (m.memoryBytes() > resourceTracker.remainingNodeMemory.get(n) + if (m.estimateMemoryUsageBytes(roundedAllocations) > resourceTracker.remainingNodeMemory.get(n) || m.threadsPerAllocation() > resourceTracker.remainingNodeCores.get(n) || roundedAllocations == 0 || random.nextDouble() > assignments.get(assignment)) { unassign(assignment); assignUnderSubscribedNodes(Set.of(n)); } else { - roundedAllocations = Math.min(roundedAllocations, resourceTracker.remainingNodeCores.get(n) / m.threadsPerAllocation()); + roundedAllocations = m.findOptimalAllocations( + Math.min(roundedAllocations, resourceTracker.remainingNodeCores.get(n) / m.threadsPerAllocation()), + resourceTracker.remainingNodeMemory.get(n) + ); assignModelToNode(m, n, roundedAllocations); unassignOversizedModels(n); assignExcessCores(n); @@ -294,7 +300,8 @@ private void doRandomizedRounding(List> s private void unassignOversizedModels(Node n) { for (AssignmentPlan.Deployment m : deployments) { Tuple assignment = Tuple.tuple(m, n); - if (assignments.get(assignment) < 1.0 && m.memoryBytes() > resourceTracker.remainingNodeMemory.get(n)) { + int roundedAllocations = (int) Math.ceil(allocations.get(assignment)); + if (assignments.get(assignment) < 1.0 && m.minimumMemoryRequiredBytes() > resourceTracker.remainingNodeMemory.get(n)) { unassign(assignment); } } @@ -303,7 +310,11 @@ private void unassignOversizedModels(Node n) { private AssignmentPlan toPlan() { AssignmentPlan.Builder builder = AssignmentPlan.builder(nodes, deployments); for (Map.Entry, Integer> assignment : tryAssigningRemainingCores().entrySet()) { - builder.assignModelToNode(assignment.getKey().v1(), assignment.getKey().v2(), assignment.getValue()); + // TODO (#101612) The model should be assigned to the node only when it is possible. This means, that canAssign should be + // integrated into the assignModelToNode. 
+ if (builder.canAssign(assignment.getKey().v1(), assignment.getKey().v2(), assignment.getValue())) { + builder.assignModelToNode(assignment.getKey().v1(), assignment.getKey().v2(), assignment.getValue()); + } } return builder.build(); } @@ -338,7 +349,7 @@ private Map, Integer> tryAssigningRemaini .toList()) { for (Node n : nodes.stream() .filter( - n -> resourceTracker.remainingNodeMemory.get(n) >= m.memoryBytes() + n -> resourceTracker.remainingNodeMemory.get(n) >= m.minimumMemoryRequiredBytes() && resourceTracker.remainingNodeCores.get(n) >= m.threadsPerAllocation() && resultAllocations.get(Tuple.tuple(m, n)) == 0 ) @@ -354,10 +365,15 @@ private Map, Integer> tryAssigningRemaini ) ) .toList()) { - int assigningAllocations = Math.min( resourceTracker.remainingNodeCores.get(n) / m.threadsPerAllocation(), - resourceTracker.remainingModelAllocations.get(m) + Math.min( + resourceTracker.remainingModelAllocations.get(m), + m.findOptimalAllocations( + resourceTracker.remainingNodeCores.get(n) / m.threadsPerAllocation(), + resourceTracker.remainingModelAllocations.get(m) + ) + ) ); resourceTracker.assign(m, n, assigningAllocations); resultAllocations.put(Tuple.tuple(m, n), assigningAllocations); @@ -427,7 +443,7 @@ private static class ResourceTracker { void assign(AssignmentPlan.Deployment m, Node n, int allocations) { if (assignments.contains(Tuple.tuple(m, n)) == false) { assignments.add(Tuple.tuple(m, n)); - remainingNodeMemory.compute(n, (k, v) -> v - m.memoryBytes()); + remainingNodeMemory.compute(n, (k, v) -> v - m.estimateMemoryUsageBytes(allocations)); } remainingNodeCores.compute(n, (k, v) -> v - allocations * m.threadsPerAllocation()); remainingModelAllocations.compute(m, (k, v) -> v - allocations); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/ZoneAwareAssignmentPlanner.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/ZoneAwareAssignmentPlanner.java index 
9870aa93bf6ce..8c9499ca9e00c 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/ZoneAwareAssignmentPlanner.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/planning/ZoneAwareAssignmentPlanner.java @@ -126,10 +126,12 @@ private AssignmentPlan computeZonePlan( modelIdToTargetAllocations.get(m.id()), m.threadsPerAllocation(), m.currentAllocationsByNodeId(), - // Only force assigning at least once previously assigned models that have not had any allocation yet (tryAssigningPreviouslyAssignedModels && modelIdToRemainingAllocations.get(m.id()) == m.allocations()) ? m.maxAssignedAllocations() - : 0 + : 0, + // Only force assigning at least once previously assigned models that have not had any allocation yet + m.perDeploymentMemoryBytes(), + m.perAllocationMemoryBytes() ) ) .toList(); @@ -151,7 +153,9 @@ private AssignmentPlan computePlanAcrossAllNodes(List plans) { m.allocations(), m.threadsPerAllocation(), allocationsByNodeIdByModelId.get(m.id()), - m.maxAssignedAllocations() + m.maxAssignedAllocations(), + m.perDeploymentMemoryBytes(), + m.perAllocationMemoryBytes() ) ) .toList(); @@ -180,9 +184,13 @@ private AssignmentPlan swapOriginalModelsInPlan( Node originalNode = originalNodeById.get(assignment.getKey().id()); planBuilder.assignModelToNode(originalDeployment, originalNode, assignment.getValue()); if (originalDeployment.currentAllocationsByNodeId().containsKey(originalNode.id())) { + // TODO (#101612) requiredMemory should be calculated by the AssignmentPlan.Builder // As the node has all its available memory we need to manually account memory of models with // current allocations. 
- planBuilder.accountMemory(m, originalNode); + long requiredMemory = originalDeployment.estimateMemoryUsageBytes( + originalDeployment.currentAllocationsByNodeId().get(originalNode.id()) + ); + planBuilder.accountMemory(m, originalNode, requiredMemory); } } } diff --git a/x-pack/plugin/ml/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification b/x-pack/plugin/ml/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification new file mode 100644 index 0000000000000..7dbef291bdd46 --- /dev/null +++ b/x-pack/plugin/ml/src/main/resources/META-INF/services/org.elasticsearch.features.FeatureSpecification @@ -0,0 +1,8 @@ +# +# Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +# or more contributor license agreements. Licensed under the Elastic License +# 2.0; you may not use this file except in compliance with the Elastic License +# 2.0. +# + +org.elasticsearch.xpack.ml.MlFeatures diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentRebalancerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentRebalancerTests.java index 8ccf8839cfc08..334fdfbb8b922 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentRebalancerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentRebalancerTests.java @@ -44,7 +44,8 @@ public void testRebalance_GivenNoAssignments() { Map.of(), Map.of(), Optional.empty(), - 1 + 1, + false ).rebalance().build(); assertThat(result.allAssignments().isEmpty(), is(true)); } @@ -78,7 +79,8 @@ public void testRebalance_GivenAllAssignmentsAreSatisfied_ShouldMakeNoChanges() nodeLoads, Map.of(), Optional.empty(), - 1 + 1, + false ).rebalance().build(); assertThat(currentMetadata, equalTo(result)); @@ -116,7 +118,8 @@ public void 
testRebalance_GivenAllAssignmentsAreSatisfied_GivenOutdatedRoutingEn nodeLoads, Map.of(List.of(), List.of(node1, node2)), Optional.empty(), - 1 + 1, + false ).rebalance().build(); assertThat(result.allAssignments(), is(aMapWithSize(2))); @@ -140,7 +143,7 @@ public void testRebalance_GivenModelToAddAlreadyExists() { .build(); expectThrows( ResourceAlreadyExistsException.class, - () -> new TrainedModelAssignmentRebalancer(currentMetadata, Map.of(), Map.of(), Optional.of(taskParams), 1).rebalance() + () -> new TrainedModelAssignmentRebalancer(currentMetadata, Map.of(), Map.of(), Optional.of(taskParams), 1, false).rebalance() ); } @@ -154,7 +157,8 @@ public void testRebalance_GivenFirstModelToAdd_NoMLNodes() throws Exception { Map.of(), Map.of(), Optional.of(taskParams), - 1 + 1, + false ).rebalance().build(); TrainedModelAssignment assignment = result.getDeploymentAssignment(modelId); @@ -181,7 +185,8 @@ public void testRebalance_GivenFirstModelToAdd_NotEnoughProcessors() throws Exce nodeLoads, Map.of(List.of(), List.of(node)), Optional.of(taskParams), - 1 + 1, + false ).rebalance().build(); TrainedModelAssignment assignment = result.getDeploymentAssignment(modelId); @@ -217,7 +222,8 @@ public void testRebalance_GivenFirstModelToAdd_NotEnoughMemory() throws Exceptio nodeLoads, Map.of(), Optional.of(taskParams), - 1 + 1, + false ).rebalance().build(); TrainedModelAssignment assignment = result.getDeploymentAssignment(modelId); @@ -253,7 +259,8 @@ public void testRebalance_GivenFirstModelToAdd_ErrorDetectingNodeLoad() throws E nodeLoads, Map.of(), Optional.of(taskParams), - 1 + 1, + false ).rebalance().build(); TrainedModelAssignment assignment = result.getDeploymentAssignment(modelId); @@ -289,7 +296,8 @@ public void testRebalance_GivenProblemsOnMultipleNodes() throws Exception { nodeLoads, Map.of(List.of(), List.of(node1, node2)), Optional.of(taskParams), - 1 + 1, + false ).rebalance().build(); TrainedModelAssignment assignment = 
result.getDeploymentAssignment(modelId); @@ -322,7 +330,8 @@ public void testRebalance_GivenFirstModelToAdd_FitsFully() throws Exception { nodeLoads, Map.of(List.of(), List.of(node1)), Optional.of(taskParams), - 1 + 1, + false ).rebalance().build(); TrainedModelAssignment assignment = result.getDeploymentAssignment(modelId); @@ -361,7 +370,8 @@ public void testRebalance_GivenModelToAdd_AndPreviousAssignments_AndTwoNodes_All nodeLoads, Map.of(List.of(), List.of(node1, node2)), Optional.of(taskParams), - 1 + 1, + false ).rebalance().build(); assertThat(result.allAssignments(), is(aMapWithSize(2))); @@ -425,7 +435,8 @@ public void testRebalance_GivenPreviousAssignments_AndNewNode() throws Exception nodeLoads, Map.of(List.of(), List.of(node1, node2, node3)), Optional.empty(), - 1 + 1, + false ).rebalance().build(); assertThat(result.allAssignments(), is(aMapWithSize(2))); @@ -489,7 +500,8 @@ public void testRebalance_GivenPreviousAssignments_AndRemovedNode_AndRemainingNo nodeLoads, Map.of(List.of(), List.of(node1)), Optional.empty(), - 1 + 1, + false ).rebalance().build(); assertThat(result.allAssignments(), is(aMapWithSize(2))); @@ -559,7 +571,8 @@ public void testRebalance_GivenPreviousAssignments_AndRemovedNode_AndRemainingNo nodeLoads, Map.of(List.of(), List.of(node1)), Optional.empty(), - 1 + 1, + false ).rebalance().build(); assertThat(result.allAssignments(), is(aMapWithSize(2))); @@ -608,7 +621,8 @@ public void testRebalance_GivenFailedAssignment_RestartsAssignment() throws Exce nodeLoads, Map.of(List.of(), List.of(node1)), Optional.empty(), - 1 + 1, + false ).rebalance().build(); assertThat(result.allAssignments(), is(aMapWithSize(1))); @@ -642,7 +656,8 @@ public void testRebalance_GivenLowPriorityModelToAdd_OnlyModel_NotEnoughMemory() nodeLoads, Map.of(), Optional.of(taskParams), - 1 + 1, + false ).rebalance().build(); TrainedModelAssignment assignment = result.getDeploymentAssignment(deploymentId); @@ -658,8 +673,8 @@ public void 
testRebalance_GivenLowPriorityModelToAdd_OnlyModel_NotEnoughMemory() public void testRebalance_GivenLowPriorityModelToAdd_NotEnoughMemoryNorProcessors() throws Exception { long nodeMemoryBytes = ByteSizeValue.ofGb(1).getBytes(); - DiscoveryNode node1 = buildNode("node-1", nodeMemoryBytes, 1); - DiscoveryNode node2 = buildNode("node-2", nodeMemoryBytes, 1); + DiscoveryNode node1 = buildNode("node-1", nodeMemoryBytes, 8); + DiscoveryNode node2 = buildNode("node-2", nodeMemoryBytes, 8); Map nodeLoads = new HashMap<>(); nodeLoads.put(node1, NodeLoad.builder("node-1").setMaxMemory(nodeMemoryBytes).build()); @@ -688,7 +703,8 @@ public void testRebalance_GivenLowPriorityModelToAdd_NotEnoughMemoryNorProcessor nodeLoads, Map.of(List.of("zone-1"), List.of(node1), List.of("zone-2"), List.of(node2)), Optional.of(taskParams1), - 1 + 1, + false ).rebalance().build(); TrainedModelAssignment assignment = result.getDeploymentAssignment(deployment1); @@ -727,7 +743,8 @@ public void testRebalance_GivenMixedPriorityModels_NotEnoughMemoryForLowPriority nodeLoads, Map.of(List.of(), List.of(node1)), Optional.empty(), - 1 + 1, + false ).rebalance().build(); { @@ -780,7 +797,8 @@ public void testRebalance_GivenMixedPriorityModels_TwoZones_EachNodeCanHoldOneMo nodeLoads, Map.of(List.of("zone-1"), List.of(node1), List.of("zone-2"), List.of(node2)), Optional.empty(), - 1 + 1, + false ).rebalance().build(); List assignedNodes = new ArrayList<>(); @@ -834,7 +852,8 @@ public void testRebalance_GivenModelUsingAllCpu_FittingLowPriorityModelCanStart( nodeLoads, Map.of(List.of(), List.of(node1)), Optional.empty(), - 1 + 1, + false ).rebalance().build(); { @@ -884,7 +903,8 @@ public void testRebalance_GivenMultipleLowPriorityModels_AndMultipleNodes() thro nodeLoads, Map.of(List.of(), List.of(node1, node2)), Optional.empty(), - 1 + 1, + false ).rebalance().build(); { @@ -934,7 +954,8 @@ public void testRebalance_GivenNormalPriorityModelToLoad_EvictsLowPriorityModel( nodeLoads, Map.of(List.of(), 
List.of(node1)), Optional.of(taskParams2), - 1 + 1, + false ).rebalance().build(); { @@ -986,7 +1007,8 @@ public void testRebalance_GivenNormalPriorityModelToLoad_AndLowPriorityModelCanS nodeLoads, Map.of(List.of(), List.of(node1, node2)), Optional.of(taskParams2), - 1 + 1, + false ).rebalance().build(); { @@ -1038,7 +1060,8 @@ public void testRebalance_GivenNormalPriorityModelToLoad_AndLowPriorityModelMust nodeLoads, Map.of(List.of(), List.of(node1, node2)), Optional.of(taskParams2), - 1 + 1, + false ).rebalance().build(); { @@ -1084,7 +1107,8 @@ public void testRebalance_GivenFirstModelToAdd_GivenScalingProcessorSetting() { nodeLoads, Map.of(List.of(), List.of(node)), Optional.of(taskParams), - 2 + 2, + false ).rebalance().build(); TrainedModelAssignment assignment = result.getDeploymentAssignment(modelId); @@ -1106,7 +1130,8 @@ public void testRebalance_GivenFirstModelToAdd_GivenScalingProcessorSetting() { nodeLoads, Map.of(List.of(), List.of(node)), Optional.of(taskParams), - 1 + 1, + false ).rebalance().build(); assignment = result.getDeploymentAssignment(modelId); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlanTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlanTests.java index 3ecdd5000ba35..cbbb38f1d1ddd 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlanTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlanTests.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.ml.inference.assignment.planning; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.ml.inference.assignment.planning.AssignmentPlan.Deployment; import org.elasticsearch.xpack.ml.inference.assignment.planning.AssignmentPlan.Node; @@ -24,109 +25,248 @@ public class 
AssignmentPlanTests extends ESTestCase { public void testBuilderCtor_GivenDuplicateNode() { Node n = new Node("n_1", 100, 4); - AssignmentPlan.Deployment m = new AssignmentPlan.Deployment("m_1", 40, 1, 2, Map.of(), 0); + AssignmentPlan.Deployment m = new AssignmentPlan.Deployment("m_1", 40, 1, 2, Map.of(), 0, 0, 0); expectThrows(IllegalArgumentException.class, () -> AssignmentPlan.builder(List.of(n, n), List.of(m))); } public void testBuilderCtor_GivenDuplicateModel() { Node n = new Node("n_1", 100, 4); - Deployment m = new AssignmentPlan.Deployment("m_1", 40, 1, 2, Map.of(), 0); + Deployment m = new AssignmentPlan.Deployment("m_1", 40, 1, 2, Map.of(), 0, 0, 0); expectThrows(IllegalArgumentException.class, () -> AssignmentPlan.builder(List.of(n), List.of(m, m))); } public void testAssignModelToNode_GivenNoPreviousAssignment() { - Node n = new Node("n_1", 100, 4); - AssignmentPlan.Deployment m = new AssignmentPlan.Deployment("m_1", 40, 1, 2, Map.of(), 0); + Node n = new Node("n_1", ByteSizeValue.ofMb(350).getBytes(), 4); - AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); + { // old memory format + AssignmentPlan.Deployment m = new AssignmentPlan.Deployment("m_1", ByteSizeValue.ofMb(40).getBytes(), 1, 2, Map.of(), 0, 0, 0); - assertThat(builder.getRemainingCores(n), equalTo(4)); - assertThat(builder.getRemainingMemory(n), equalTo(100L)); - assertThat(builder.getRemainingAllocations(m), equalTo(1)); - assertThat(builder.getRemainingThreads(m), equalTo(2)); + AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); - builder.assignModelToNode(m, n, 1); + assertThat(builder.getRemainingCores(n), equalTo(4)); + assertThat(builder.getRemainingMemory(n), equalTo(ByteSizeValue.ofMb(350).getBytes())); + assertThat(builder.getRemainingAllocations(m), equalTo(1)); + assertThat(builder.getRemainingThreads(m), equalTo(2)); - assertThat(builder.getRemainingCores(n), equalTo(2)); - assertThat(builder.getRemainingMemory(n), 
equalTo(60L)); - assertThat(builder.getRemainingAllocations(m), equalTo(0)); - assertThat(builder.getRemainingThreads(m), equalTo(0)); + builder.assignModelToNode(m, n, 1); - AssignmentPlan plan = builder.build(); + assertThat(builder.getRemainingCores(n), equalTo(2)); + assertThat(builder.getRemainingMemory(n), equalTo(ByteSizeValue.ofMb(30).getBytes())); + assertThat(builder.getRemainingAllocations(m), equalTo(0)); + assertThat(builder.getRemainingThreads(m), equalTo(0)); - assertThat(plan.models(), contains(m)); - assertThat(plan.satisfiesCurrentAssignments(), is(true)); - assertThat(plan.assignments(m).get(), equalTo(Map.of(n, 1))); + AssignmentPlan plan = builder.build(); + + assertThat(plan.models(), contains(m)); + assertThat(plan.satisfiesCurrentAssignments(), is(true)); + assertThat(plan.assignments(m).get(), equalTo(Map.of(n, 1))); + } + { // new memory format + AssignmentPlan.Deployment m = new AssignmentPlan.Deployment( + "m_1", + ByteSizeValue.ofMb(20).getBytes(), + 1, + 2, + Map.of(), + 0, + ByteSizeValue.ofMb(300).getBytes(), + ByteSizeValue.ofMb(30).getBytes() + ); + + AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); + + assertThat(builder.getRemainingCores(n), equalTo(4)); + assertThat(builder.getRemainingMemory(n), equalTo(ByteSizeValue.ofMb(350).getBytes())); + assertThat(builder.getRemainingAllocations(m), equalTo(1)); + assertThat(builder.getRemainingThreads(m), equalTo(2)); + + builder.assignModelToNode(m, n, 1); + + assertThat(builder.getRemainingCores(n), equalTo(2)); + assertThat(builder.getRemainingMemory(n), equalTo(0L)); + assertThat(builder.getRemainingAllocations(m), equalTo(0)); + assertThat(builder.getRemainingThreads(m), equalTo(0)); + + AssignmentPlan plan = builder.build(); + + assertThat(plan.models(), contains(m)); + assertThat(plan.satisfiesCurrentAssignments(), is(true)); + assertThat(plan.assignments(m).get(), equalTo(Map.of(n, 1))); + } } public void 
testAssignModelToNode_GivenNewPlanSatisfiesCurrentAssignment() { - Node n = new Node("n_1", 100, 4); - AssignmentPlan.Deployment m = new AssignmentPlan.Deployment("m_1", 40, 2, 2, Map.of("n_1", 1), 0); + Node n = new Node("n_1", ByteSizeValue.ofMb(350).getBytes(), 4); + { // old memory format + AssignmentPlan.Deployment m = new AssignmentPlan.Deployment( + "m_1", + ByteSizeValue.ofMb(30).getBytes(), + 2, + 2, + Map.of("n_1", 1), + 0, + 0, + 0 + ); - AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); + AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); - builder.assignModelToNode(m, n, 1); + builder.assignModelToNode(m, n, 1); - assertThat(builder.getRemainingCores(n), equalTo(2)); - assertThat(builder.getRemainingMemory(n), equalTo(100L)); - assertThat(builder.getRemainingAllocations(m), equalTo(1)); - assertThat(builder.getRemainingThreads(m), equalTo(2)); + assertThat(builder.getRemainingCores(n), equalTo(2)); + assertThat(builder.getRemainingMemory(n), equalTo(ByteSizeValue.ofMb(350).getBytes())); + assertThat(builder.getRemainingAllocations(m), equalTo(1)); + assertThat(builder.getRemainingThreads(m), equalTo(2)); - AssignmentPlan plan = builder.build(); + AssignmentPlan plan = builder.build(); - assertThat(plan.models(), contains(m)); - assertThat(plan.satisfiesCurrentAssignments(), is(true)); - assertThat(plan.assignments(m).get(), equalTo(Map.of(n, 1))); + assertThat(plan.models(), contains(m)); + assertThat(plan.satisfiesCurrentAssignments(), is(true)); + assertThat(plan.assignments(m).get(), equalTo(Map.of(n, 1))); + } + { // new memory format + AssignmentPlan.Deployment m = new AssignmentPlan.Deployment( + "m_1", + ByteSizeValue.ofMb(25).getBytes(), + 2, + 2, + Map.of("n_1", 1), + 0, + ByteSizeValue.ofMb(300).getBytes(), + ByteSizeValue.ofMb(25).getBytes() + ); + + AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); + + builder.assignModelToNode(m, n, 1); + + 
assertThat(builder.getRemainingCores(n), equalTo(2)); + assertThat(builder.getRemainingMemory(n), equalTo(ByteSizeValue.ofMb(325).getBytes())); + assertThat(builder.getRemainingAllocations(m), equalTo(1)); + assertThat(builder.getRemainingThreads(m), equalTo(2)); + + AssignmentPlan plan = builder.build(); + + assertThat(plan.models(), contains(m)); + assertThat(plan.satisfiesCurrentAssignments(), is(true)); + assertThat(plan.assignments(m).get(), equalTo(Map.of(n, 1))); + + } } public void testAssignModelToNode_GivenNewPlanDoesNotSatisfyCurrentAssignment() { - Node n = new Node("n_1", 100, 4); - AssignmentPlan.Deployment m = new AssignmentPlan.Deployment("m_1", 40, 2, 2, Map.of("n_1", 2), 0); + Node n = new Node("n_1", ByteSizeValue.ofMb(300).getBytes(), 4); + { + // old memory format + Deployment m = new Deployment("m_1", ByteSizeValue.ofMb(30).getBytes(), 2, 2, Map.of("n_1", 2), 0, 0, 0); - AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); + AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); - builder.assignModelToNode(m, n, 1); + builder.assignModelToNode(m, n, 1); - assertThat(builder.getRemainingCores(n), equalTo(2)); - assertThat(builder.getRemainingMemory(n), equalTo(100L)); - assertThat(builder.getRemainingAllocations(m), equalTo(1)); - assertThat(builder.getRemainingThreads(m), equalTo(2)); + assertThat(builder.getRemainingCores(n), equalTo(2)); + assertThat(builder.getRemainingMemory(n), equalTo(ByteSizeValue.ofMb(300).getBytes())); + assertThat(builder.getRemainingAllocations(m), equalTo(1)); + assertThat(builder.getRemainingThreads(m), equalTo(2)); - AssignmentPlan plan = builder.build(); + AssignmentPlan plan = builder.build(); - assertThat(plan.models(), contains(m)); - assertThat(plan.satisfiesCurrentAssignments(), is(false)); - assertThat(plan.assignments(m).get(), equalTo(Map.of(n, 1))); + assertThat(plan.models(), contains(m)); + assertThat(plan.satisfiesCurrentAssignments(), is(false)); 
+ assertThat(plan.assignments(m).get(), equalTo(Map.of(n, 1))); + } + { + // new memory format + Deployment m = new Deployment( + "m_1", + ByteSizeValue.ofMb(25).getBytes(), + 2, + 2, + Map.of("n_1", 2), + 0, + ByteSizeValue.ofMb(250).getBytes(), + ByteSizeValue.ofMb(25).getBytes() + ); + + AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); + + builder.assignModelToNode(m, n, 1); + + assertThat(builder.getRemainingCores(n), equalTo(2)); + assertThat(builder.getRemainingMemory(n), equalTo(ByteSizeValue.ofMb(275).getBytes())); + assertThat(builder.getRemainingAllocations(m), equalTo(1)); + assertThat(builder.getRemainingThreads(m), equalTo(2)); + + AssignmentPlan plan = builder.build(); + + assertThat(plan.models(), contains(m)); + assertThat(plan.satisfiesCurrentAssignments(), is(false)); + assertThat(plan.assignments(m).get(), equalTo(Map.of(n, 1))); + } } public void testAssignModelToNode_GivenPreviouslyUnassignedModelDoesNotFit() { - Node n = new Node("n_1", 100, 4); - Deployment m = new AssignmentPlan.Deployment("m_1", 101, 2, 2, Map.of(), 0); + Node n = new Node("n_1", ByteSizeValue.ofMb(340 - 1).getBytes(), 4); + Deployment m = new AssignmentPlan.Deployment("m_1", ByteSizeValue.ofMb(50).getBytes(), 2, 2, Map.of(), 0, 0, 0); AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); Exception e = expectThrows(IllegalArgumentException.class, () -> builder.assignModelToNode(m, n, 1)); - assertThat(e.getMessage(), equalTo("not enough memory on node [n_1] to assign model [m_1]")); + assertThat(e.getMessage(), equalTo("not enough memory on node [n_1] to assign [1] allocations to deployment [m_1]")); } public void testAssignModelToNode_GivenPreviouslyAssignedModelDoesNotFit() { - Node n = new Node("n_1", 100, 4); - AssignmentPlan.Deployment m = new AssignmentPlan.Deployment("m_1", 101, 2, 2, Map.of("n_1", 1), 0); + { // old memory format + Node n = new Node("n_1", ByteSizeValue.ofMb(340 - 1).getBytes(), 4); + 
AssignmentPlan.Deployment m = new AssignmentPlan.Deployment( + "m_1", + ByteSizeValue.ofMb(50).getBytes(), + 2, + 2, + Map.of("n_1", 1), + 0, + 0, + 0 + ); - AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); - builder.assignModelToNode(m, n, 2); - AssignmentPlan plan = builder.build(); + AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); - assertThat(plan.models(), contains(m)); - assertThat(plan.satisfiesCurrentAssignments(), is(true)); - assertThat(plan.assignments(m).get(), equalTo(Map.of(n, 2))); + builder.assignModelToNode(m, n, 2); + AssignmentPlan plan = builder.build(); + + assertThat(plan.models(), contains(m)); + assertThat(plan.satisfiesCurrentAssignments(), is(true)); + assertThat(plan.assignments(m).get(), equalTo(Map.of(n, 2))); + } + { // new memory format + Node n = new Node("n_1", ByteSizeValue.ofMb(340 - 1).getBytes(), 4); + AssignmentPlan.Deployment m = new AssignmentPlan.Deployment( + "m_1", + ByteSizeValue.ofMb(30).getBytes(), + 2, + 2, + Map.of("n_1", 1), + 0, + ByteSizeValue.ofMb(300).getBytes(), + ByteSizeValue.ofMb(5).getBytes() + ); + + AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); + + builder.assignModelToNode(m, n, 2); + AssignmentPlan plan = builder.build(); + + assertThat(plan.models(), contains(m)); + assertThat(plan.satisfiesCurrentAssignments(), is(true)); + assertThat(plan.assignments(m).get(), equalTo(Map.of(n, 2))); + } } public void testAssignModelToNode_GivenNotEnoughCores_AndSingleThreadPerAllocation() { - Node n = new Node("n_1", 100, 4); - Deployment m = new AssignmentPlan.Deployment("m_1", 100, 5, 1, Map.of(), 0); + Node n = new Node("n_1", ByteSizeValue.ofMb(500).getBytes(), 4); + Deployment m = new AssignmentPlan.Deployment("m_1", ByteSizeValue.ofMb(100).getBytes(), 5, 1, Map.of(), 0, 0, 0); AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); Exception e = 
expectThrows(IllegalArgumentException.class, () -> builder.assignModelToNode(m, n, 5)); @@ -138,8 +278,8 @@ public void testAssignModelToNode_GivenNotEnoughCores_AndSingleThreadPerAllocati } public void testAssignModelToNode_GivenNotEnoughCores_AndMultipleThreadsPerAllocation() { - Node n = new Node("n_1", 100, 5); - AssignmentPlan.Deployment m = new AssignmentPlan.Deployment("m_1", 100, 3, 2, Map.of(), 0); + Node n = new Node("n_1", ByteSizeValue.ofMb(500).getBytes(), 5); + AssignmentPlan.Deployment m = new AssignmentPlan.Deployment("m_1", ByteSizeValue.ofMb(100).getBytes(), 3, 2, Map.of(), 0, 0, 0); AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); Exception e = expectThrows(IllegalArgumentException.class, () -> builder.assignModelToNode(m, n, 3)); @@ -151,13 +291,22 @@ public void testAssignModelToNode_GivenNotEnoughCores_AndMultipleThreadsPerAlloc } public void testAssignModelToNode_GivenSameModelAssignedTwice() { - Node n = new Node("n_1", 100, 8); - Deployment m = new AssignmentPlan.Deployment("m_1", 60, 4, 2, Map.of(), 0); + Node n = new Node("n_1", ByteSizeValue.ofMb(1000).getBytes(), 8); + Deployment m = new AssignmentPlan.Deployment( + "m_1", + ByteSizeValue.ofMb(50).getBytes(), + 4, + 2, + Map.of(), + 0, + ByteSizeValue.ofMb(300).getBytes(), + ByteSizeValue.ofMb(50).getBytes() + ); AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); assertThat(builder.getRemainingCores(n), equalTo(8)); - assertThat(builder.getRemainingMemory(n), equalTo(100L)); + assertThat(builder.getRemainingMemory(n), equalTo(ByteSizeValue.ofMb(1000).getBytes())); assertThat(builder.getRemainingAllocations(m), equalTo(4)); assertThat(builder.getRemainingThreads(m), equalTo(8)); assertThat(builder.canAssign(m, n, 1), is(true)); @@ -165,7 +314,7 @@ public void testAssignModelToNode_GivenSameModelAssignedTwice() { builder.assignModelToNode(m, n, 1); assertThat(builder.getRemainingCores(n), equalTo(6)); - 
assertThat(builder.getRemainingMemory(n), equalTo(40L)); + assertThat(builder.getRemainingMemory(n), equalTo(ByteSizeValue.ofMb(600).getBytes())); assertThat(builder.getRemainingAllocations(m), equalTo(3)); assertThat(builder.getRemainingThreads(m), equalTo(6)); assertThat(builder.canAssign(m, n, 2), is(true)); @@ -173,7 +322,7 @@ public void testAssignModelToNode_GivenSameModelAssignedTwice() { builder.assignModelToNode(m, n, 2); assertThat(builder.getRemainingCores(n), equalTo(2)); - assertThat(builder.getRemainingMemory(n), equalTo(40L)); + assertThat(builder.getRemainingMemory(n), equalTo(ByteSizeValue.ofMb(500).getBytes())); assertThat(builder.getRemainingAllocations(m), equalTo(1)); assertThat(builder.getRemainingThreads(m), equalTo(2)); @@ -186,7 +335,7 @@ public void testAssignModelToNode_GivenSameModelAssignedTwice() { public void testCanAssign_GivenPreviouslyUnassignedModelDoesNotFit() { Node n = new Node("n_1", 100, 5); - AssignmentPlan.Deployment m = new AssignmentPlan.Deployment("m_1", 101, 1, 1, Map.of(), 0); + AssignmentPlan.Deployment m = new AssignmentPlan.Deployment("m_1", 101, 1, 1, Map.of(), 0, 0, 0); AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); @@ -194,17 +343,33 @@ public void testCanAssign_GivenPreviouslyUnassignedModelDoesNotFit() { } public void testCanAssign_GivenPreviouslyAssignedModelDoesNotFit() { - Node n = new Node("n_1", 100, 5); - Deployment m = new AssignmentPlan.Deployment("m_1", 101, 1, 1, Map.of("n_1", 1), 0); - - AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); - - assertThat(builder.canAssign(m, n, 1), is(true)); + Node n = new Node("n_1", ByteSizeValue.ofMb(300).getBytes(), 5); + { + // old memory format + Deployment m = new Deployment("m_1", ByteSizeValue.ofMb(31).getBytes(), 1, 1, Map.of("n_1", 1), 0, 0, 0); + AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); + assertThat(builder.canAssign(m, n, 1), is(true)); + } + { + // new 
memory format + Deployment m = new Deployment( + "m_1", + ByteSizeValue.ofMb(25).getBytes(), + 1, + 1, + Map.of("n_1", 1), + 0, + ByteSizeValue.ofMb(300).getBytes(), + ByteSizeValue.ofMb(10).getBytes() + ); + AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); + assertThat(builder.canAssign(m, n, 1), is(true)); + } } public void testCanAssign_GivenEnoughMemory() { - Node n = new Node("n_1", 100, 5); - AssignmentPlan.Deployment m = new AssignmentPlan.Deployment("m_1", 100, 3, 2, Map.of(), 0); + Node n = new Node("n_1", ByteSizeValue.ofMb(440).getBytes(), 5); + AssignmentPlan.Deployment m = new AssignmentPlan.Deployment("m_1", ByteSizeValue.ofMb(100).getBytes(), 3, 2, Map.of(), 0, 0, 0); AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); @@ -216,16 +381,25 @@ public void testCanAssign_GivenEnoughMemory() { public void testCompareTo_GivenDifferenceInPreviousAssignments() { AssignmentPlan planSatisfyingPreviousAssignments; AssignmentPlan planNotSatisfyingPreviousAssignments; - Node n = new Node("n_1", 100, 5); + Node n = new Node("n_1", ByteSizeValue.ofMb(300).getBytes(), 5); { - Deployment m = new AssignmentPlan.Deployment("m_1", 100, 3, 2, Map.of("n_1", 2), 0); + Deployment m = new AssignmentPlan.Deployment("m_1", ByteSizeValue.ofMb(30).getBytes(), 3, 2, Map.of("n_1", 2), 0, 0, 0); AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); builder.assignModelToNode(m, n, 2); planSatisfyingPreviousAssignments = builder.build(); } { - AssignmentPlan.Deployment m = new AssignmentPlan.Deployment("m_1", 100, 3, 2, Map.of("n_1", 3), 0); + AssignmentPlan.Deployment m = new AssignmentPlan.Deployment( + "m_1", + ByteSizeValue.ofMb(30).getBytes(), + 3, + 2, + Map.of("n_1", 3), + 0, + 0, + 0 + ); AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); builder.assignModelToNode(m, n, 2); planNotSatisfyingPreviousAssignments = builder.build(); @@ -238,8 +412,17 @@ public void 
testCompareTo_GivenDifferenceInPreviousAssignments() { public void testCompareTo_GivenDifferenceInAllocations() { AssignmentPlan planWithMoreAllocations; AssignmentPlan planWithFewerAllocations; - Node n = new Node("n_1", 100, 5); - AssignmentPlan.Deployment m = new AssignmentPlan.Deployment("m_1", 100, 3, 2, Map.of("n_1", 1), 0); + Node n = new Node("n_1", ByteSizeValue.ofMb(300).getBytes(), 5); + AssignmentPlan.Deployment m = new AssignmentPlan.Deployment( + "m_1", + ByteSizeValue.ofMb(30).getBytes(), + 3, + 2, + Map.of("n_1", 1), + 0, + 0, + 0 + ); { AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); @@ -259,16 +442,25 @@ public void testCompareTo_GivenDifferenceInAllocations() { public void testCompareTo_GivenDifferenceInMemory() { AssignmentPlan planUsingMoreMemory; AssignmentPlan planUsingLessMemory; - Node n = new Node("n_1", 100, 5); + Node n = new Node("n_1", ByteSizeValue.ofMb(300).getBytes(), 5); { - Deployment m = new AssignmentPlan.Deployment("m_1", 100, 3, 2, Map.of("n_1", 1), 0); + Deployment m = new AssignmentPlan.Deployment("m_1", ByteSizeValue.ofMb(30).getBytes(), 3, 2, Map.of("n_1", 1), 0, 0, 0); AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); builder.assignModelToNode(m, n, 2); planUsingMoreMemory = builder.build(); } { - AssignmentPlan.Deployment m = new AssignmentPlan.Deployment("m_1", 99, 3, 2, Map.of("n_1", 1), 0); + AssignmentPlan.Deployment m = new AssignmentPlan.Deployment( + "m_1", + ByteSizeValue.ofMb(29).getBytes(), + 3, + 2, + Map.of("n_1", 1), + 0, + 0, + 0 + ); AssignmentPlan.Builder builder = AssignmentPlan.builder(List.of(n), List.of(m)); builder.assignModelToNode(m, n, 2); planUsingLessMemory = builder.build(); @@ -279,26 +471,96 @@ public void testCompareTo_GivenDifferenceInMemory() { } public void testSatisfiesAllModels_GivenAllModelsAreSatisfied() { - Node node1 = new Node("n_1", 100, 4); - Node node2 = new Node("n_2", 100, 4); - AssignmentPlan.Deployment 
deployment1 = new AssignmentPlan.Deployment("m_1", 50, 1, 2, Map.of(), 0); - AssignmentPlan.Deployment deployment2 = new AssignmentPlan.Deployment("m_2", 30, 2, 1, Map.of(), 0); - AssignmentPlan.Deployment deployment3 = new AssignmentPlan.Deployment("m_3", 20, 4, 1, Map.of(), 0); - AssignmentPlan plan = AssignmentPlan.builder(List.of(node1, node2), List.of(deployment1, deployment2, deployment3)) - .assignModelToNode(deployment1, node1, 1) - .assignModelToNode(deployment2, node2, 2) - .assignModelToNode(deployment3, node1, 2) - .assignModelToNode(deployment3, node2, 2) - .build(); - assertThat(plan.satisfiesAllModels(), is(true)); + Node node1 = new Node("n_1", ByteSizeValue.ofMb(1000).getBytes(), 4); + Node node2 = new Node("n_2", ByteSizeValue.ofMb(1000).getBytes(), 4); + { + // old memory format + AssignmentPlan.Deployment deployment1 = new AssignmentPlan.Deployment( + "m_1", + ByteSizeValue.ofMb(50).getBytes(), + 1, + 2, + Map.of(), + 0, + 0, + 0 + ); + AssignmentPlan.Deployment deployment2 = new AssignmentPlan.Deployment( + "m_2", + ByteSizeValue.ofMb(30).getBytes(), + 2, + 1, + Map.of(), + 0, + 0, + 0 + ); + AssignmentPlan.Deployment deployment3 = new AssignmentPlan.Deployment( + "m_3", + ByteSizeValue.ofMb(20).getBytes(), + 4, + 1, + Map.of(), + 0, + 0, + 0 + ); + AssignmentPlan plan = AssignmentPlan.builder(List.of(node1, node2), List.of(deployment1, deployment2, deployment3)) + .assignModelToNode(deployment1, node1, 1) + .assignModelToNode(deployment2, node2, 2) + .assignModelToNode(deployment3, node1, 2) + .assignModelToNode(deployment3, node2, 2) + .build(); + assertThat(plan.satisfiesAllModels(), is(true)); + } + { + // new memory format + AssignmentPlan.Deployment deployment1 = new AssignmentPlan.Deployment( + "m_1", + ByteSizeValue.ofMb(50).getBytes(), + 1, + 2, + Map.of(), + 0, + ByteSizeValue.ofMb(300).getBytes(), + ByteSizeValue.ofMb(10).getBytes() + ); + AssignmentPlan.Deployment deployment2 = new AssignmentPlan.Deployment( + "m_2", + 
ByteSizeValue.ofMb(30).getBytes(), + 2, + 1, + Map.of(), + 0, + ByteSizeValue.ofMb(300).getBytes(), + ByteSizeValue.ofMb(10).getBytes() + ); + AssignmentPlan.Deployment deployment3 = new AssignmentPlan.Deployment( + "m_3", + ByteSizeValue.ofMb(20).getBytes(), + 4, + 1, + Map.of(), + 0, + ByteSizeValue.ofMb(300).getBytes(), + ByteSizeValue.ofMb(10).getBytes() + ); + AssignmentPlan plan = AssignmentPlan.builder(List.of(node1, node2), List.of(deployment1, deployment2, deployment3)) + .assignModelToNode(deployment1, node1, 1) + .assignModelToNode(deployment2, node2, 2) + .assignModelToNode(deployment3, node1, 2) + .assignModelToNode(deployment3, node2, 2) + .build(); + assertThat(plan.satisfiesAllModels(), is(true)); + } } public void testSatisfiesAllModels_GivenOneModelHasOneAllocationLess() { - Node node1 = new Node("n_1", 100, 4); - Node node2 = new Node("n_2", 100, 4); - AssignmentPlan.Deployment deployment1 = new AssignmentPlan.Deployment("m_1", 50, 1, 2, Map.of(), 0); - AssignmentPlan.Deployment deployment2 = new AssignmentPlan.Deployment("m_2", 30, 2, 1, Map.of(), 0); - Deployment deployment3 = new Deployment("m_3", 20, 4, 1, Map.of(), 0); + Node node1 = new Node("n_1", ByteSizeValue.ofMb(1000).getBytes(), 4); + Node node2 = new Node("n_2", ByteSizeValue.ofMb(1000).getBytes(), 4); + Deployment deployment1 = new Deployment("m_1", ByteSizeValue.ofMb(50).getBytes(), 1, 2, Map.of(), 0, 0, 0); + Deployment deployment2 = new Deployment("m_2", ByteSizeValue.ofMb(30).getBytes(), 2, 1, Map.of(), 0, 0, 0); + Deployment deployment3 = new Deployment("m_3", ByteSizeValue.ofMb(20).getBytes(), 4, 1, Map.of(), 0, 0, 0); AssignmentPlan plan = AssignmentPlan.builder(List.of(node1, node2), List.of(deployment1, deployment2, deployment3)) .assignModelToNode(deployment1, node1, 1) .assignModelToNode(deployment2, node2, 2) @@ -309,11 +571,11 @@ public void testSatisfiesAllModels_GivenOneModelHasOneAllocationLess() { } public void testArePreviouslyAssignedModelsAssigned_GivenTrue() { - 
Node node1 = new Node("n_1", 100, 4); - Node node2 = new Node("n_2", 100, 4); - AssignmentPlan.Deployment deployment1 = new AssignmentPlan.Deployment("m_1", 50, 1, 2, Map.of(), 3); - AssignmentPlan.Deployment deployment2 = new Deployment("m_2", 30, 2, 1, Map.of(), 4); - AssignmentPlan.Deployment deployment3 = new AssignmentPlan.Deployment("m_3", 20, 4, 1, Map.of(), 0); + Node node1 = new Node("n_1", ByteSizeValue.ofMb(1000).getBytes(), 4); + Node node2 = new Node("n_2", ByteSizeValue.ofMb(1000).getBytes(), 4); + Deployment deployment1 = new Deployment("m_1", ByteSizeValue.ofMb(50).getBytes(), 1, 2, Map.of(), 3, 0, 0); + Deployment deployment2 = new Deployment("m_2", ByteSizeValue.ofMb(30).getBytes(), 2, 1, Map.of(), 4, 0, 0); + Deployment deployment3 = new Deployment("m_3", ByteSizeValue.ofMb(20).getBytes(), 4, 1, Map.of(), 0, 0, 0); AssignmentPlan plan = AssignmentPlan.builder(List.of(node1, node2), List.of(deployment1, deployment2, deployment3)) .assignModelToNode(deployment1, node1, 1) .assignModelToNode(deployment2, node2, 1) @@ -322,10 +584,10 @@ public void testArePreviouslyAssignedModelsAssigned_GivenTrue() { } public void testArePreviouslyAssignedModelsAssigned_GivenFalse() { - Node node1 = new Node("n_1", 100, 4); - Node node2 = new Node("n_2", 100, 4); - AssignmentPlan.Deployment deployment1 = new Deployment("m_1", 50, 1, 2, Map.of(), 3); - AssignmentPlan.Deployment deployment2 = new AssignmentPlan.Deployment("m_2", 30, 2, 1, Map.of(), 4); + Node node1 = new Node("n_1", ByteSizeValue.ofMb(1000).getBytes(), 4); + Node node2 = new Node("n_2", ByteSizeValue.ofMb(1000).getBytes(), 4); + Deployment deployment1 = new Deployment("m_1", ByteSizeValue.ofMb(50).getBytes(), 1, 2, Map.of(), 3, 0, 0); + Deployment deployment2 = new Deployment("m_2", ByteSizeValue.ofMb(30).getBytes(), 2, 1, Map.of(), 4, 0, 0); AssignmentPlan plan = AssignmentPlan.builder(List.of(node1, node2), List.of(deployment1, deployment2)) .assignModelToNode(deployment1, node1, 1) .build(); @@ 
-333,12 +595,39 @@ public void testArePreviouslyAssignedModelsAssigned_GivenFalse() { } public void testCountPreviouslyAssignedThatAreStillAssigned() { - Node node1 = new Node("n_1", 100, 4); - Node node2 = new Node("n_2", 100, 4); - Deployment deployment1 = new AssignmentPlan.Deployment("m_1", 50, 1, 2, Map.of(), 3); - AssignmentPlan.Deployment deployment2 = new AssignmentPlan.Deployment("m_2", 30, 2, 1, Map.of(), 4); - AssignmentPlan.Deployment deployment3 = new AssignmentPlan.Deployment("m_3", 20, 4, 1, Map.of(), 1); - AssignmentPlan.Deployment deployment4 = new AssignmentPlan.Deployment("m_4", 20, 4, 1, Map.of(), 0); + Node node1 = new Node("n_1", ByteSizeValue.ofMb(1000).getBytes(), 4); + Node node2 = new Node("n_2", ByteSizeValue.ofMb(1000).getBytes(), 4); + Deployment deployment1 = new AssignmentPlan.Deployment("m_1", ByteSizeValue.ofMb(50).getBytes(), 1, 2, Map.of(), 3, 0, 0); + AssignmentPlan.Deployment deployment2 = new AssignmentPlan.Deployment( + "m_2", + ByteSizeValue.ofMb(30).getBytes(), + 2, + 1, + Map.of(), + 4, + 0, + 0 + ); + AssignmentPlan.Deployment deployment3 = new AssignmentPlan.Deployment( + "m_3", + ByteSizeValue.ofMb(20).getBytes(), + 4, + 1, + Map.of(), + 1, + 0, + 0 + ); + AssignmentPlan.Deployment deployment4 = new AssignmentPlan.Deployment( + "m_4", + ByteSizeValue.ofMb(20).getBytes(), + 4, + 1, + Map.of(), + 0, + 0, + 0 + ); AssignmentPlan plan = AssignmentPlan.builder(List.of(node1, node2), List.of(deployment1, deployment2, deployment3, deployment4)) .assignModelToNode(deployment1, node1, 1) .assignModelToNode(deployment2, node2, 1) diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlannerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlannerTests.java index 82a291a8d9fb2..6a72ccf4c4445 100644 --- 
a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlannerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/AssignmentPlannerTests.java @@ -33,50 +33,144 @@ public class AssignmentPlannerTests extends ESTestCase { + private static long scaleNodeSize(long nodeMemory) { + // 240 Mb is the size in StartTrainedModelDeploymentAction.MEMORY_OVERHEAD + return ByteSizeValue.ofMb(240 + 2 * nodeMemory).getBytes(); + } + public void testModelThatDoesNotFitInMemory() { - List nodes = List.of(new Node("n_1", 100, 4)); - Deployment deployment = new AssignmentPlan.Deployment("m_1", 101, 4, 1, Map.of(), 0); - AssignmentPlan plan = new AssignmentPlanner(nodes, List.of(deployment)).computePlan(); - assertThat(plan.assignments(deployment).isEmpty(), is(true)); + { // Without perDeploymentMemory and perAllocationMemory specified + List nodes = List.of(new Node("n_1", scaleNodeSize(50), 4)); + Deployment deployment = new AssignmentPlan.Deployment("m_1", ByteSizeValue.ofMb(51).getBytes(), 4, 1, Map.of(), 0, 0, 0); + AssignmentPlan plan = new AssignmentPlanner(nodes, List.of(deployment)).computePlan(); + assertThat(plan.assignments(deployment).isEmpty(), is(true)); + } + { // With perDeploymentMemory and perAllocationMemory specified + List nodes = List.of(new Node("n_1", scaleNodeSize(55), 4)); + Deployment deployment = new AssignmentPlan.Deployment( + "m_1", + ByteSizeValue.ofMb(50).getBytes(), + 4, + 1, + Map.of(), + 0, + ByteSizeValue.ofMb(250).getBytes(), + ByteSizeValue.ofMb(51).getBytes() + ); + AssignmentPlan plan = new AssignmentPlanner(nodes, List.of(deployment)).computePlan(); + assertThat(plan.assignments(deployment).isEmpty(), is(true)); + } } public void testModelWithThreadsPerAllocationNotFittingOnAnyNode() { - List nodes = List.of(new Node("n_1", 100, 4), new Node("n_2", 100, 5)); - Deployment deployment = new AssignmentPlan.Deployment("m_1", 1, 1, 6, Map.of(), 0); + 
List nodes = List.of(new Node("n_1", scaleNodeSize(100), 4), new Node("n_2", scaleNodeSize(100), 5)); + Deployment deployment = new AssignmentPlan.Deployment("m_1", ByteSizeValue.ofMb(1).getBytes(), 1, 6, Map.of(), 0, 0, 0); AssignmentPlan plan = new AssignmentPlanner(nodes, List.of(deployment)).computePlan(); assertThat(plan.assignments(deployment).isEmpty(), is(true)); } public void testSingleModelThatFitsFullyOnSingleNode() { { - Node node = new Node("n_1", 100, 4); - Deployment deployment = new AssignmentPlan.Deployment("m_1", 100, 1, 1, Map.of(), 0); + Node node = new Node("n_1", scaleNodeSize(100), 4); + Deployment deployment = new AssignmentPlan.Deployment("m_1", ByteSizeValue.ofMb(100).getBytes(), 1, 1, Map.of(), 0, 0, 0); + AssignmentPlan plan = new AssignmentPlanner(List.of(node), List.of(deployment)).computePlan(); + assertModelFullyAssignedToNode(plan, deployment, node); + } + { + Node node = new Node("n_1", scaleNodeSize(1000), 8); + Deployment deployment = new Deployment("m_1", ByteSizeValue.ofMb(1000).getBytes(), 8, 1, Map.of(), 0, 0, 0); AssignmentPlan plan = new AssignmentPlanner(List.of(node), List.of(deployment)).computePlan(); assertModelFullyAssignedToNode(plan, deployment, node); } { - Node node = new Node("n_1", 1000, 8); - Deployment deployment = new Deployment("m_1", 1000, 8, 1, Map.of(), 0); + Node node = new Node("n_1", scaleNodeSize(10000), 16); + AssignmentPlan.Deployment deployment = new AssignmentPlan.Deployment( + "m_1", + ByteSizeValue.ofMb(10000).getBytes(), + 1, + 16, + Map.of(), + 0, + 0, + 0 + ); AssignmentPlan plan = new AssignmentPlanner(List.of(node), List.of(deployment)).computePlan(); assertModelFullyAssignedToNode(plan, deployment, node); } { - Node node = new Node("n_1", 10000, 16); - AssignmentPlan.Deployment deployment = new AssignmentPlan.Deployment("m_1", 10000, 1, 16, Map.of(), 0); + Node node = new Node("n_1", scaleNodeSize(100), 4); + Deployment deployment = new AssignmentPlan.Deployment("m_1", 
ByteSizeValue.ofMb(100).getBytes(), 1, 1, Map.of(), 0, 0, 0); + AssignmentPlan plan = new AssignmentPlanner(List.of(node), List.of(deployment)).computePlan(); + assertModelFullyAssignedToNode(plan, deployment, node); + } + } + + public void testSingleModelThatFitsFullyOnSingleNode_NewMemoryFields() { + { + Node node = new Node("n_1", ByteSizeValue.ofMb(500).getBytes(), 4); + Deployment deployment = new AssignmentPlan.Deployment( + "m_1", + ByteSizeValue.ofMb(100).getBytes(), + 1, + 1, + Map.of(), + 0, + ByteSizeValue.ofMb(300).getBytes(), + ByteSizeValue.ofMb(100).getBytes() + ); + AssignmentPlan plan = new AssignmentPlanner(List.of(node), List.of(deployment)).computePlan(); + assertModelFullyAssignedToNode(plan, deployment, node); + } + { + Node node = new Node("n_1", ByteSizeValue.ofMb(1000).getBytes(), 8); + Deployment deployment = new Deployment( + "m_1", + ByteSizeValue.ofMb(100).getBytes(), + 8, + 1, + Map.of(), + 0, + ByteSizeValue.ofMb(100).getBytes(), + ByteSizeValue.ofMb(100).getBytes() + ); AssignmentPlan plan = new AssignmentPlanner(List.of(node), List.of(deployment)).computePlan(); assertModelFullyAssignedToNode(plan, deployment, node); } } public void testSingleModelThatFitsFullyOnSingleNode_GivenTwoNodes_ShouldBeFullyAssignedOnOneNode() { - Node node1 = new Node("n_1", 100, 4); - Node node2 = new Node("n_2", 100, 4); - AssignmentPlan.Deployment deployment = new Deployment("m_1", 100, 4, 1, Map.of(), 0); + Node node1 = new Node("n_1", scaleNodeSize(100), 4); + Node node2 = new Node("n_2", scaleNodeSize(100), 4); + AssignmentPlan.Deployment deployment = new Deployment("m_1", ByteSizeValue.ofMb(100).getBytes(), 4, 1, Map.of(), 0, 0, 0); AssignmentPlan plan = new AssignmentPlanner(List.of(node1, node2), List.of(deployment)).computePlan(); Map assignments = plan.assignments(deployment).get(); - if (assignments.get(node1) > 0) { + if (assignments.get(node1) != null) { + assertThat(assignments.get(node1), equalTo(4)); + } else { + 
assertThat(assignments.get(node2), equalTo(4)); + } + } + + public void testSingleModelThatFitsFullyOnSingleNode_GivenTwoNodes_ShouldBeFullyAssignedOnOneNode_NewMemoryFields() { + Node node1 = new Node("n_1", ByteSizeValue.ofMb(1000).getBytes(), 4); + Node node2 = new Node("n_2", ByteSizeValue.ofMb(1000).getBytes(), 4); + AssignmentPlan.Deployment deployment = new Deployment( + "m_1", + ByteSizeValue.ofMb(100).getBytes(), + 4, + 1, + Map.of(), + 0, + ByteSizeValue.ofMb(300).getBytes(), + ByteSizeValue.ofMb(150).getBytes() + ); + + AssignmentPlan plan = new AssignmentPlanner(List.of(node1, node2), List.of(deployment)).computePlan(); + + Map assignments = plan.assignments(deployment).get(); + if (assignments.get(node1) != null) { assertThat(assignments.get(node1), equalTo(4)); } else { assertThat(assignments.get(node2), equalTo(4)); @@ -84,10 +178,53 @@ public void testSingleModelThatFitsFullyOnSingleNode_GivenTwoNodes_ShouldBeFully } public void testModelWithMoreAllocationsThanAvailableCores_GivenSingleThreadPerAllocation() { - AssignmentPlan.Deployment deployment = new Deployment("m_1", 30, 10, 1, Map.of(), 0); + AssignmentPlan.Deployment deployment = new Deployment("m_1", ByteSizeValue.ofMb(30).getBytes(), 10, 1, Map.of(), 0, 0, 0); + // Single node + { + Node node = new Node("n_1", scaleNodeSize(100), 4); + AssignmentPlan assignmentPlan = new AssignmentPlanner(List.of(node), List.of(deployment)).computePlan(); + assertThat(assignmentPlan.assignments(deployment).isPresent(), is(true)); + Map assignments = assignmentPlan.assignments(deployment).get(); + assertThat(assignments.get(node), equalTo(4)); + } + // Two nodes + { + Node node1 = new Node("n_1", scaleNodeSize(100), 4); + Node node2 = new Node("n_2", scaleNodeSize(100), 2); + AssignmentPlan assignmentPlan = new AssignmentPlanner(List.of(node1, node2), List.of(deployment)).computePlan(); + assertThat(assignmentPlan.assignments(deployment).isPresent(), is(true)); + Map assignments = 
assignmentPlan.assignments(deployment).get(); + assertThat(assignments.get(node1), equalTo(4)); + assertThat(assignments.get(node2), equalTo(2)); + } + // Three nodes + { + Node node1 = new Node("n_1", scaleNodeSize(100), 4); + Node node2 = new Node("n_2", scaleNodeSize(100), 2); + Node node3 = new Node("n_3", scaleNodeSize(100), 3); + AssignmentPlan assignmentPlan = new AssignmentPlanner(List.of(node1, node2, node3), List.of(deployment)).computePlan(); + assertThat(assignmentPlan.assignments(deployment).isPresent(), is(true)); + Map assignments = assignmentPlan.assignments(deployment).get(); + assertThat(assignments.get(node1), equalTo(4)); + assertThat(assignments.get(node2), equalTo(2)); + assertThat(assignments.get(node3), equalTo(3)); + } + } + + public void testModelWithMoreAllocationsThanAvailableCores_GivenSingleThreadPerAllocation_NewMemoryFields() { + AssignmentPlan.Deployment deployment = new Deployment( + "m_1", + ByteSizeValue.ofMb(100).getBytes(), + 10, + 1, + Map.of(), + 0, + ByteSizeValue.ofMb(300).getBytes(), + ByteSizeValue.ofMb(100).getBytes() + ); // Single node { - Node node = new Node("n_1", 100, 4); + Node node = new Node("n_1", ByteSizeValue.ofMb(800).getBytes(), 4); AssignmentPlan assignmentPlan = new AssignmentPlanner(List.of(node), List.of(deployment)).computePlan(); assertThat(assignmentPlan.assignments(deployment).isPresent(), is(true)); Map assignments = assignmentPlan.assignments(deployment).get(); @@ -95,8 +232,8 @@ public void testModelWithMoreAllocationsThanAvailableCores_GivenSingleThreadPerA } // Two nodes { - Node node1 = new Node("n_1", 100, 4); - Node node2 = new Node("n_2", 100, 2); + Node node1 = new Node("n_1", ByteSizeValue.ofMb(800).getBytes(), 4); + Node node2 = new Node("n_2", ByteSizeValue.ofMb(600).getBytes(), 2); AssignmentPlan assignmentPlan = new AssignmentPlanner(List.of(node1, node2), List.of(deployment)).computePlan(); assertThat(assignmentPlan.assignments(deployment).isPresent(), is(true)); Map assignments = 
assignmentPlan.assignments(deployment).get(); @@ -105,9 +242,9 @@ public void testModelWithMoreAllocationsThanAvailableCores_GivenSingleThreadPerA } // Three nodes { - Node node1 = new Node("n_1", 100, 4); - Node node2 = new Node("n_2", 100, 2); - Node node3 = new Node("n_3", 100, 3); + Node node1 = new Node("n_1", ByteSizeValue.ofMb(800).getBytes(), 4); + Node node2 = new Node("n_2", ByteSizeValue.ofMb(600).getBytes(), 2); + Node node3 = new Node("n_3", ByteSizeValue.ofMb(700).getBytes(), 3); AssignmentPlan assignmentPlan = new AssignmentPlanner(List.of(node1, node2, node3), List.of(deployment)).computePlan(); assertThat(assignmentPlan.assignments(deployment).isPresent(), is(true)); Map assignments = assignmentPlan.assignments(deployment).get(); @@ -118,14 +255,105 @@ public void testModelWithMoreAllocationsThanAvailableCores_GivenSingleThreadPerA } public void testMultipleModelsAndNodesWithSingleSolution() { - Node node1 = new Node("n_1", 100, 7); - Node node2 = new Node("n_2", 100, 7); - Node node3 = new Node("n_3", 100, 2); - Node node4 = new Node("n_4", 100, 2); - Deployment deployment1 = new Deployment("m_1", 50, 2, 4, Map.of(), 0); - AssignmentPlan.Deployment deployment2 = new Deployment("m_2", 50, 2, 3, Map.of(), 0); - Deployment deployment3 = new AssignmentPlan.Deployment("m_3", 50, 1, 2, Map.of(), 0); - AssignmentPlan.Deployment deployment4 = new AssignmentPlan.Deployment("m_4", 50, 2, 1, Map.of(), 0); + Node node1 = new Node("n_1", 2 * scaleNodeSize(50), 7); + Node node2 = new Node("n_2", 2 * scaleNodeSize(50), 7); + Node node3 = new Node("n_3", 2 * scaleNodeSize(50), 2); + Node node4 = new Node("n_4", 2 * scaleNodeSize(50), 2); + Deployment deployment1 = new Deployment("m_1", ByteSizeValue.ofMb(50).getBytes(), 2, 4, Map.of(), 0, 0, 0); + Deployment deployment2 = new Deployment("m_2", ByteSizeValue.ofMb(50).getBytes(), 2, 3, Map.of(), 0, 0, 0); + Deployment deployment3 = new Deployment("m_3", ByteSizeValue.ofMb(50).getBytes(), 1, 2, Map.of(), 0, 0, 0); + 
Deployment deployment4 = new Deployment("m_4", ByteSizeValue.ofMb(50).getBytes(), 2, 1, Map.of(), 0, 0, 0); + + AssignmentPlan plan = new AssignmentPlanner( + List.of(node1, node2, node3, node4), + List.of(deployment1, deployment2, deployment3, deployment4) + ).computePlan(); + + { + assertThat(plan.assignments(deployment1).isPresent(), is(true)); + Map assignments = plan.assignments(deployment1).get(); + assertThat(assignments.get(node1), equalTo(1)); + assertThat(assignments.get(node2), equalTo(1)); + assertThat(assignments.get(node3), is(nullValue())); + assertThat(assignments.get(node4), is(nullValue())); + } + { + assertThat(plan.assignments(deployment2).isPresent(), is(true)); + Map assignments = plan.assignments(deployment2).get(); + assertThat(assignments.get(node1), equalTo(1)); + assertThat(assignments.get(node2), equalTo(1)); + assertThat(assignments.get(node3), is(nullValue())); + assertThat(assignments.get(node4), is(nullValue())); + } + { + assertThat(plan.assignments(deployment3).isPresent(), is(true)); + Map assignments = plan.assignments(deployment3).get(); + assertThat(assignments.get(node1), is(nullValue())); + assertThat(assignments.get(node2), is(nullValue())); + // Will either be on node 3 or 4 + Node assignedNode = assignments.get(node3) != null ? node3 : node4; + Node otherNode = assignedNode.equals(node3) ? node4 : node3; + assertThat(assignments.get(assignedNode), equalTo(1)); + assertThat(assignments.get(otherNode), is(nullValue())); + } + { + assertThat(plan.assignments(deployment4).isPresent(), is(true)); + Map assignments = plan.assignments(deployment4).get(); + assertThat(assignments.get(node1), is(nullValue())); + assertThat(assignments.get(node2), is(nullValue())); + // Will either be on node 3 or 4 + Node assignedNode = assignments.get(node3) != null ? node3 : node4; + Node otherNode = assignedNode.equals(node3) ? 
node4 : node3; + assertThat(assignments.get(assignedNode), equalTo(2)); + assertThat(assignments.get(otherNode), is(nullValue())); + } + } + + public void testMultipleModelsAndNodesWithSingleSolution_NewMemoryFields() { + Node node1 = new Node("n_1", ByteSizeValue.ofMb(800).getBytes(), 7); + Node node2 = new Node("n_2", ByteSizeValue.ofMb(800).getBytes(), 7); + Node node3 = new Node("n_3", ByteSizeValue.ofMb(900).getBytes(), 2); + Node node4 = new Node("n_4", ByteSizeValue.ofMb(900).getBytes(), 2); + Deployment deployment1 = new Deployment( + "m_1", + ByteSizeValue.ofMb(50).getBytes(), + 2, + 4, + Map.of(), + 0, + ByteSizeValue.ofMb(300).getBytes(), + ByteSizeValue.ofMb(50).getBytes() + ); + Deployment deployment2 = new Deployment( + "m_2", + ByteSizeValue.ofMb(50).getBytes(), + 2, + 3, + Map.of(), + 0, + ByteSizeValue.ofMb(300).getBytes(), + ByteSizeValue.ofMb(50).getBytes() + ); + Deployment deployment3 = new Deployment( + "m_3", + ByteSizeValue.ofMb(50).getBytes(), + 1, + 2, + Map.of(), + 0, + ByteSizeValue.ofMb(300).getBytes(), + ByteSizeValue.ofMb(50).getBytes() + ); + Deployment deployment4 = new Deployment( + "m_4", + ByteSizeValue.ofMb(50).getBytes(), + 2, + 1, + Map.of(), + 0, + ByteSizeValue.ofMb(300).getBytes(), + ByteSizeValue.ofMb(50).getBytes() + ); AssignmentPlan plan = new AssignmentPlanner( List.of(node1, node2, node3, node4), @@ -173,10 +401,53 @@ public void testMultipleModelsAndNodesWithSingleSolution() { } public void testModelWithMoreAllocationsThanAvailableCores_GivenThreeThreadsPerAllocation() { - Deployment deployment = new AssignmentPlan.Deployment("m_1", 30, 10, 3, Map.of(), 0); + Deployment deployment = new AssignmentPlan.Deployment("m_1", ByteSizeValue.ofMb(30).getBytes(), 10, 3, Map.of(), 0, 0, 0); + // Single node + { + Node node = new Node("n_1", scaleNodeSize(100), 4); + AssignmentPlan assignmentPlan = new AssignmentPlanner(List.of(node), List.of(deployment)).computePlan(); + 
assertThat(assignmentPlan.assignments(deployment).isPresent(), is(true)); + Map assignments = assignmentPlan.assignments(deployment).get(); + assertThat(assignments.get(node), equalTo(1)); + } + // Two nodes + { + Node node1 = new Node("n_1", scaleNodeSize(100), 4); + Node node2 = new Node("n_2", scaleNodeSize(100), 8); + AssignmentPlan assignmentPlan = new AssignmentPlanner(List.of(node1, node2), List.of(deployment)).computePlan(); + assertThat(assignmentPlan.assignments(deployment).isPresent(), is(true)); + Map assignments = assignmentPlan.assignments(deployment).get(); + assertThat(assignments.get(node1), equalTo(1)); + assertThat(assignments.get(node2), equalTo(2)); + } + // Three nodes + { + Node node1 = new Node("n_1", scaleNodeSize(100), 4); + Node node2 = new Node("n_2", scaleNodeSize(100), 7); + Node node3 = new Node("n_3", scaleNodeSize(100), 15); + AssignmentPlan assignmentPlan = new AssignmentPlanner(List.of(node1, node2, node3), List.of(deployment)).computePlan(); + assertThat(assignmentPlan.assignments(deployment).isPresent(), is(true)); + Map assignments = assignmentPlan.assignments(deployment).get(); + assertThat(assignments.get(node1), equalTo(1)); + assertThat(assignments.get(node2), equalTo(2)); + assertThat(assignments.get(node3), equalTo(5)); + } + } + + public void testModelWithMoreAllocationsThanAvailableCores_GivenThreeThreadsPerAllocation_NewMemoryFields() { + Deployment deployment = new AssignmentPlan.Deployment( + "m_1", + ByteSizeValue.ofMb(50).getBytes(), + 10, + 3, + Map.of(), + 0, + ByteSizeValue.ofMb(300).getBytes(), + ByteSizeValue.ofMb(50).getBytes() + ); // Single node { - Node node = new Node("n_1", 100, 4); + Node node = new Node("n_1", ByteSizeValue.ofMb(800).getBytes(), 4); AssignmentPlan assignmentPlan = new AssignmentPlanner(List.of(node), List.of(deployment)).computePlan(); assertThat(assignmentPlan.assignments(deployment).isPresent(), is(true)); Map assignments = assignmentPlan.assignments(deployment).get(); @@ -184,8 
+455,8 @@ public void testModelWithMoreAllocationsThanAvailableCores_GivenThreeThreadsPerA } // Two nodes { - Node node1 = new Node("n_1", 100, 4); - Node node2 = new Node("n_2", 100, 8); + Node node1 = new Node("n_1", ByteSizeValue.ofMb(800).getBytes(), 4); + Node node2 = new Node("n_2", ByteSizeValue.ofMb(800).getBytes(), 8); AssignmentPlan assignmentPlan = new AssignmentPlanner(List.of(node1, node2), List.of(deployment)).computePlan(); assertThat(assignmentPlan.assignments(deployment).isPresent(), is(true)); Map assignments = assignmentPlan.assignments(deployment).get(); @@ -194,9 +465,9 @@ public void testModelWithMoreAllocationsThanAvailableCores_GivenThreeThreadsPerA } // Three nodes { - Node node1 = new Node("n_1", 100, 4); - Node node2 = new Node("n_2", 100, 7); - Node node3 = new Node("n_3", 100, 15); + Node node1 = new Node("n_1", ByteSizeValue.ofMb(800).getBytes(), 4); + Node node2 = new Node("n_2", ByteSizeValue.ofMb(800).getBytes(), 7); + Node node3 = new Node("n_3", ByteSizeValue.ofMb(800).getBytes(), 15); AssignmentPlan assignmentPlan = new AssignmentPlanner(List.of(node1, node2, node3), List.of(deployment)).computePlan(); assertThat(assignmentPlan.assignments(deployment).isPresent(), is(true)); Map assignments = assignmentPlan.assignments(deployment).get(); @@ -207,8 +478,17 @@ public void testModelWithMoreAllocationsThanAvailableCores_GivenThreeThreadsPerA } public void testModelWithPreviousAssignmentAndNoMoreCoresAvailable() { - Node node = new Node("n_1", 100, 4); - AssignmentPlan.Deployment deployment = new AssignmentPlan.Deployment("m_1", 30, 4, 1, Map.of("n_1", 4), 0); + Node node = new Node("n_1", scaleNodeSize(100), 4); + AssignmentPlan.Deployment deployment = new AssignmentPlan.Deployment( + "m_1", + ByteSizeValue.ofMb(30).getBytes(), + 4, + 1, + Map.of("n_1", 4), + 0, + 0, + 0 + ); AssignmentPlan plan = new AssignmentPlanner(List.of(node), List.of(deployment)).computePlan(); assertThat(plan.assignments(deployment).isPresent(), is(true)); 
@@ -217,26 +497,117 @@ public void testModelWithPreviousAssignmentAndNoMoreCoresAvailable() { public void testFullCoreUtilization_GivenModelsWithSingleThreadPerAllocation() { List nodes = List.of( - new Node("n_1", ByteSizeValue.ofGb(6).getBytes(), 8), - new Node("n_2", ByteSizeValue.ofGb(6).getBytes(), 8), - new Node("n_3", ByteSizeValue.ofGb(6).getBytes(), 8), - new Node("n_4", ByteSizeValue.ofGb(6).getBytes(), 8), - new Node("n_5", ByteSizeValue.ofGb(16).getBytes(), 16), - new Node("n_6", ByteSizeValue.ofGb(8).getBytes(), 16) + new Node("n_1", ByteSizeValue.ofGb(18).getBytes(), 8), + new Node("n_2", ByteSizeValue.ofGb(18).getBytes(), 8), + new Node("n_3", ByteSizeValue.ofGb(18).getBytes(), 8), + new Node("n_4", ByteSizeValue.ofGb(18).getBytes(), 8), + new Node("n_5", ByteSizeValue.ofGb(64).getBytes(), 16), + new Node("n_6", ByteSizeValue.ofGb(32).getBytes(), 16) ); List deployments = List.of( - new Deployment("m_1", ByteSizeValue.ofGb(4).getBytes(), 10, 1, Map.of("n_1", 5), 0), - new AssignmentPlan.Deployment("m_2", ByteSizeValue.ofGb(2).getBytes(), 3, 1, Map.of("n_3", 2), 0), - new AssignmentPlan.Deployment("m_3", ByteSizeValue.ofGb(3).getBytes(), 3, 1, Map.of(), 0), - new Deployment("m_4", ByteSizeValue.ofGb(1).getBytes(), 4, 1, Map.of("n_3", 2), 0), - new Deployment("m_5", ByteSizeValue.ofGb(6).getBytes(), 2, 1, Map.of(), 0), - new Deployment("m_6", ByteSizeValue.ofGb(1).getBytes(), 12, 1, Map.of(), 0), - new AssignmentPlan.Deployment("m_7", ByteSizeValue.ofGb(1).getBytes() / 2, 12, 1, Map.of("n_2", 6), 0), - new Deployment("m_8", ByteSizeValue.ofGb(2).getBytes(), 4, 1, Map.of(), 0), - new Deployment("m_9", ByteSizeValue.ofGb(1).getBytes(), 4, 1, Map.of(), 0), - new AssignmentPlan.Deployment("m_10", ByteSizeValue.ofGb(7).getBytes(), 7, 1, Map.of(), 0), - new Deployment("m_11", ByteSizeValue.ofGb(2).getBytes(), 3, 1, Map.of(), 0), - new Deployment("m_12", ByteSizeValue.ofGb(1).getBytes(), 10, 1, Map.of(), 0) + new Deployment("m_1", 
ByteSizeValue.ofGb(4).getBytes(), 10, 1, Map.of("n_1", 5), 0, 0, 0), + new AssignmentPlan.Deployment("m_2", ByteSizeValue.ofGb(2).getBytes(), 3, 1, Map.of("n_3", 2), 0, 0, 0), + new AssignmentPlan.Deployment("m_3", ByteSizeValue.ofGb(3).getBytes(), 3, 1, Map.of(), 0, 0, 0), + new Deployment("m_4", ByteSizeValue.ofGb(1).getBytes(), 4, 1, Map.of("n_3", 2), 0, 0, 0), + new Deployment("m_5", ByteSizeValue.ofGb(6).getBytes(), 2, 1, Map.of(), 0, 0, 0), + new Deployment("m_6", ByteSizeValue.ofGb(1).getBytes(), 12, 1, Map.of(), 0, 0, 0), + new AssignmentPlan.Deployment("m_7", ByteSizeValue.ofGb(1).getBytes() / 2, 12, 1, Map.of("n_2", 6), 0, 0, 0), + new Deployment("m_8", ByteSizeValue.ofGb(2).getBytes(), 4, 1, Map.of(), 0, 0, 0), + new Deployment("m_9", ByteSizeValue.ofGb(1).getBytes(), 4, 1, Map.of(), 0, 0, 0), + new AssignmentPlan.Deployment("m_10", ByteSizeValue.ofGb(7).getBytes(), 7, 1, Map.of(), 0, 0, 0), + new Deployment("m_11", ByteSizeValue.ofGb(2).getBytes(), 3, 1, Map.of(), 0, 0, 0), + new Deployment("m_12", ByteSizeValue.ofGb(1).getBytes(), 10, 1, Map.of(), 0, 0, 0) + ); + + AssignmentPlan assignmentPlan = new AssignmentPlanner(nodes, deployments).computePlan(); + + int usedCores = 0; + for (AssignmentPlan.Deployment m : deployments) { + Map assignments = assignmentPlan.assignments(m).orElse(Map.of()); + usedCores += assignments.values().stream().mapToInt(Integer::intValue).sum(); + } + assertThat(usedCores, equalTo(64)); + + assertPreviousAssignmentsAreSatisfied(deployments, assignmentPlan); + } + + public void testFullCoreUtilization_GivenModelsWithSingleThreadPerAllocation_NewMemoryFields() { + List nodes = List.of( + new Node("n_1", ByteSizeValue.ofGb(18).getBytes(), 8), + new Node("n_2", ByteSizeValue.ofGb(18).getBytes(), 8), + new Node("n_3", ByteSizeValue.ofGb(18).getBytes(), 8), + new Node("n_4", ByteSizeValue.ofGb(18).getBytes(), 8), + new Node("n_5", ByteSizeValue.ofGb(64).getBytes(), 16), + new Node("n_6", ByteSizeValue.ofGb(32).getBytes(), 16) + ); + 
// Use mix of old and new memory fields + List deployments = List.of( + new Deployment( + "m_1", + ByteSizeValue.ofMb(100).getBytes(), + 10, + 1, + Map.of("n_1", 5), + 0, + ByteSizeValue.ofMb(400).getBytes(), + ByteSizeValue.ofMb(100).getBytes() + ), + new Deployment("m_2", ByteSizeValue.ofMb(100).getBytes(), 3, 1, Map.of("n_3", 2), 0, 0, 0), + new Deployment( + "m_3", + ByteSizeValue.ofMb(50).getBytes(), + 3, + 1, + Map.of(), + 0, + ByteSizeValue.ofMb(300).getBytes(), + ByteSizeValue.ofMb(50).getBytes() + ), + new Deployment( + "m_4", + ByteSizeValue.ofMb(50).getBytes(), + 4, + 1, + Map.of("n_3", 2), + 0, + ByteSizeValue.ofMb(400).getBytes(), + ByteSizeValue.ofMb(100).getBytes() + ), + new Deployment( + "m_5", + ByteSizeValue.ofMb(500).getBytes(), + 2, + 1, + Map.of(), + 0, + ByteSizeValue.ofMb(800).getBytes(), + ByteSizeValue.ofMb(100).getBytes() + ), + new Deployment( + "m_6", + ByteSizeValue.ofMb(50).getBytes(), + 12, + 1, + Map.of(), + 0, + ByteSizeValue.ofMb(50).getBytes(), + ByteSizeValue.ofMb(20).getBytes() + ), + new Deployment( + "m_7", + ByteSizeValue.ofMb(50).getBytes(), + 12, + 1, + Map.of("n_2", 6), + 0, + ByteSizeValue.ofMb(300).getBytes(), + ByteSizeValue.ofMb(50).getBytes() + ), + new Deployment("m_8", ByteSizeValue.ofGb(2).getBytes(), 4, 1, Map.of(), 0, 0, 0), + new Deployment("m_9", ByteSizeValue.ofGb(1).getBytes(), 4, 1, Map.of(), 0, 0, 0), + new Deployment("m_10", ByteSizeValue.ofGb(7).getBytes(), 7, 1, Map.of(), 0, 0, 0), + new Deployment("m_11", ByteSizeValue.ofGb(2).getBytes(), 3, 1, Map.of(), 0, 0, 0), + new Deployment("m_12", ByteSizeValue.ofGb(1).getBytes(), 10, 1, Map.of(), 0, 0, 0) ); AssignmentPlan assignmentPlan = new AssignmentPlanner(nodes, deployments).computePlan(); @@ -297,6 +668,9 @@ public void testRandomBenchmark() { StopWatch stopWatch = new StopWatch(); stopWatch.start(); AssignmentPlan assignmentPlan = solver.computePlan(); + for (Node node : nodes) { + assertThat(assignmentPlan.getRemainingNodeMemory(node.id()), 
greaterThanOrEqualTo(0L)); + } stopWatch.stop(); Quality quality = computeQuality(nodes, deployments, assignmentPlan); @@ -336,7 +710,16 @@ public void testPreviousAssignmentsGetAtLeastAsManyAllocationsAfterAddingNewMode .stream() .collect(Collectors.toMap(e -> e.getKey().id(), Map.Entry::getValue)); previousModelsPlusNew.add( - new AssignmentPlan.Deployment(m.id(), m.memoryBytes(), m.allocations(), m.threadsPerAllocation(), previousAssignments, 0) + new AssignmentPlan.Deployment( + m.id(), + m.memoryBytes(), + m.allocations(), + m.threadsPerAllocation(), + previousAssignments, + 0, + 0, + 0 + ) ); } previousModelsPlusNew.add(randomModel("new")); @@ -347,18 +730,20 @@ public void testPreviousAssignmentsGetAtLeastAsManyAllocationsAfterAddingNewMode } public void testGivenLargerModelWithPreviousAssignmentsAndSmallerModelWithoutAssignments() { - Node node1 = new Node("n_1", ByteSizeValue.ofGb(2).getBytes(), 2); - Node node2 = new Node("n_2", ByteSizeValue.ofGb(2).getBytes(), 2); - Node node3 = new Node("n_3", ByteSizeValue.ofGb(2).getBytes(), 2); + Node node1 = new Node("n_1", scaleNodeSize(ByteSizeValue.ofGb(2).getMb()), 2); + Node node2 = new Node("n_2", scaleNodeSize(ByteSizeValue.ofGb(2).getMb()), 2); + Node node3 = new Node("n_3", scaleNodeSize(ByteSizeValue.ofGb(2).getMb()), 2); Deployment deployment1 = new AssignmentPlan.Deployment( "m_1", ByteSizeValue.ofMb(1200).getBytes(), 3, 1, Map.of("n_1", 2, "n_2", 1), + 0, + 0, 0 ); - Deployment deployment2 = new Deployment("m_2", ByteSizeValue.ofMb(1100).getBytes(), 2, 1, Map.of(), 0); + Deployment deployment2 = new Deployment("m_2", ByteSizeValue.ofMb(1100).getBytes(), 2, 1, Map.of(), 0, 0, 0); AssignmentPlan assignmentPlan = new AssignmentPlanner(List.of(node1, node2, node3), List.of(deployment1, deployment2)) .computePlan(); assertThat(assignmentPlan.getRemainingNodeMemory("n_1"), greaterThanOrEqualTo(0L)); @@ -381,15 +766,17 @@ public void testGivenLargerModelWithPreviousAssignmentsAndSmallerModelWithoutAss } 
public void testModelWithoutCurrentAllocationsGetsAssignedIfAllocatedPreviously() { - Node node1 = new Node("n_1", ByteSizeValue.ofGb(4).getBytes(), 2); - Node node2 = new Node("n_2", ByteSizeValue.ofGb(4).getBytes(), 2); + Node node1 = new Node("n_1", ByteSizeValue.ofGb(6).getBytes(), 2); + Node node2 = new Node("n_2", ByteSizeValue.ofGb(6).getBytes(), 2); AssignmentPlan.Deployment deployment1 = new Deployment( "m_1", ByteSizeValue.ofMb(1200).getBytes(), 3, 1, Map.of("n_1", 2, "n_2", 1), - 3 + 3, + 0, + 0 ); AssignmentPlan.Deployment deployment2 = new AssignmentPlan.Deployment( "m_2", @@ -397,35 +784,84 @@ public void testModelWithoutCurrentAllocationsGetsAssignedIfAllocatedPreviously( 1, 2, Map.of(), - 1 + 1, + 0, + 0 ); AssignmentPlan assignmentPlan = new AssignmentPlanner(List.of(node1, node2), List.of(deployment1, deployment2)).computePlan(); Map> indexedBasedPlan = convertToIdIndexed(assignmentPlan); assertThat(indexedBasedPlan.keySet(), hasItems("m_1", "m_2")); - assertThat(indexedBasedPlan.get("m_1"), equalTo(Map.of("n_1", 2))); - assertThat(indexedBasedPlan.get("m_2"), equalTo(Map.of("n_2", 1))); + if (indexedBasedPlan.get("m_2").containsKey("n_1")) { + assertThat(indexedBasedPlan.get("m_1"), equalTo(Map.of("n_2", 2))); + assertThat(indexedBasedPlan.get("m_2"), equalTo(Map.of("n_1", 1))); + } else { + assertThat(indexedBasedPlan.get("m_1"), equalTo(Map.of("n_1", 2))); + assertThat(indexedBasedPlan.get("m_2"), equalTo(Map.of("n_2", 1))); + } assertThat(assignmentPlan.getRemainingNodeMemory("n_1"), greaterThanOrEqualTo(0L)); assertThat(assignmentPlan.getRemainingNodeMemory("n_2"), greaterThanOrEqualTo(0L)); } public void testGivenPreviouslyAssignedModels_CannotAllBeAllocated() { - Node node1 = new Node("n_1", ByteSizeValue.ofGb(2).getBytes(), 2); - AssignmentPlan.Deployment deployment1 = new Deployment("m_1", ByteSizeValue.ofMb(1200).getBytes(), 1, 1, Map.of(), 1); - AssignmentPlan.Deployment deployment2 = new Deployment("m_2", 
ByteSizeValue.ofMb(1100).getBytes(), 1, 1, Map.of(), 1); + Node node1 = new Node("n_1", scaleNodeSize(ByteSizeValue.ofGb(2).getMb()), 2); + AssignmentPlan.Deployment deployment1 = new Deployment("m_1", ByteSizeValue.ofMb(1200).getBytes(), 1, 1, Map.of(), 1, 0, 0); + AssignmentPlan.Deployment deployment2 = new Deployment("m_2", ByteSizeValue.ofMb(1100).getBytes(), 1, 1, Map.of(), 1, 0, 0); AssignmentPlan assignmentPlan = new AssignmentPlanner(List.of(node1), List.of(deployment1, deployment2)).computePlan(); assertThat(assignmentPlan.countPreviouslyAssignedModelsThatAreStillAssigned(), equalTo(1L)); } + public void testGivenClusterResize_AllocationShouldNotExceedMemoryConstraints() { + Node node1 = new Node("n_1", ByteSizeValue.ofMb(1840).getBytes(), 2); + Node node2 = new Node("n_2", ByteSizeValue.ofMb(2580).getBytes(), 2); + Deployment deployment1 = new Deployment("m_1", ByteSizeValue.ofMb(800).getBytes(), 2, 1, Map.of(), 0, 0, 0); + Deployment deployment2 = new AssignmentPlan.Deployment("m_2", ByteSizeValue.ofMb(800).getBytes(), 1, 1, Map.of(), 0, 0, 0); + Deployment deployment3 = new Deployment("m_3", ByteSizeValue.ofMb(250).getBytes(), 4, 1, Map.of(), 0, 0, 0); + + // First only start m_1 + AssignmentPlan assignmentPlan = new AssignmentPlanner(List.of(node1, node2), List.of(deployment1)).computePlan(); + + Map> indexedBasedPlan = convertToIdIndexed(assignmentPlan); + assertThat(indexedBasedPlan.keySet(), hasItems("m_1")); + assertThat(indexedBasedPlan.get("m_1"), equalTo(Map.of("n_1", 2))); + + // Then start m_2 + assignmentPlan = new AssignmentPlanner( + List.of(node1, node2), + Stream.concat(createModelsFromPlan(assignmentPlan).stream(), Stream.of(deployment2)).toList() + ).computePlan(); + + indexedBasedPlan = convertToIdIndexed(assignmentPlan); + assertThat(indexedBasedPlan.keySet(), hasItems("m_1", "m_2")); + assertThat(indexedBasedPlan.get("m_1"), equalTo(Map.of("n_1", 2))); + assertThat(indexedBasedPlan.get("m_2"), equalTo(Map.of("n_2", 1))); + + // Then 
start m_3 + assignmentPlan = new AssignmentPlanner( + List.of(node1, node2), + Stream.concat(createModelsFromPlan(assignmentPlan).stream(), Stream.of(deployment3)).toList() + ).computePlan(); + + indexedBasedPlan = convertToIdIndexed(assignmentPlan); + assertThat(indexedBasedPlan.keySet(), hasItems("m_1", "m_2", "m_3")); + assertThat(indexedBasedPlan.get("m_1"), equalTo(Map.of("n_1", 2))); + assertThat(indexedBasedPlan.get("m_2"), equalTo(Map.of("n_2", 1))); + assertThat(indexedBasedPlan.get("m_3"), equalTo(Map.of("n_2", 1))); + + // First, one node goes away. + assignmentPlan = new AssignmentPlanner(List.of(node1), createModelsFromPlan(assignmentPlan)).computePlan(); + assertThat(assignmentPlan.getRemainingNodeMemory("n_1"), greaterThanOrEqualTo(0L)); + } + public void testGivenClusterResize_ShouldAllocateEachModelAtLeastOnce() { - Node node1 = new Node("n_1", ByteSizeValue.ofMb(1200).getBytes(), 2); - Node node2 = new Node("n_2", ByteSizeValue.ofMb(1200).getBytes(), 2); - Deployment deployment1 = new Deployment("m_1", ByteSizeValue.ofMb(800).getBytes(), 2, 1, Map.of(), 0); - Deployment deployment2 = new Deployment("m_2", ByteSizeValue.ofMb(800).getBytes(), 1, 1, Map.of(), 0); - Deployment deployment3 = new Deployment("m_3", ByteSizeValue.ofMb(250).getBytes(), 4, 1, Map.of(), 0); + Node node1 = new Node("n_1", ByteSizeValue.ofMb(2600).getBytes(), 2); + Node node2 = new Node("n_2", ByteSizeValue.ofMb(2600).getBytes(), 2); + Deployment deployment1 = new Deployment("m_1", ByteSizeValue.ofMb(800).getBytes(), 2, 1, Map.of(), 0, 0, 0); + Deployment deployment2 = new Deployment("m_2", ByteSizeValue.ofMb(800).getBytes(), 1, 1, Map.of(), 0, 0, 0); + Deployment deployment3 = new Deployment("m_3", ByteSizeValue.ofMb(250).getBytes(), 4, 1, Map.of(), 0, 0, 0); // First only start m_1 AssignmentPlan assignmentPlan = new AssignmentPlanner(List.of(node1, node2), List.of(deployment1)).computePlan(); @@ -458,8 +894,8 @@ public void 
testGivenClusterResize_ShouldAllocateEachModelAtLeastOnce() { assertThat(indexedBasedPlan.get("m_3"), equalTo(Map.of("n_2", 1))); // Now the cluster starts getting resized. - Node node3 = new Node("n_3", ByteSizeValue.ofMb(2400).getBytes(), 2); - Node node4 = new Node("n_4", ByteSizeValue.ofMb(2400).getBytes(), 2); + Node node3 = new Node("n_3", ByteSizeValue.ofMb(2600).getBytes(), 2); + Node node4 = new Node("n_4", ByteSizeValue.ofMb(2600).getBytes(), 2); // First, one node goes away. assignmentPlan = new AssignmentPlanner(List.of(node1), createModelsFromPlan(assignmentPlan)).computePlan(); @@ -492,11 +928,65 @@ public void testGivenClusterResize_ShouldAllocateEachModelAtLeastOnce() { public void testGivenClusterResize_ShouldRemoveAllocatedModels() { // Ensure that plan is removing previously allocated models if not enough memory is available - Node node1 = new Node("n_1", ByteSizeValue.ofMb(1200).getBytes(), 2); - Node node2 = new Node("n_2", ByteSizeValue.ofMb(1200).getBytes(), 2); - Deployment deployment1 = new Deployment("m_1", ByteSizeValue.ofMb(800).getBytes(), 2, 1, Map.of(), 0); - Deployment deployment2 = new Deployment("m_2", ByteSizeValue.ofMb(800).getBytes(), 1, 1, Map.of(), 0); - Deployment deployment3 = new Deployment("m_3", ByteSizeValue.ofMb(250).getBytes(), 1, 1, Map.of(), 0); + Node node1 = new Node("n_1", ByteSizeValue.ofMb(1840).getBytes(), 2); + Node node2 = new Node("n_2", ByteSizeValue.ofMb(2580).getBytes(), 2); + Deployment deployment1 = new Deployment("m_1", ByteSizeValue.ofMb(800).getBytes(), 2, 1, Map.of(), 0, 0, 0); + Deployment deployment2 = new Deployment("m_2", ByteSizeValue.ofMb(800).getBytes(), 1, 1, Map.of(), 0, 0, 0); + Deployment deployment3 = new Deployment("m_3", ByteSizeValue.ofMb(250).getBytes(), 1, 1, Map.of(), 0, 0, 0); + + // Create a plan where all deployments are assigned at least once + AssignmentPlan assignmentPlan = new AssignmentPlanner(List.of(node1, node2), List.of(deployment1, deployment2, deployment3)) + 
.computePlan(); + Map> indexedBasedPlan = convertToIdIndexed(assignmentPlan); + assertThat(indexedBasedPlan.keySet(), hasItems("m_1", "m_2", "m_3")); + assertThat(indexedBasedPlan.get("m_1"), equalTo(Map.of("n_1", 2))); + assertThat(indexedBasedPlan.get("m_2"), equalTo(Map.of("n_2", 1))); + assertThat(indexedBasedPlan.get("m_3"), equalTo(Map.of("n_2", 1))); + assertThat(assignmentPlan.getRemainingNodeMemory(node1.id()), greaterThanOrEqualTo(0L)); + assertThat(assignmentPlan.getRemainingNodeMemory(node2.id()), greaterThanOrEqualTo(0L)); + + // Now the cluster starts getting resized. Ensure that resources are not over-allocated. + assignmentPlan = new AssignmentPlanner(List.of(node1), createModelsFromPlan(assignmentPlan)).computePlan(); + assertThat(indexedBasedPlan.get("m_1"), equalTo(Map.of("n_1", 2))); + assertThat(assignmentPlan.getRemainingNodeMemory(node1.id()), greaterThanOrEqualTo(0L)); + assertThat(assignmentPlan.getRemainingNodeCores(node1.id()), greaterThanOrEqualTo(0)); + + } + + public void testGivenClusterResize_ShouldRemoveAllocatedModels_NewMemoryFields() { + // Ensure that plan is removing previously allocated models if not enough memory is available + Node node1 = new Node("n_1", ByteSizeValue.ofMb(700).getBytes(), 2); + Node node2 = new Node("n_2", ByteSizeValue.ofMb(1000).getBytes(), 2); + Deployment deployment1 = new Deployment( + "m_1", + ByteSizeValue.ofMb(100).getBytes(), + 2, + 1, + Map.of(), + 0, + ByteSizeValue.ofMb(400).getBytes(), + ByteSizeValue.ofMb(100).getBytes() + ); + Deployment deployment2 = new Deployment( + "m_2", + ByteSizeValue.ofMb(100).getBytes(), + 1, + 1, + Map.of(), + 0, + ByteSizeValue.ofMb(400).getBytes(), + ByteSizeValue.ofMb(150).getBytes() + ); + Deployment deployment3 = new Deployment( + "m_3", + ByteSizeValue.ofMb(50).getBytes(), + 1, + 1, + Map.of(), + 0, + ByteSizeValue.ofMb(250).getBytes(), + ByteSizeValue.ofMb(50).getBytes() + ); // Create a plan where all deployments are assigned at least once AssignmentPlan 
assignmentPlan = new AssignmentPlanner(List.of(node1, node2), List.of(deployment1, deployment2, deployment3)) @@ -536,7 +1026,9 @@ public static List createModelsFromPlan(AssignmentPlan plan) { m.allocations(), m.threadsPerAllocation(), currentAllocations, - Math.max(m.maxAssignedAllocations(), totalAllocations) + Math.max(m.maxAssignedAllocations(), totalAllocations), + 0, + 0 ) ); } @@ -579,7 +1071,7 @@ public static List randomNodes(int scale, String nodeIdPrefix) { for (int i = 0; i < 1 + 3 * scale; i++) { int cores = randomIntBetween(2, 32); long memBytesPerCore = randomFrom(memBytesPerCoreValues); - nodes.add(new Node(nodeIdPrefix + "n_" + i, cores * memBytesPerCore, cores)); + nodes.add(new Node(nodeIdPrefix + "n_" + i, scaleNodeSize(ByteSizeValue.ofBytes(cores * memBytesPerCore).getMb()), cores)); } return nodes; } @@ -594,14 +1086,30 @@ public static List randomModels(int scale, double load) { public static Deployment randomModel(String idSuffix) { int allocations = randomIntBetween(1, 32); - return new Deployment( - "m_" + idSuffix, - randomLongBetween(ByteSizeValue.ofMb(100).getBytes(), ByteSizeValue.ofGb(10).getBytes()), - randomIntBetween(1, 32), - randomIntBetween(1, 4), - Map.of(), - 0 - ); + // randomly choose between old and new memory fields format + if (randomBoolean()) { + return new Deployment( + "m_" + idSuffix, + randomLongBetween(ByteSizeValue.ofMb(100).getBytes(), ByteSizeValue.ofGb(10).getBytes()), + randomIntBetween(1, 32), + randomIntBetween(1, 4), + Map.of(), + 0, + 0, + 0 + ); + } else { + return new Deployment( + "m_" + idSuffix, + randomLongBetween(ByteSizeValue.ofMb(100).getBytes(), ByteSizeValue.ofGb(1).getBytes()), + randomIntBetween(1, 32), + randomIntBetween(1, 4), + Map.of(), + 0, + randomLongBetween(ByteSizeValue.ofMb(100).getBytes(), ByteSizeValue.ofGb(1).getBytes()), + randomLongBetween(ByteSizeValue.ofMb(100).getBytes(), ByteSizeValue.ofGb(1).getBytes()) + ); + } } public static void 
assertPreviousAssignmentsAreSatisfied(List deployments, AssignmentPlan assignmentPlan) { @@ -628,7 +1136,7 @@ private void runTooManyNodesAndModels(int nodesSize, int modelsSize) { } List deployments = new ArrayList<>(); for (int i = 0; i < modelsSize; i++) { - deployments.add(new Deployment("m_" + i, ByteSizeValue.ofMb(200).getBytes(), 2, 1, Map.of(), 0)); + deployments.add(new Deployment("m_" + i, ByteSizeValue.ofMb(200).getBytes(), 2, 1, Map.of(), 0, 0, 0)); } // Check plan is computed without OOM exception diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/PreserveAllAllocationsTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/PreserveAllAllocationsTests.java index 4a9b01e535d88..c45ce36394109 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/PreserveAllAllocationsTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/PreserveAllAllocationsTests.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.ml.inference.assignment.planning; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.ml.inference.assignment.planning.AssignmentPlan.Deployment; import org.elasticsearch.xpack.ml.inference.assignment.planning.AssignmentPlan.Node; @@ -14,7 +15,6 @@ import java.util.List; import java.util.Map; -import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; @@ -22,77 +22,179 @@ public class PreserveAllAllocationsTests extends ESTestCase { public void testGivenNoPreviousAssignments() { - Node node1 = new Node("n_1", 100, 4); - Node node2 = new Node("n_2", 100, 4); - Deployment deployment1 = new Deployment("m_1", 30, 2, 1, Map.of(), 0); - Deployment deployment2 = new Deployment("m_2", 30, 
2, 4, Map.of(), 0); + Node node1 = new Node("n_1", ByteSizeValue.ofMb(440).getBytes(), 4); + Node node2 = new Node("n_2", ByteSizeValue.ofMb(440).getBytes(), 4); + Deployment deployment1 = new Deployment("m_1", ByteSizeValue.ofMb(30).getBytes(), 2, 1, Map.of(), 0, 0, 0); + Deployment deployment2 = new Deployment("m_2", ByteSizeValue.ofMb(30).getBytes(), 2, 4, Map.of(), 0, 0, 0); PreserveAllAllocations preserveAllAllocations = new PreserveAllAllocations( List.of(node1, node2), List.of(deployment1, deployment2) ); - - List nodesPreservingAllocations = preserveAllAllocations.nodesPreservingAllocations(); - assertThat(nodesPreservingAllocations, contains(node1, node2)); - - List modelsPreservingAllocations = preserveAllAllocations.modelsPreservingAllocations(); - assertThat(modelsPreservingAllocations, contains(deployment1, deployment2)); } public void testGivenPreviousAssignments() { - Node node1 = new Node("n_1", 100, 8); - Node node2 = new Node("n_2", 100, 8); - Deployment deployment1 = new AssignmentPlan.Deployment("m_1", 30, 2, 1, Map.of("n_1", 1), 1); - Deployment deployment2 = new Deployment("m_2", 50, 6, 4, Map.of("n_1", 1, "n_2", 2), 3); - PreserveAllAllocations preserveAllAllocations = new PreserveAllAllocations( - List.of(node1, node2), - List.of(deployment1, deployment2) - ); - - List nodesPreservingAllocations = preserveAllAllocations.nodesPreservingAllocations(); - assertThat(nodesPreservingAllocations, hasSize(2)); - - assertThat(nodesPreservingAllocations.get(0).id(), equalTo("n_1")); - assertThat(nodesPreservingAllocations.get(0).availableMemoryBytes(), equalTo(20L)); - assertThat(nodesPreservingAllocations.get(0).cores(), equalTo(3)); - - assertThat(nodesPreservingAllocations.get(1).id(), equalTo("n_2")); - assertThat(nodesPreservingAllocations.get(1).availableMemoryBytes(), equalTo(50L)); - assertThat(nodesPreservingAllocations.get(1).cores(), equalTo(0)); - - List modelsPreservingAllocations = preserveAllAllocations.modelsPreservingAllocations(); - 
assertThat(modelsPreservingAllocations, hasSize(2)); - - assertThat(modelsPreservingAllocations.get(0).id(), equalTo("m_1")); - assertThat(modelsPreservingAllocations.get(0).memoryBytes(), equalTo(30L)); - assertThat(modelsPreservingAllocations.get(0).allocations(), equalTo(1)); - assertThat(modelsPreservingAllocations.get(0).threadsPerAllocation(), equalTo(1)); - assertThat(modelsPreservingAllocations.get(0).currentAllocationsByNodeId(), equalTo(Map.of("n_1", 0))); - - assertThat(modelsPreservingAllocations.get(1).id(), equalTo("m_2")); - assertThat(modelsPreservingAllocations.get(1).memoryBytes(), equalTo(50L)); - assertThat(modelsPreservingAllocations.get(1).allocations(), equalTo(3)); - assertThat(modelsPreservingAllocations.get(1).threadsPerAllocation(), equalTo(4)); - assertThat(modelsPreservingAllocations.get(1).currentAllocationsByNodeId(), equalTo(Map.of("n_1", 0, "n_2", 0))); - - AssignmentPlan plan = AssignmentPlan.builder(List.of(node1, node2), List.of(deployment1, deployment2)) - .assignModelToNode(deployment1, node1, 2) - .build(); - assertThat(plan.assignments(deployment1).get(), equalTo(Map.of(node1, 2))); - assertThat(plan.assignments(deployment2).isEmpty(), is(true)); - - plan = preserveAllAllocations.mergePreservedAllocations(plan); - - assertThat(plan.assignments(deployment1).get(), equalTo(Map.of(node1, 3))); - assertThat(plan.assignments(deployment2).get(), equalTo(Map.of(node1, 1, node2, 2))); - assertThat(plan.getRemainingNodeMemory("n_1"), equalTo(20L)); - assertThat(plan.getRemainingNodeCores("n_1"), equalTo(1)); - assertThat(plan.getRemainingNodeMemory("n_2"), equalTo(50L)); - assertThat(plan.getRemainingNodeCores("n_2"), equalTo(0)); + { + // old memory format + Node node1 = new Node("n_1", ByteSizeValue.ofMb(640).getBytes(), 8); + Node node2 = new Node("n_2", ByteSizeValue.ofMb(640).getBytes(), 8); + Deployment deployment1 = new AssignmentPlan.Deployment( + "m_1", + ByteSizeValue.ofMb(30).getBytes(), + 2, + 1, + Map.of("n_1", 1), + 1, + 
0, + 0 + ); + Deployment deployment2 = new Deployment("m_2", ByteSizeValue.ofMb(50).getBytes(), 6, 4, Map.of("n_1", 1, "n_2", 2), 3, 0, 0); + PreserveAllAllocations preserveAllAllocations = new PreserveAllAllocations( + List.of(node1, node2), + List.of(deployment1, deployment2) + ); + + List nodesPreservingAllocations = preserveAllAllocations.nodesPreservingAllocations(); + assertThat(nodesPreservingAllocations, hasSize(2)); + + assertThat(nodesPreservingAllocations.get(0).id(), equalTo("n_1")); + // 640 - [(2*30 + 240) + (2*50 + 240)] = 0: deployments use 640 MB on the node 1 + assertThat(nodesPreservingAllocations.get(0).availableMemoryBytes(), equalTo(0L)); + // 8 - (1*1+1*4) = 3 : deployments use 5 cores on the node + assertThat(nodesPreservingAllocations.get(0).cores(), equalTo(3)); + + assertThat(nodesPreservingAllocations.get(1).id(), equalTo("n_2")); + // 640 - (50*2+240) = 300 : deployments use 340MB on the node + assertThat(nodesPreservingAllocations.get(1).availableMemoryBytes(), equalTo(ByteSizeValue.ofMb(300).getBytes())); + // 8 - (2*4) = 0 : preserving all allocation2 of deployment 2 should use 8 cores on the node + assertThat(nodesPreservingAllocations.get(1).cores(), equalTo(0)); + + List modelsPreservingAllocations = preserveAllAllocations.modelsPreservingAllocations(); + assertThat(modelsPreservingAllocations, hasSize(2)); + + assertThat(modelsPreservingAllocations.get(0).id(), equalTo("m_1")); + assertThat(modelsPreservingAllocations.get(0).memoryBytes(), equalTo(ByteSizeValue.ofMb(30).getBytes())); + assertThat(modelsPreservingAllocations.get(0).allocations(), equalTo(1)); + assertThat(modelsPreservingAllocations.get(0).threadsPerAllocation(), equalTo(1)); + assertThat(modelsPreservingAllocations.get(0).currentAllocationsByNodeId(), equalTo(Map.of("n_1", 0))); + + assertThat(modelsPreservingAllocations.get(1).id(), equalTo("m_2")); + assertThat(modelsPreservingAllocations.get(1).memoryBytes(), equalTo(ByteSizeValue.ofMb(50).getBytes())); + 
assertThat(modelsPreservingAllocations.get(1).allocations(), equalTo(3)); + assertThat(modelsPreservingAllocations.get(1).threadsPerAllocation(), equalTo(4)); + assertThat(modelsPreservingAllocations.get(1).currentAllocationsByNodeId(), equalTo(Map.of("n_1", 0, "n_2", 0))); + + // Now we have a plan with 2 deployments assigned to 2 nodes. + // Note that deployment 1 has already 1 allocation on node 1, and it gets 2 more. It's more than 2 allocations defined during + // initialization of deployment1, but we don't care at this point. + AssignmentPlan plan = AssignmentPlan.builder(List.of(node1, node2), List.of(deployment1, deployment2)) + .assignModelToNode(deployment1, node1, 2) + .build(); + assertThat(plan.assignments(deployment1).get(), equalTo(Map.of(node1, 2))); + assertThat(plan.assignments(deployment2).isEmpty(), is(true)); + + plan = preserveAllAllocations.mergePreservedAllocations(plan); + assertThat(plan.assignments(deployment1).get(), equalTo(Map.of(node1, 3))); + assertThat(plan.assignments(deployment2).get(), equalTo(Map.of(node1, 1, node2, 2))); + + // Node 1 already had deployments 1 and 2 assigned to it so adding more allocation doesn't change memory usage. 
+ assertThat(plan.getRemainingNodeMemory("n_1"), equalTo(0L)); + // 8 - ((1*1+1*4) + 2*1) = 1 : deployments use 7 cores on the node + assertThat(plan.getRemainingNodeCores("n_1"), equalTo(1)); + // Nothing changed for Node 2 + assertThat(plan.getRemainingNodeMemory("n_2"), equalTo(ByteSizeValue.ofMb(300).getBytes())); + // Nothing changed for Node 2 + assertThat(plan.getRemainingNodeCores("n_2"), equalTo(0)); + } + { + // new memory format + Node node1 = new Node("n_1", ByteSizeValue.ofMb(1000).getBytes(), 8); + Node node2 = new Node("n_2", ByteSizeValue.ofMb(1000).getBytes(), 8); + Deployment deployment1 = new AssignmentPlan.Deployment( + "m_1", + ByteSizeValue.ofMb(30).getBytes(), + 2, + 1, + Map.of("n_1", 1), + 1, + ByteSizeValue.ofMb(300).getBytes(), + ByteSizeValue.ofMb(10).getBytes() + ); + Deployment deployment2 = new Deployment( + "m_2", + ByteSizeValue.ofMb(50).getBytes(), + 6, + 4, + Map.of("n_1", 1, "n_2", 2), + 3, + ByteSizeValue.ofMb(300).getBytes(), + ByteSizeValue.ofMb(10).getBytes() + ); + PreserveAllAllocations preserveAllAllocations = new PreserveAllAllocations( + List.of(node1, node2), + List.of(deployment1, deployment2) + ); + + List nodesPreservingAllocations = preserveAllAllocations.nodesPreservingAllocations(); + assertThat(nodesPreservingAllocations, hasSize(2)); + + assertThat(nodesPreservingAllocations.get(0).id(), equalTo("n_1")); + // 1000 - [(30 + 300+10) + (50 + 300 + 10)] = 300: deployments use 700 MB on the node 1 + assertThat(nodesPreservingAllocations.get(0).availableMemoryBytes(), equalTo(ByteSizeValue.ofMb(300).getBytes())); + // 8 - (1*1+1*4) = 3 : deployments use 5 cores on the node + assertThat(nodesPreservingAllocations.get(0).cores(), equalTo(3)); + + assertThat(nodesPreservingAllocations.get(1).id(), equalTo("n_2")); + // 1000 - (50 + 300 + 2*10) = 630 : deployments use 370MB on the node + assertThat(nodesPreservingAllocations.get(1).availableMemoryBytes(), equalTo(ByteSizeValue.ofMb(630).getBytes())); + // 8 - (2*4) = 0 : 
preserving all allocation2 of deployment 2 should use 8 cores on the node + assertThat(nodesPreservingAllocations.get(1).cores(), equalTo(0)); + + List modelsPreservingAllocations = preserveAllAllocations.modelsPreservingAllocations(); + assertThat(modelsPreservingAllocations, hasSize(2)); + + assertThat(modelsPreservingAllocations.get(0).id(), equalTo("m_1")); + assertThat(modelsPreservingAllocations.get(0).memoryBytes(), equalTo(ByteSizeValue.ofMb(30).getBytes())); + assertThat(modelsPreservingAllocations.get(0).perDeploymentMemoryBytes(), equalTo(ByteSizeValue.ofMb(300).getBytes())); + assertThat(modelsPreservingAllocations.get(0).perAllocationMemoryBytes(), equalTo(ByteSizeValue.ofMb(10).getBytes())); + assertThat(modelsPreservingAllocations.get(0).allocations(), equalTo(1)); + assertThat(modelsPreservingAllocations.get(0).threadsPerAllocation(), equalTo(1)); + assertThat(modelsPreservingAllocations.get(0).currentAllocationsByNodeId(), equalTo(Map.of("n_1", 0))); + + assertThat(modelsPreservingAllocations.get(1).id(), equalTo("m_2")); + assertThat(modelsPreservingAllocations.get(1).memoryBytes(), equalTo(ByteSizeValue.ofMb(50).getBytes())); + assertThat(modelsPreservingAllocations.get(1).perDeploymentMemoryBytes(), equalTo(ByteSizeValue.ofMb(300).getBytes())); + assertThat(modelsPreservingAllocations.get(1).perAllocationMemoryBytes(), equalTo(ByteSizeValue.ofMb(10).getBytes())); + assertThat(modelsPreservingAllocations.get(1).allocations(), equalTo(3)); + assertThat(modelsPreservingAllocations.get(1).threadsPerAllocation(), equalTo(4)); + assertThat(modelsPreservingAllocations.get(1).currentAllocationsByNodeId(), equalTo(Map.of("n_1", 0, "n_2", 0))); + + // Now we have a plan with 2 deployments assigned to 2 nodes. + // Note that deployment 1 has already 1 allocation on node 1, and it gets 2 more. It's more than 2 allocations defined during + // initialization of deployment1, but we don't care at this point. 
+ AssignmentPlan plan = AssignmentPlan.builder(List.of(node1, node2), List.of(deployment1, deployment2)) + .assignModelToNode(deployment1, node1, 2) + .build(); + assertThat(plan.assignments(deployment1).get(), equalTo(Map.of(node1, 2))); + assertThat(plan.assignments(deployment2).isEmpty(), is(true)); + + plan = preserveAllAllocations.mergePreservedAllocations(plan); + assertThat(plan.assignments(deployment1).get(), equalTo(Map.of(node1, 3))); + assertThat(plan.assignments(deployment2).get(), equalTo(Map.of(node1, 1, node2, 2))); + + // 1000 - ((30 + 300 + 3*10) + (50 + 300 + 10)) = 280 : deployments use 720 MB on the node 1 + assertThat(plan.getRemainingNodeMemory("n_1"), equalTo(ByteSizeValue.ofMb(280).getBytes())); + // 8 - ((1*1+1*4) + 2*1) = 1 : deployments use 7 cores on the node + assertThat(plan.getRemainingNodeCores("n_1"), equalTo(1)); + // Nothing changed for Node 2 + assertThat(plan.getRemainingNodeMemory("n_2"), equalTo(ByteSizeValue.ofMb(630).getBytes())); + // Nothing changed for Node 2 + assertThat(plan.getRemainingNodeCores("n_2"), equalTo(0)); + } } public void testGivenModelWithPreviousAssignments_AndPlanToMergeHasNoAssignments() { - Node node = new Node("n_1", 100, 4); - AssignmentPlan.Deployment deployment = new Deployment("m_1", 30, 2, 2, Map.of("n_1", 2), 2); + Node node = new Node("n_1", ByteSizeValue.ofMb(400).getBytes(), 4); + Deployment deployment = new Deployment("m_1", ByteSizeValue.ofMb(30).getBytes(), 2, 2, Map.of("n_1", 2), 2, 0, 0); PreserveAllAllocations preserveAllAllocations = new PreserveAllAllocations(List.of(node), List.of(deployment)); AssignmentPlan plan = AssignmentPlan.builder(List.of(node), List.of(deployment)).build(); @@ -101,7 +203,7 @@ public void testGivenModelWithPreviousAssignments_AndPlanToMergeHasNoAssignments plan = preserveAllAllocations.mergePreservedAllocations(plan); assertThat(plan.assignments(deployment).isPresent(), is(true)); assertThat(plan.assignments(deployment).get(), equalTo(Map.of(node, 2))); - 
assertThat(plan.getRemainingNodeMemory("n_1"), equalTo(70L)); + assertThat(plan.getRemainingNodeMemory("n_1"), equalTo(ByteSizeValue.ofMb(100).getBytes())); assertThat(plan.getRemainingNodeCores("n_1"), equalTo(0)); } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/PreserveOneAllocationTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/PreserveOneAllocationTests.java index d8c3b09422e92..f646bf5cb2e9d 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/PreserveOneAllocationTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/PreserveOneAllocationTests.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.ml.inference.assignment.planning; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.ml.inference.assignment.planning.AssignmentPlan.Deployment; import org.elasticsearch.xpack.ml.inference.assignment.planning.AssignmentPlan.Node; @@ -22,10 +23,10 @@ public class PreserveOneAllocationTests extends ESTestCase { public void testGivenNoPreviousAssignments() { - Node node1 = new Node("n_1", 100, 4); - Node node2 = new Node("n_2", 100, 4); - Deployment deployment1 = new AssignmentPlan.Deployment("m_1", 30, 2, 1, Map.of(), 0); - AssignmentPlan.Deployment deployment2 = new Deployment("m_2", 30, 2, 4, Map.of(), 0); + Node node1 = new Node("n_1", ByteSizeValue.ofMb(440).getBytes(), 4); + Node node2 = new Node("n_2", ByteSizeValue.ofMb(440).getBytes(), 4); + Deployment deployment1 = new AssignmentPlan.Deployment("m_1", ByteSizeValue.ofMb(30).getBytes(), 2, 1, Map.of(), 0, 0, 0); + AssignmentPlan.Deployment deployment2 = new Deployment("m_2", ByteSizeValue.ofMb(30).getBytes(), 2, 4, Map.of(), 0, 0, 0); PreserveOneAllocation preserveOneAllocation = new PreserveOneAllocation(List.of(node1, node2), 
List.of(deployment1, deployment2)); List nodesPreservingAllocations = preserveOneAllocation.nodesPreservingAllocations(); @@ -36,67 +37,204 @@ public void testGivenNoPreviousAssignments() { } public void testGivenPreviousAssignments() { - Node node1 = new Node("n_1", 100, 8); - Node node2 = new Node("n_2", 100, 8); - AssignmentPlan.Deployment deployment1 = new AssignmentPlan.Deployment("m_1", 30, 2, 1, Map.of("n_1", 1), 1); - AssignmentPlan.Deployment deployment2 = new Deployment("m_2", 50, 6, 4, Map.of("n_1", 1, "n_2", 2), 3); - PreserveOneAllocation preserveOneAllocation = new PreserveOneAllocation(List.of(node1, node2), List.of(deployment1, deployment2)); - - List nodesPreservingAllocations = preserveOneAllocation.nodesPreservingAllocations(); - assertThat(nodesPreservingAllocations, hasSize(2)); - - assertThat(nodesPreservingAllocations.get(0).id(), equalTo("n_1")); - assertThat(nodesPreservingAllocations.get(0).availableMemoryBytes(), equalTo(20L)); - assertThat(nodesPreservingAllocations.get(0).cores(), equalTo(3)); - - assertThat(nodesPreservingAllocations.get(1).id(), equalTo("n_2")); - assertThat(nodesPreservingAllocations.get(1).availableMemoryBytes(), equalTo(50L)); - assertThat(nodesPreservingAllocations.get(1).cores(), equalTo(4)); - - List modelsPreservingAllocations = preserveOneAllocation.modelsPreservingAllocations(); - assertThat(modelsPreservingAllocations, hasSize(2)); - - assertThat(modelsPreservingAllocations.get(0).id(), equalTo("m_1")); - assertThat(modelsPreservingAllocations.get(0).memoryBytes(), equalTo(30L)); - assertThat(modelsPreservingAllocations.get(0).allocations(), equalTo(1)); - assertThat(modelsPreservingAllocations.get(0).threadsPerAllocation(), equalTo(1)); - assertThat(modelsPreservingAllocations.get(0).currentAllocationsByNodeId(), equalTo(Map.of("n_1", 0))); - - assertThat(modelsPreservingAllocations.get(1).id(), equalTo("m_2")); - assertThat(modelsPreservingAllocations.get(1).memoryBytes(), equalTo(50L)); - 
assertThat(modelsPreservingAllocations.get(1).allocations(), equalTo(4)); - assertThat(modelsPreservingAllocations.get(1).threadsPerAllocation(), equalTo(4)); - assertThat(modelsPreservingAllocations.get(1).currentAllocationsByNodeId(), equalTo(Map.of("n_1", 0, "n_2", 1))); - - AssignmentPlan plan = AssignmentPlan.builder(List.of(node1, node2), List.of(deployment1, deployment2)) - .assignModelToNode(deployment1, node1, 2) - .assignModelToNode(deployment2, node2, 1) - .build(); - assertThat(plan.assignments(deployment1).get(), equalTo(Map.of(node1, 2))); - assertThat(plan.assignments(deployment2).get(), equalTo(Map.of(node2, 1))); - - plan = preserveOneAllocation.mergePreservedAllocations(plan); - - assertThat(plan.assignments(deployment1).get(), equalTo(Map.of(node1, 3))); - assertThat(plan.assignments(deployment2).get(), equalTo(Map.of(node1, 1, node2, 2))); - assertThat(plan.getRemainingNodeMemory("n_1"), equalTo(20L)); - assertThat(plan.getRemainingNodeCores("n_1"), equalTo(1)); - assertThat(plan.getRemainingNodeMemory("n_2"), equalTo(50L)); - assertThat(plan.getRemainingNodeCores("n_2"), equalTo(0)); + { + // old memory format + Node node1 = new Node("n_1", ByteSizeValue.ofMb(640).getBytes(), 8); + Node node2 = new Node("n_2", ByteSizeValue.ofMb(640).getBytes(), 8); + Deployment deployment1 = new Deployment("m_1", ByteSizeValue.ofMb(30).getBytes(), 2, 1, Map.of("n_1", 1), 1, 0, 0); + Deployment deployment2 = new Deployment("m_2", ByteSizeValue.ofMb(50).getBytes(), 6, 4, Map.of("n_1", 1, "n_2", 2), 3, 0, 0); + PreserveOneAllocation preserveOneAllocation = new PreserveOneAllocation( + List.of(node1, node2), + List.of(deployment1, deployment2) + ); + + List nodesPreservingAllocations = preserveOneAllocation.nodesPreservingAllocations(); + assertThat(nodesPreservingAllocations, hasSize(2)); + + assertThat(nodesPreservingAllocations.get(0).id(), equalTo("n_1")); + // 640 - [(30*2+240)+(50*2+240)] = 0 : deployments use all memory on the node + 
assertThat(nodesPreservingAllocations.get(0).availableMemoryBytes(), equalTo(0L)); + // 8 - (1*1+1*4) = 3 : deployments use 5 cores on the node + assertThat(nodesPreservingAllocations.get(0).cores(), equalTo(3)); + + assertThat(nodesPreservingAllocations.get(1).id(), equalTo("n_2")); + // 640 - (50*2+240) = 300 : deployments use 340MB on the node + assertThat(nodesPreservingAllocations.get(1).availableMemoryBytes(), equalTo(ByteSizeValue.ofMb(300).getBytes())); + // 8 - (1*4) = 4 : preserving 1 allocation of deployment 2 should use 4 cores on the node + assertThat(nodesPreservingAllocations.get(1).cores(), equalTo(4)); + + List modelsPreservingAllocations = preserveOneAllocation.modelsPreservingAllocations(); + assertThat(modelsPreservingAllocations, hasSize(2)); + + assertThat(modelsPreservingAllocations.get(0).id(), equalTo("m_1")); + assertThat(modelsPreservingAllocations.get(0).memoryBytes(), equalTo(ByteSizeValue.ofMb(30).getBytes())); + assertThat(modelsPreservingAllocations.get(0).perDeploymentMemoryBytes(), equalTo(ByteSizeValue.ofMb(0).getBytes())); + assertThat(modelsPreservingAllocations.get(0).perAllocationMemoryBytes(), equalTo(ByteSizeValue.ofMb(0).getBytes())); + assertThat(modelsPreservingAllocations.get(0).allocations(), equalTo(1)); + assertThat(modelsPreservingAllocations.get(0).threadsPerAllocation(), equalTo(1)); + assertThat(modelsPreservingAllocations.get(0).currentAllocationsByNodeId(), equalTo(Map.of("n_1", 0))); + + assertThat(modelsPreservingAllocations.get(1).id(), equalTo("m_2")); + assertThat(modelsPreservingAllocations.get(1).memoryBytes(), equalTo(ByteSizeValue.ofMb(50).getBytes())); + assertThat(modelsPreservingAllocations.get(1).perDeploymentMemoryBytes(), equalTo(ByteSizeValue.ofMb(0).getBytes())); + assertThat(modelsPreservingAllocations.get(1).perAllocationMemoryBytes(), equalTo(ByteSizeValue.ofMb(0).getBytes())); + assertThat(modelsPreservingAllocations.get(1).allocations(), equalTo(4)); + 
assertThat(modelsPreservingAllocations.get(1).threadsPerAllocation(), equalTo(4)); + assertThat(modelsPreservingAllocations.get(1).currentAllocationsByNodeId(), equalTo(Map.of("n_1", 0, "n_2", 1))); + + // Now we have a plan with 2 deployments assigned to 2 nodes. + // Note that deployment 1 has already 1 allocation on node 1, and it gets 2 more. It's more than 2 allocations defined during + // initialization of deployment1, but we don't care at this point. + AssignmentPlan plan = AssignmentPlan.builder(List.of(node1, node2), List.of(deployment1, deployment2)) + .assignModelToNode(deployment1, node1, 2) + .assignModelToNode(deployment2, node2, 1) + .build(); + assertThat(plan.assignments(deployment1).get(), equalTo(Map.of(node1, 2))); + assertThat(plan.assignments(deployment2).get(), equalTo(Map.of(node2, 1))); + + plan = preserveOneAllocation.mergePreservedAllocations(plan); + + assertThat(plan.assignments(deployment1).get(), equalTo(Map.of(node1, 3))); + assertThat(plan.assignments(deployment2).get(), equalTo(Map.of(node1, 1, node2, 2))); + // Node 1 already had deployments 1 and 2 assigned to it so adding more allocation doesn't change memory usage. + assertThat(plan.getRemainingNodeMemory("n_1"), equalTo(0L)); + // 8 - ((1*1+1*4) + 2*1) = 1 : deployments use 7 cores on the node + assertThat(plan.getRemainingNodeCores("n_1"), equalTo(1)); + // Node 2 already had deployment 2 assigned to it so adding more allocation doesn't change memory usage. 
+ assertThat(plan.getRemainingNodeMemory("n_2"), equalTo(ByteSizeValue.ofMb(300).getBytes())); + // 8 - [(1*4) + (1*4)] = 0 : deployment 2 should use all cores on the node + assertThat(plan.getRemainingNodeCores("n_2"), equalTo(0)); + } + { + // new memory format + Node node1 = new Node("n_1", ByteSizeValue.ofMb(1000).getBytes(), 8); + Node node2 = new Node("n_2", ByteSizeValue.ofMb(1000).getBytes(), 8); + Deployment deployment1 = new Deployment( + "m_1", + ByteSizeValue.ofMb(30).getBytes(), + 2, + 1, + Map.of("n_1", 1), + 1, + ByteSizeValue.ofMb(300).getBytes(), + ByteSizeValue.ofMb(10).getBytes() + ); + Deployment deployment2 = new Deployment( + "m_2", + ByteSizeValue.ofMb(50).getBytes(), + 6, + 4, + Map.of("n_1", 1, "n_2", 2), + 3, + ByteSizeValue.ofMb(300).getBytes(), + ByteSizeValue.ofMb(10).getBytes() + ); + PreserveOneAllocation preserveOneAllocation = new PreserveOneAllocation( + List.of(node1, node2), + List.of(deployment1, deployment2) + ); + + List nodesPreservingAllocations = preserveOneAllocation.nodesPreservingAllocations(); + assertThat(nodesPreservingAllocations, hasSize(2)); + + assertThat(nodesPreservingAllocations.get(0).id(), equalTo("n_1")); + // 1000 - [(30+300+10)+(50 + 300 +10)] = 300 : deployments use 700MB on the node + assertThat(nodesPreservingAllocations.get(0).availableMemoryBytes(), equalTo(ByteSizeValue.ofMb(300).getBytes())); + // 8 - (1*1+1*4) = 3 : deployments use 5 cores on the node + assertThat(nodesPreservingAllocations.get(0).cores(), equalTo(3)); + + assertThat(nodesPreservingAllocations.get(1).id(), equalTo("n_2")); + // 1000 - (50 +300 + 2*10) = 630 : deployments use 370MB on the node + assertThat(nodesPreservingAllocations.get(1).availableMemoryBytes(), equalTo(ByteSizeValue.ofMb(630).getBytes())); + // 8 - (1*4) = 4 : preserving 1 allocation of deployment 2 should use 4 cores on the node + assertThat(nodesPreservingAllocations.get(1).cores(), equalTo(4)); + + List modelsPreservingAllocations = 
preserveOneAllocation.modelsPreservingAllocations(); + assertThat(modelsPreservingAllocations, hasSize(2)); + + assertThat(modelsPreservingAllocations.get(0).id(), equalTo("m_1")); + assertThat(modelsPreservingAllocations.get(0).memoryBytes(), equalTo(ByteSizeValue.ofMb(30).getBytes())); + assertThat(modelsPreservingAllocations.get(0).perDeploymentMemoryBytes(), equalTo(ByteSizeValue.ofMb(300).getBytes())); + assertThat(modelsPreservingAllocations.get(0).perAllocationMemoryBytes(), equalTo(ByteSizeValue.ofMb(10).getBytes())); + assertThat(modelsPreservingAllocations.get(0).allocations(), equalTo(1)); + assertThat(modelsPreservingAllocations.get(0).threadsPerAllocation(), equalTo(1)); + assertThat(modelsPreservingAllocations.get(0).currentAllocationsByNodeId(), equalTo(Map.of("n_1", 0))); + + assertThat(modelsPreservingAllocations.get(1).id(), equalTo("m_2")); + assertThat(modelsPreservingAllocations.get(1).memoryBytes(), equalTo(ByteSizeValue.ofMb(50).getBytes())); + assertThat(modelsPreservingAllocations.get(1).perDeploymentMemoryBytes(), equalTo(ByteSizeValue.ofMb(300).getBytes())); + assertThat(modelsPreservingAllocations.get(1).perAllocationMemoryBytes(), equalTo(ByteSizeValue.ofMb(10).getBytes())); + assertThat(modelsPreservingAllocations.get(1).allocations(), equalTo(4)); + assertThat(modelsPreservingAllocations.get(1).threadsPerAllocation(), equalTo(4)); + assertThat(modelsPreservingAllocations.get(1).currentAllocationsByNodeId(), equalTo(Map.of("n_1", 0, "n_2", 1))); + + // Now we have a plan with 2 deployments assigned to 2 nodes. + // Note that deployment 1 has already 1 allocation on node 1, and it gets 2 more. It's more than 2 allocations defined during + // initialization of deployment1, but we don't care at this point. 
+ AssignmentPlan plan = AssignmentPlan.builder(List.of(node1, node2), List.of(deployment1, deployment2)) + .assignModelToNode(deployment1, node1, 2) + .assignModelToNode(deployment2, node2, 1) + .build(); + assertThat(plan.assignments(deployment1).get(), equalTo(Map.of(node1, 2))); + assertThat(plan.assignments(deployment2).get(), equalTo(Map.of(node2, 1))); + + plan = preserveOneAllocation.mergePreservedAllocations(plan); + + assertThat(plan.assignments(deployment1).get(), equalTo(Map.of(node1, 3))); + assertThat(plan.assignments(deployment2).get(), equalTo(Map.of(node1, 1, node2, 2))); + // 1000 - [(30+300+3*10) + (50+300+10)] = 280 : deployments use 720MB on the node + assertThat(plan.getRemainingNodeMemory("n_1"), equalTo(ByteSizeValue.ofMb(280).getBytes())); + // 8 - ((1*1+1*4) + 2*1) = 1 : deployments use 7 cores on the node + assertThat(plan.getRemainingNodeCores("n_1"), equalTo(1)); + // 1000 - (50 + 300 + 2*10) = 630 : deployments use 370MB on the node + assertThat(plan.getRemainingNodeMemory("n_2"), equalTo(ByteSizeValue.ofMb(630).getBytes())); + // 8 - [(1*4) + (1*4)] = 0 : deployment 2 should use all cores on the node + assertThat(plan.getRemainingNodeCores("n_2"), equalTo(0)); + + } } public void testGivenModelWithPreviousAssignments_AndPlanToMergeHasNoAssignments() { - Node node = new Node("n_1", 100, 4); - AssignmentPlan.Deployment deployment = new Deployment("m_1", 30, 2, 2, Map.of("n_1", 2), 2); - PreserveOneAllocation preserveOneAllocation = new PreserveOneAllocation(List.of(node), List.of(deployment)); - - AssignmentPlan plan = AssignmentPlan.builder(List.of(node), List.of(deployment)).build(); - assertThat(plan.assignments(deployment).isEmpty(), is(true)); - - plan = preserveOneAllocation.mergePreservedAllocations(plan); - assertThat(plan.assignments(deployment).isPresent(), is(true)); - assertThat(plan.assignments(deployment).get(), equalTo(Map.of(node, 1))); - assertThat(plan.getRemainingNodeMemory("n_1"), equalTo(70L)); - 
assertThat(plan.getRemainingNodeCores("n_1"), equalTo(2)); + { + // old memory format + Node node = new Node("n_1", ByteSizeValue.ofMb(400).getBytes(), 4); + Deployment deployment = new Deployment("m_1", ByteSizeValue.ofMb(30).getBytes(), 2, 2, Map.of("n_1", 2), 2, 0, 0); + PreserveOneAllocation preserveOneAllocation = new PreserveOneAllocation(List.of(node), List.of(deployment)); + + AssignmentPlan plan = AssignmentPlan.builder(List.of(node), List.of(deployment)).build(); + assertThat(plan.assignments(deployment).isEmpty(), is(true)); + + plan = preserveOneAllocation.mergePreservedAllocations(plan); + assertThat(plan.assignments(deployment).isPresent(), is(true)); + assertThat(plan.assignments(deployment).get(), equalTo(Map.of(node, 1))); + // 400 - (30*2 + 240) = 100 : deployments use 300MB on the node + assertThat(plan.getRemainingNodeMemory("n_1"), equalTo(ByteSizeValue.ofMb(100).getBytes())); + assertThat(plan.getRemainingNodeCores("n_1"), equalTo(2)); + } + { + // new memory format + Node node = new Node("n_1", ByteSizeValue.ofMb(400).getBytes(), 4); + Deployment deployment = new Deployment( + "m_1", + ByteSizeValue.ofMb(30).getBytes(), + 2, + 2, + Map.of("n_1", 2), + 2, + ByteSizeValue.ofMb(300).getBytes(), + ByteSizeValue.ofMb(10).getBytes() + ); + PreserveOneAllocation preserveOneAllocation = new PreserveOneAllocation(List.of(node), List.of(deployment)); + + AssignmentPlan plan = AssignmentPlan.builder(List.of(node), List.of(deployment)).build(); + assertThat(plan.assignments(deployment).isEmpty(), is(true)); + + plan = preserveOneAllocation.mergePreservedAllocations(plan); + assertThat(plan.assignments(deployment).isPresent(), is(true)); + assertThat(plan.assignments(deployment).get(), equalTo(Map.of(node, 1))); + // 400 - (30 + 300 + 10) = 60 : deployments use 340MB on the node + assertThat(plan.getRemainingNodeMemory("n_1"), equalTo(ByteSizeValue.ofMb(60).getBytes())); + assertThat(plan.getRemainingNodeCores("n_1"), equalTo(2)); + } } } diff --git 
a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/ZoneAwareAssignmentPlannerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/ZoneAwareAssignmentPlannerTests.java index 7ceb8bbb86869..651e4764cb894 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/ZoneAwareAssignmentPlannerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/planning/ZoneAwareAssignmentPlannerTests.java @@ -36,7 +36,7 @@ public class ZoneAwareAssignmentPlannerTests extends ESTestCase { public void testGivenOneModel_OneNode_OneZone_DoesNotFit() { Node node = new Node("n_1", 100, 1); - AssignmentPlan.Deployment deployment = new AssignmentPlan.Deployment("m_1", 100, 1, 2, Map.of(), 0); + AssignmentPlan.Deployment deployment = new AssignmentPlan.Deployment("m_1", 100, 1, 2, Map.of(), 0, 0, 0); AssignmentPlan plan = new ZoneAwareAssignmentPlanner(Map.of(List.of(), List.of(node)), List.of(deployment)).computePlan(); @@ -44,8 +44,17 @@ public void testGivenOneModel_OneNode_OneZone_DoesNotFit() { } public void testGivenOneModel_OneNode_OneZone_FullyFits() { - Node node = new Node("n_1", 100, 4); - AssignmentPlan.Deployment deployment = new AssignmentPlan.Deployment("m_1", 100, 2, 2, Map.of(), 0); + Node node = new Node("n_1", ByteSizeValue.ofMb(440).getBytes(), 4); + AssignmentPlan.Deployment deployment = new AssignmentPlan.Deployment( + "m_1", + ByteSizeValue.ofMb(100).getBytes(), + 2, + 2, + Map.of(), + 0, + 0, + 0 + ); AssignmentPlan plan = new ZoneAwareAssignmentPlanner(Map.of(List.of(), List.of(node)), List.of(deployment)).computePlan(); @@ -53,8 +62,17 @@ public void testGivenOneModel_OneNode_OneZone_FullyFits() { } public void testGivenOneModel_OneNode_OneZone_PartiallyFits() { - Node node = new Node("n_1", 100, 5); - AssignmentPlan.Deployment deployment = new AssignmentPlan.Deployment("m_1", 100, 3, 2, Map.of(), 0); 
+ Node node = new Node("n_1", ByteSizeValue.ofMb(440).getBytes(), 5); + AssignmentPlan.Deployment deployment = new AssignmentPlan.Deployment( + "m_1", + ByteSizeValue.ofMb(100).getBytes(), + 3, + 2, + Map.of(), + 0, + 0, + 0 + ); AssignmentPlan plan = new ZoneAwareAssignmentPlanner(Map.of(List.of(), List.of(node)), List.of(deployment)).computePlan(); @@ -64,9 +82,18 @@ public void testGivenOneModel_OneNode_OneZone_PartiallyFits() { } public void testGivenOneModelWithSingleAllocation_OneNode_TwoZones() { - Node node1 = new Node("n_1", 100, 4); - Node node2 = new Node("n_2", 100, 4); - AssignmentPlan.Deployment deployment = new AssignmentPlan.Deployment("m_1", 100, 1, 2, Map.of(), 0); + Node node1 = new Node("n_1", ByteSizeValue.ofMb(440).getBytes(), 4); + Node node2 = new Node("n_2", ByteSizeValue.ofMb(440).getBytes(), 4); + AssignmentPlan.Deployment deployment = new AssignmentPlan.Deployment( + "m_1", + ByteSizeValue.ofMb(100).getBytes(), + 1, + 2, + Map.of(), + 0, + 0, + 0 + ); AssignmentPlan plan = new ZoneAwareAssignmentPlanner( Map.of(List.of("z1"), List.of(node1), List.of("z2"), List.of(node2)), @@ -82,9 +109,18 @@ public void testGivenOneModelWithSingleAllocation_OneNode_TwoZones() { } public void testGivenOneModel_OneNodePerZone_TwoZones_FullyFits() { - Node node1 = new Node("n_1", 100, 4); - Node node2 = new Node("n_2", 100, 4); - AssignmentPlan.Deployment deployment = new AssignmentPlan.Deployment("m_1", 100, 2, 2, Map.of(), 0); + Node node1 = new Node("n_1", ByteSizeValue.ofMb(440).getBytes(), 4); + Node node2 = new Node("n_2", ByteSizeValue.ofMb(440).getBytes(), 4); + AssignmentPlan.Deployment deployment = new AssignmentPlan.Deployment( + "m_1", + ByteSizeValue.ofMb(100).getBytes(), + 2, + 2, + Map.of(), + 0, + 0, + 0 + ); AssignmentPlan plan = new ZoneAwareAssignmentPlanner( Map.of(List.of("z_1"), List.of(node1), List.of("z_2"), List.of(node2)), @@ -99,9 +135,18 @@ public void testGivenOneModel_OneNodePerZone_TwoZones_FullyFits() { } public void 
testGivenOneModel_OneNodePerZone_TwoZones_PartiallyFits() { - Node node1 = new Node("n_1", 100, 4); - Node node2 = new Node("n_2", 100, 4); - AssignmentPlan.Deployment deployment = new AssignmentPlan.Deployment("m_1", 100, 3, 3, Map.of(), 0); + Node node1 = new Node("n_1", ByteSizeValue.ofMb(440).getBytes(), 4); + Node node2 = new Node("n_2", ByteSizeValue.ofMb(440).getBytes(), 4); + AssignmentPlan.Deployment deployment = new AssignmentPlan.Deployment( + "m_1", + ByteSizeValue.ofMb(100).getBytes(), + 3, + 3, + Map.of(), + 0, + 0, + 0 + ); AssignmentPlan plan = new ZoneAwareAssignmentPlanner( Map.of(List.of("z_1"), List.of(node1), List.of("z_2"), List.of(node2)), @@ -117,15 +162,15 @@ public void testGivenOneModel_OneNodePerZone_TwoZones_PartiallyFits() { } public void testGivenThreeModels_TwoNodesPerZone_ThreeZones_FullyFit() { - Node node1 = new Node("n_1", 100, 4); - Node node2 = new Node("n_2", 100, 4); - Node node3 = new Node("n_3", 100, 4); - Node node4 = new Node("n_4", 100, 4); - Node node5 = new Node("n_5", 100, 4); - Node node6 = new Node("n_6", 100, 4); - AssignmentPlan.Deployment deployment1 = new AssignmentPlan.Deployment("m_1", 25, 4, 1, Map.of(), 0); - Deployment deployment2 = new AssignmentPlan.Deployment("m_2", 25, 6, 2, Map.of(), 0); - AssignmentPlan.Deployment deployment3 = new AssignmentPlan.Deployment("m_3", 25, 2, 3, Map.of(), 0); + Node node1 = new Node("n_1", ByteSizeValue.ofMb(1000).getBytes(), 4); + Node node2 = new Node("n_2", ByteSizeValue.ofMb(1000).getBytes(), 4); + Node node3 = new Node("n_3", ByteSizeValue.ofMb(1000).getBytes(), 4); + Node node4 = new Node("n_4", ByteSizeValue.ofMb(1000).getBytes(), 4); + Node node5 = new Node("n_5", ByteSizeValue.ofMb(1000).getBytes(), 4); + Node node6 = new Node("n_6", ByteSizeValue.ofMb(1000).getBytes(), 4); + Deployment deployment1 = new Deployment("m_1", ByteSizeValue.ofMb(30).getBytes(), 4, 1, Map.of(), 0, 0, 0); + Deployment deployment2 = new Deployment("m_2", ByteSizeValue.ofMb(30).getBytes(), 
6, 2, Map.of(), 0, 0, 0); + Deployment deployment3 = new Deployment("m_3", ByteSizeValue.ofMb(30).getBytes(), 2, 3, Map.of(), 0, 0, 0); Map, List> nodesByZone = Map.of( List.of("z_1"), @@ -168,11 +213,11 @@ public void testGivenThreeModels_TwoNodesPerZone_ThreeZones_FullyFit() { } public void testGivenTwoModelsWithSingleAllocation_OneNode_ThreeZones() { - Node node1 = new Node("n_1", 100, 4); - Node node2 = new Node("n_2", 100, 4); - Node node3 = new Node("n_3", 100, 4); - AssignmentPlan.Deployment deployment1 = new Deployment("m_1", 25, 1, 1, Map.of(), 0); - AssignmentPlan.Deployment deployment2 = new Deployment("m_2", 25, 1, 1, Map.of(), 0); + Node node1 = new Node("n_1", ByteSizeValue.ofMb(1000).getBytes(), 4); + Node node2 = new Node("n_2", ByteSizeValue.ofMb(1000).getBytes(), 4); + Node node3 = new Node("n_3", ByteSizeValue.ofMb(1000).getBytes(), 4); + Deployment deployment1 = new Deployment("m_1", ByteSizeValue.ofMb(30).getBytes(), 1, 1, Map.of(), 0, 0, 0); + Deployment deployment2 = new Deployment("m_2", ByteSizeValue.ofMb(30).getBytes(), 1, 1, Map.of(), 0, 0, 0); AssignmentPlan plan = new ZoneAwareAssignmentPlanner( Map.of(List.of("z1"), List.of(node1), List.of("z2"), List.of(node2), List.of("z3"), List.of(node3)), @@ -203,7 +248,16 @@ public void testPreviousAssignmentsGetAtLeastAsManyAllocationsAfterAddingNewMode .stream() .collect(Collectors.toMap(e -> e.getKey().id(), Map.Entry::getValue)); previousModelsPlusNew.add( - new AssignmentPlan.Deployment(m.id(), m.memoryBytes(), m.allocations(), m.threadsPerAllocation(), previousAssignments, 0) + new AssignmentPlan.Deployment( + m.id(), + m.memoryBytes(), + m.allocations(), + m.threadsPerAllocation(), + previousAssignments, + 0, + 0, + 0 + ) ); } previousModelsPlusNew.add(randomModel("new")); @@ -214,11 +268,11 @@ public void testPreviousAssignmentsGetAtLeastAsManyAllocationsAfterAddingNewMode } public void testGivenClusterResize_GivenOneZone_ShouldAllocateEachModelAtLeastOnce() { - Node node1 = new 
Node("n_1", ByteSizeValue.ofMb(1200).getBytes(), 2); - Node node2 = new Node("n_2", ByteSizeValue.ofMb(1200).getBytes(), 2); - AssignmentPlan.Deployment deployment1 = new AssignmentPlan.Deployment("m_1", ByteSizeValue.ofMb(800).getBytes(), 2, 1, Map.of(), 0); - AssignmentPlan.Deployment deployment2 = new AssignmentPlan.Deployment("m_2", ByteSizeValue.ofMb(800).getBytes(), 1, 1, Map.of(), 0); - AssignmentPlan.Deployment deployment3 = new AssignmentPlan.Deployment("m_3", ByteSizeValue.ofMb(250).getBytes(), 4, 1, Map.of(), 0); + Node node1 = new Node("n_1", ByteSizeValue.ofMb(2580).getBytes(), 2); + Node node2 = new Node("n_2", ByteSizeValue.ofMb(2580).getBytes(), 2); + Deployment deployment1 = new Deployment("m_1", ByteSizeValue.ofMb(800).getBytes(), 2, 1, Map.of(), 0, 0, 0); + Deployment deployment2 = new Deployment("m_2", ByteSizeValue.ofMb(800).getBytes(), 1, 1, Map.of(), 0, 0, 0); + Deployment deployment3 = new Deployment("m_3", ByteSizeValue.ofMb(250).getBytes(), 4, 1, Map.of(), 0, 0, 0); // First only start m_1 AssignmentPlan assignmentPlan = new ZoneAwareAssignmentPlanner(Map.of(List.of(), List.of(node1, node2)), List.of(deployment1)) @@ -252,8 +306,8 @@ public void testGivenClusterResize_GivenOneZone_ShouldAllocateEachModelAtLeastOn assertThat(indexedBasedPlan.get("m_3"), equalTo(Map.of("n_2", 1))); // Now the cluster starts getting resized. - Node node3 = new Node("n_3", ByteSizeValue.ofMb(2400).getBytes(), 2); - Node node4 = new Node("n_4", ByteSizeValue.ofMb(2400).getBytes(), 2); + Node node3 = new Node("n_3", ByteSizeValue.ofMb(5160).getBytes(), 2); + Node node4 = new Node("n_4", ByteSizeValue.ofMb(5160).getBytes(), 2); // First, one node goes away. 
assignmentPlan = new ZoneAwareAssignmentPlanner(Map.of(List.of(), List.of(node1)), createModelsFromPlan(assignmentPlan)) diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/persistence/ChunkedTrainedModelRestorerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/persistence/ChunkedTrainedModelRestorerTests.java index abdd1def956f0..9fb27e8143814 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/persistence/ChunkedTrainedModelRestorerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/persistence/ChunkedTrainedModelRestorerTests.java @@ -51,118 +51,112 @@ public void testRetryingSearch_ReturnsSearchResults() throws InterruptedExceptio } public void testRetryingSearch_ThrowsSearchPhaseExceptionWithNoRetries() { - try (var mockClient = mock(Client.class)) { - var searchPhaseException = new SearchPhaseExecutionException("phase", "error", ShardSearchFailure.EMPTY_ARRAY); - when(mockClient.search(any())).thenThrow(searchPhaseException); - - var request = createSearchRequest(); - - ElasticsearchException exception = expectThrows( - ElasticsearchException.class, - () -> ChunkedTrainedModelRestorer.retryingSearch(mockClient, "1", request, 0, new TimeValue(1, TimeUnit.NANOSECONDS)) - ); - - assertThat(exception.getCause(), is(searchPhaseException)); - assertThat( - exception.getMessage(), - is( - "loading model [1] failed after [0] retries. 
The deployment is now in a failed state, the error may be " - + "transient please stop the deployment and restart" - ) - ); - verify(mockClient, times(1)).search(any()); - } + final var mockClient = mock(Client.class); + var searchPhaseException = new SearchPhaseExecutionException("phase", "error", ShardSearchFailure.EMPTY_ARRAY); + when(mockClient.search(any())).thenThrow(searchPhaseException); + + var request = createSearchRequest(); + + ElasticsearchException exception = expectThrows( + ElasticsearchException.class, + () -> ChunkedTrainedModelRestorer.retryingSearch(mockClient, "1", request, 0, new TimeValue(1, TimeUnit.NANOSECONDS)) + ); + + assertThat(exception.getCause(), is(searchPhaseException)); + assertThat( + exception.getMessage(), + is( + "loading model [1] failed after [0] retries. The deployment is now in a failed state, the error may be " + + "transient please stop the deployment and restart" + ) + ); + verify(mockClient, times(1)).search(any()); } public void testRetryingSearch_ThrowsSearchPhaseExceptionAfterOneRetry() { - try (var mockClient = mock(Client.class)) { - var searchPhaseException = new SearchPhaseExecutionException("phase", "error", ShardSearchFailure.EMPTY_ARRAY); - when(mockClient.search(any())).thenThrow(searchPhaseException); + final var mockClient = mock(Client.class); + var searchPhaseException = new SearchPhaseExecutionException("phase", "error", ShardSearchFailure.EMPTY_ARRAY); + when(mockClient.search(any())).thenThrow(searchPhaseException); - var request = createSearchRequest(); + var request = createSearchRequest(); - ElasticsearchException exception = expectThrows( - ElasticsearchException.class, - () -> ChunkedTrainedModelRestorer.retryingSearch(mockClient, "", request, 1, new TimeValue(1, TimeUnit.NANOSECONDS)) - ); + ElasticsearchException exception = expectThrows( + ElasticsearchException.class, + () -> ChunkedTrainedModelRestorer.retryingSearch(mockClient, "", request, 1, new TimeValue(1, TimeUnit.NANOSECONDS)) + ); - 
assertThat(exception.getCause(), is(searchPhaseException)); - verify(mockClient, times(2)).search(any()); - } + assertThat(exception.getCause(), is(searchPhaseException)); + verify(mockClient, times(2)).search(any()); } public void testRetryingSearch_ThrowsCircuitBreakingExceptionAfterOneRetry_FromSearchPhaseException() { - try (var mockClient = mock(Client.class)) { - var searchPhaseException = new SearchPhaseExecutionException("phase", "error", ShardSearchFailure.EMPTY_ARRAY); - var circuitBreakerException = new CircuitBreakingException("error", CircuitBreaker.Durability.TRANSIENT); - when(mockClient.search(any())).thenThrow(searchPhaseException).thenThrow(circuitBreakerException); + final var mockClient = mock(Client.class); + var searchPhaseException = new SearchPhaseExecutionException("phase", "error", ShardSearchFailure.EMPTY_ARRAY); + var circuitBreakerException = new CircuitBreakingException("error", CircuitBreaker.Durability.TRANSIENT); + when(mockClient.search(any())).thenThrow(searchPhaseException).thenThrow(circuitBreakerException); - var request = createSearchRequest(); + var request = createSearchRequest(); - ElasticsearchException exception = expectThrows( - ElasticsearchException.class, - () -> ChunkedTrainedModelRestorer.retryingSearch(mockClient, "", request, 1, new TimeValue(1, TimeUnit.NANOSECONDS)) - ); + ElasticsearchException exception = expectThrows( + ElasticsearchException.class, + () -> ChunkedTrainedModelRestorer.retryingSearch(mockClient, "", request, 1, new TimeValue(1, TimeUnit.NANOSECONDS)) + ); - assertThat(exception.getCause(), is(circuitBreakerException)); - verify(mockClient, times(2)).search(any()); - } + assertThat(exception.getCause(), is(circuitBreakerException)); + verify(mockClient, times(2)).search(any()); } public void testRetryingSearch_EnsureExceptionCannotBeUnwrapped() { - try (var mockClient = mock(Client.class)) { - var searchPhaseExecutionException = new SearchPhaseExecutionException("phase", "error", 
ShardSearchFailure.EMPTY_ARRAY); - when(mockClient.search(any())).thenThrow(searchPhaseExecutionException); + final var mockClient = mock(Client.class); + var searchPhaseExecutionException = new SearchPhaseExecutionException("phase", "error", ShardSearchFailure.EMPTY_ARRAY); + when(mockClient.search(any())).thenThrow(searchPhaseExecutionException); - var request = createSearchRequest(); + var request = createSearchRequest(); - ElasticsearchException exception = expectThrows( - ElasticsearchException.class, - () -> ChunkedTrainedModelRestorer.retryingSearch(mockClient, "", request, 1, new TimeValue(1, TimeUnit.NANOSECONDS)) - ); + ElasticsearchException exception = expectThrows( + ElasticsearchException.class, + () -> ChunkedTrainedModelRestorer.retryingSearch(mockClient, "", request, 1, new TimeValue(1, TimeUnit.NANOSECONDS)) + ); - assertThat(ExceptionsHelper.unwrapCause(exception), is(exception)); - assertThat(ExceptionsHelper.unwrapCause(exception), instanceOf(ElasticsearchException.class)); - verify(mockClient, times(2)).search(any()); - } + assertThat(ExceptionsHelper.unwrapCause(exception), is(exception)); + assertThat(ExceptionsHelper.unwrapCause(exception), instanceOf(ElasticsearchException.class)); + verify(mockClient, times(2)).search(any()); } public void testRetryingSearch_ThrowsIllegalArgumentExceptionIgnoringRetries() { - try (var mockClient = mock(Client.class)) { - var exception = new IllegalArgumentException("Error"); - when(mockClient.search(any())).thenThrow(exception); + final var mockClient = mock(Client.class); + var exception = new IllegalArgumentException("Error"); + when(mockClient.search(any())).thenThrow(exception); - var request = createSearchRequest(); + var request = createSearchRequest(); - IllegalArgumentException thrownException = expectThrows( - IllegalArgumentException.class, - () -> ChunkedTrainedModelRestorer.retryingSearch(mockClient, "", request, 1, new TimeValue(1, TimeUnit.NANOSECONDS)) - ); + IllegalArgumentException 
thrownException = expectThrows( + IllegalArgumentException.class, + () -> ChunkedTrainedModelRestorer.retryingSearch(mockClient, "", request, 1, new TimeValue(1, TimeUnit.NANOSECONDS)) + ); - assertThat(thrownException, is(exception)); - verify(mockClient, times(1)).search(any()); - } + assertThat(thrownException, is(exception)); + verify(mockClient, times(1)).search(any()); } public void testRetryingSearch_ThrowsSearchPhaseExceptionOnce_ThenReturnsResponse() throws InterruptedException { - try (var mockClient = mock(Client.class)) { - var mockSearchResponse = mock(SearchResponse.class, RETURNS_DEEP_STUBS); + final var mockClient = mock(Client.class); + var mockSearchResponse = mock(SearchResponse.class, RETURNS_DEEP_STUBS); - PlainActionFuture searchFuture = new PlainActionFuture<>(); - searchFuture.onResponse(mockSearchResponse); + PlainActionFuture searchFuture = new PlainActionFuture<>(); + searchFuture.onResponse(mockSearchResponse); - var searchPhaseException = new SearchPhaseExecutionException("phase", "error", ShardSearchFailure.EMPTY_ARRAY); - when(mockClient.search(any())).thenThrow(searchPhaseException).thenReturn(searchFuture); + var searchPhaseException = new SearchPhaseExecutionException("phase", "error", ShardSearchFailure.EMPTY_ARRAY); + when(mockClient.search(any())).thenThrow(searchPhaseException).thenReturn(searchFuture); - var request = createSearchRequest(); + var request = createSearchRequest(); - assertThat( - ChunkedTrainedModelRestorer.retryingSearch(mockClient, "", request, 1, new TimeValue(1, TimeUnit.NANOSECONDS)), - is(mockSearchResponse) - ); + assertThat( + ChunkedTrainedModelRestorer.retryingSearch(mockClient, "", request, 1, new TimeValue(1, TimeUnit.NANOSECONDS)), + is(mockSearchResponse) + ); - verify(mockClient, times(2)).search(any()); - } + verify(mockClient, times(2)).search(any()); } private static SearchRequest createSearchRequest() { diff --git 
a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/persistence/TrainedModelProviderTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/persistence/TrainedModelProviderTests.java index 8ae2015466b02..164d4efe6b6f5 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/persistence/TrainedModelProviderTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/persistence/TrainedModelProviderTests.java @@ -22,6 +22,7 @@ import org.elasticsearch.index.query.TermQueryBuilder; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.client.NoOpClient; +import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xpack.core.action.util.PageParams; import org.elasticsearch.xpack.core.ml.action.GetTrainedModelsAction; @@ -346,7 +347,8 @@ public void testGetDefinitionFromDocs() { } public void testStoreTrainedModelConfigCallsClientExecuteWithOperationCreate() { - try (var client = createMockClient()) { + try (var threadPool = createThreadPool()) { + final var client = createMockClient(threadPool); var config = TrainedModelConfigTests.createTestInstance("modelId").build(); var trainedModelProvider = new TrainedModelProvider(client, xContentRegistry()); var future = new PlainActionFuture(); @@ -357,7 +359,8 @@ public void testStoreTrainedModelConfigCallsClientExecuteWithOperationCreate() { } public void testStoreTrainedModelConfigCallsClientExecuteWithOperationCreateWhenAllowOverwriteIsFalse() { - try (var client = createMockClient()) { + try (var threadPool = createThreadPool()) { + final var client = createMockClient(threadPool); var config = TrainedModelConfigTests.createTestInstance("modelId").build(); var trainedModelProvider = new TrainedModelProvider(client, xContentRegistry()); var future = new PlainActionFuture(); @@ -368,7 +371,8 @@ public void 
testStoreTrainedModelConfigCallsClientExecuteWithOperationCreateWhen } public void testStoreTrainedModelConfigCallsClientExecuteWithOperationIndex() { - try (var client = createMockClient()) { + try (var threadPool = createThreadPool()) { + final var client = createMockClient(threadPool); var config = TrainedModelConfigTests.createTestInstance("modelId").build(); var trainedModelProvider = new TrainedModelProvider(client, xContentRegistry()); var future = new PlainActionFuture(); @@ -379,7 +383,8 @@ public void testStoreTrainedModelConfigCallsClientExecuteWithOperationIndex() { } public void testStoreTrainedModelWithDefinitionCallsClientExecuteWithOperationCreate() throws IOException { - try (var client = createMockClient()) { + try (var threadPool = createThreadPool()) { + final var client = createMockClient(threadPool); var config = createTrainedModelConfigWithDefinition("modelId"); var trainedModelProvider = new TrainedModelProvider(client, xContentRegistry()); var future = new PlainActionFuture(); @@ -390,7 +395,8 @@ public void testStoreTrainedModelWithDefinitionCallsClientExecuteWithOperationCr } public void testStoreTrainedModelWithDefinitionCallsClientExecuteWithOperationCreateWhenAllowOverwriteIsFalse() throws IOException { - try (var client = createMockClient()) { + try (var threadPool = createThreadPool()) { + final var client = createMockClient(threadPool); var config = createTrainedModelConfigWithDefinition("modelId"); var trainedModelProvider = new TrainedModelProvider(client, xContentRegistry()); var future = new PlainActionFuture(); @@ -401,7 +407,8 @@ public void testStoreTrainedModelWithDefinitionCallsClientExecuteWithOperationCr } public void testStoreTrainedModelWithDefinitionCallsClientExecuteWithOperationIndex() throws IOException { - try (var client = createMockClient()) { + try (var threadPool = createThreadPool()) { + final var client = createMockClient(threadPool); var config = createTrainedModelConfigWithDefinition("modelId"); var 
trainedModelProvider = new TrainedModelProvider(client, xContentRegistry()); var future = new PlainActionFuture(); @@ -412,7 +419,8 @@ public void testStoreTrainedModelWithDefinitionCallsClientExecuteWithOperationIn } public void testStoreTrainedModelDefinitionDocCallsClientExecuteWithOperationCreate() { - try (var client = createMockClient()) { + try (var threadPool = createThreadPool()) { + final var client = createMockClient(threadPool); var config = TrainedModelDefinitionDocTests.createDefinitionDocInstance(); var trainedModelProvider = new TrainedModelProvider(client, xContentRegistry()); var future = new PlainActionFuture(); @@ -423,7 +431,8 @@ public void testStoreTrainedModelDefinitionDocCallsClientExecuteWithOperationCre } public void testStoreTrainedModelDefinitionDocCallsClientExecuteWithOperationCreateWhenAllowOverwriteIsFalse() { - try (var client = createMockClient()) { + try (var threadPool = createThreadPool()) { + final var client = createMockClient(threadPool); var config = TrainedModelDefinitionDocTests.createDefinitionDocInstance(); var trainedModelProvider = new TrainedModelProvider(client, xContentRegistry()); var future = new PlainActionFuture(); @@ -434,7 +443,8 @@ public void testStoreTrainedModelDefinitionDocCallsClientExecuteWithOperationCre } public void testStoreTrainedModelDefinitionDocCallsClientExecuteWithOperationIndex() { - try (var client = createMockClient()) { + try (var threadPool = createThreadPool()) { + final var client = createMockClient(threadPool); var config = TrainedModelDefinitionDocTests.createDefinitionDocInstance(); var trainedModelProvider = new TrainedModelProvider(client, xContentRegistry()); var future = new PlainActionFuture(); @@ -445,7 +455,8 @@ public void testStoreTrainedModelDefinitionDocCallsClientExecuteWithOperationInd } public void testStoreTrainedModelVocabularyCallsClientExecuteWithOperationCreate() { - try (var client = createMockClient()) { + try (var threadPool = createThreadPool()) { + final var 
client = createMockClient(threadPool); var vocab = createVocabulary(); var trainedModelProvider = new TrainedModelProvider(client, xContentRegistry()); var future = new PlainActionFuture(); @@ -456,7 +467,8 @@ public void testStoreTrainedModelVocabularyCallsClientExecuteWithOperationCreate } public void testStoreTrainedModelVocabularyCallsClientExecuteWithOperationCreateWhenAllowOverwritingIsFalse() { - try (var client = createMockClient()) { + try (var threadPool = createThreadPool()) { + final var client = createMockClient(threadPool); var vocab = createVocabulary(); var trainedModelProvider = new TrainedModelProvider(client, xContentRegistry()); var future = new PlainActionFuture(); @@ -467,7 +479,8 @@ public void testStoreTrainedModelVocabularyCallsClientExecuteWithOperationCreate } public void testStoreTrainedModelVocabularyCallsClientExecuteWithOperationIndex() { - try (var client = createMockClient()) { + try (var threadPool = createThreadPool()) { + final var client = createMockClient(threadPool); var vocab = createVocabulary(); var trainedModelProvider = new TrainedModelProvider(client, xContentRegistry()); var future = new PlainActionFuture(); @@ -478,7 +491,8 @@ public void testStoreTrainedModelVocabularyCallsClientExecuteWithOperationIndex( } public void testStoreTrainedModelMetadataCallsClientExecuteWithOperationCreate() { - try (var client = createMockClient()) { + try (var threadPool = createThreadPool()) { + final var client = createMockClient(threadPool); var metadata = TrainedModelMetadataTests.randomInstance(); var trainedModelProvider = new TrainedModelProvider(client, xContentRegistry()); var future = new PlainActionFuture(); @@ -489,7 +503,8 @@ public void testStoreTrainedModelMetadataCallsClientExecuteWithOperationCreate() } public void testStoreTrainedModelMetadataCallsClientExecuteWithOperationCreateWhenAllowOverwritingIsFalse() { - try (var client = createMockClient()) { + try (var threadPool = createThreadPool()) { + final var client = 
createMockClient(threadPool); var metadata = TrainedModelMetadataTests.randomInstance(); var trainedModelProvider = new TrainedModelProvider(client, xContentRegistry()); var future = new PlainActionFuture(); @@ -500,7 +515,8 @@ public void testStoreTrainedModelMetadataCallsClientExecuteWithOperationCreateWh } public void testStoreTrainedModelMetadataCallsClientExecuteWithOperationIndex() { - try (var client = createMockClient()) { + try (var threadPool = createThreadPool()) { + final var client = createMockClient(threadPool); var metadata = TrainedModelMetadataTests.randomInstance(); var trainedModelProvider = new TrainedModelProvider(client, xContentRegistry()); var future = new PlainActionFuture(); @@ -520,10 +536,8 @@ private TrainedModelConfig createTrainedModelConfigWithDefinition(String modelId return TrainedModelConfigTests.createTestInstance(modelId).setDefinitionFromBytes(bytes).build(); } - private Client createMockClient() { - var noOpClient = new NoOpClient(getTestName()); - - return spy(noOpClient); + private Client createMockClient(ThreadPool threadPool) { + return spy(new NoOpClient(threadPool)); } private void assertThatIndexRequestHasOperation(Client client, DocWriteRequest.OpType operation) { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/support/BaseMlIntegTestCase.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/support/BaseMlIntegTestCase.java index 47ed0e9e269ab..e9a89a81f62e2 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/support/BaseMlIntegTestCase.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/support/BaseMlIntegTestCase.java @@ -122,11 +122,6 @@ public abstract class BaseMlIntegTestCase extends ESIntegTestCase { // all the tasks that should be excluded from the cleanup jobs because they are not related to the tests. 
private static final Set UNRELATED_TASKS = Set.of(TransportListTasksAction.TYPE.name(), HealthNode.TASK_NAME); - @Override - protected boolean ignoreExternalCluster() { - return true; - } - @Override protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { Settings.Builder settings = Settings.builder().put(super.nodeSettings(nodeOrdinal, otherSettings)); diff --git a/x-pack/plugin/monitoring/src/internalClusterTest/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterIT.java b/x-pack/plugin/monitoring/src/internalClusterTest/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterIT.java index 256d12cd07510..36246902e5597 100644 --- a/x-pack/plugin/monitoring/src/internalClusterTest/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterIT.java +++ b/x-pack/plugin/monitoring/src/internalClusterTest/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterIT.java @@ -128,11 +128,6 @@ public void stopWebServer() { } } - @Override - protected boolean ignoreExternalCluster() { - return true; - } - private Settings.Builder secureSettings(String password) { mockSecureSettings.setString("xpack.monitoring.exporters._http.auth.secure_password", password); return baseSettings().setSecureSettings(mockSecureSettings); diff --git a/x-pack/plugin/monitoring/src/internalClusterTest/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterSslIT.java b/x-pack/plugin/monitoring/src/internalClusterTest/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterSslIT.java index db37b09095e61..3b61b0496c64d 100644 --- a/x-pack/plugin/monitoring/src/internalClusterTest/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterSslIT.java +++ b/x-pack/plugin/monitoring/src/internalClusterTest/java/org/elasticsearch/xpack/monitoring/exporter/http/HttpExporterSslIT.java @@ -52,11 +52,6 @@ public static void cleanUpStatics() { } } - @Override - protected boolean ignoreExternalCluster() { - return true; - } - 
@Override protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { final Path truststore = getDataPath("/org/elasticsearch/xpack/monitoring/exporter/http/testnode.jks"); diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporterTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporterTests.java index a30975be1055d..2d692e977f3d5 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporterTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporterTests.java @@ -51,7 +51,8 @@ public void testLocalExporterDoesNotInteractWithClusterServiceUntilStateIsRecove final Exporter.Config config = new Exporter.Config("name", "type", Settings.EMPTY, clusterService, licenseState); final CleanerService cleanerService = mock(CleanerService.class); final MonitoringMigrationCoordinator migrationCoordinator = new MonitoringMigrationCoordinator(); - try (Client client = new NoOpClient(getTestName())) { + try (var threadPool = createThreadPool()) { + final var client = new NoOpClient(threadPool); final LocalExporter exporter = new LocalExporter(config, client, migrationCoordinator, cleanerService); final TimeValue retention = TimeValue.timeValueDays(randomIntBetween(1, 90)); diff --git a/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/ProfilingTestCase.java b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/ProfilingTestCase.java index 199662f9ca1f6..29981c8e2f2a3 100644 --- a/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/ProfilingTestCase.java +++ b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/ProfilingTestCase.java @@ -66,11 +66,6 @@ protected boolean addMockHttpTransport() { return 
false; // enable http } - @Override - protected boolean ignoreExternalCluster() { - return true; - } - private void indexDoc(String index, String id, Map source) { DocWriteResponse indexResponse = client().prepareIndex(index).setId(id).setSource(source).setCreate(true).get(); assertEquals(RestStatus.CREATED, indexResponse.status()); diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/StopWatch.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/StopWatch.java new file mode 100644 index 0000000000000..c423fe12f3581 --- /dev/null +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/StopWatch.java @@ -0,0 +1,35 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.profiling; + +/** + * Measures time and logs it in milliseconds. + */ +public final class StopWatch { + private final String name; + private final long start; + + public StopWatch(String name) { + this.name = name; + start = System.nanoTime(); + } + + /** + * Return a textual report including the name and the number of elapsed milliseconds since object creation. + */ + public String report() { + return name + " took [" + millis() + " ms]."; + } + + /** + * Return number of elapsed milliseconds since object creation. 
+ */ + public double millis() { + return (System.nanoTime() - start) / 1_000_000.0d; + } +} diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetFlamegraphAction.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetFlamegraphAction.java index f26a6b1fb3a84..b791684bec233 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetFlamegraphAction.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetFlamegraphAction.java @@ -44,20 +44,14 @@ public TransportGetFlamegraphAction(NodeClient nodeClient, TransportService tran @Override protected void doExecute(Task task, GetStackTracesRequest request, ActionListener listener) { Client client = new ParentTaskAssigningClient(this.nodeClient, transportService.getLocalNode(), task); - long start = System.nanoTime(); + StopWatch watch = new StopWatch("getFlamegraphAction"); client.execute(GetStackTracesAction.INSTANCE, request, new ActionListener<>() { @Override public void onResponse(GetStackTracesResponse response) { - long responseStart = System.nanoTime(); try { + StopWatch processingWatch = new StopWatch("Processing response"); GetFlamegraphResponse flamegraphResponse = buildFlamegraph(response); - log.debug( - "getFlamegraphAction took [" - + (System.nanoTime() - start) / 1_000_000.0d - + "] ms (processing response: [" - + (System.nanoTime() - responseStart) / 1_000_000.0d - + "] ms." 
- ); + log.debug(() -> watch.report() + " " + processingWatch.report()); listener.onResponse(flamegraphResponse); } catch (Exception ex) { listener.onFailure(ex); diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetStackTracesAction.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetStackTracesAction.java index e15792adc489d..8b9fce4d04040 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetStackTracesAction.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetStackTracesAction.java @@ -126,7 +126,7 @@ public TransportGetStackTracesAction( @Override protected void doExecute(Task submitTask, GetStackTracesRequest request, ActionListener submitListener) { licenseChecker.requireSupportedLicense(); - long start = System.nanoTime(); + StopWatch watch = new StopWatch("getResampledIndex"); Client client = new ParentTaskAssigningClient(this.nodeClient, transportService.getLocalNode(), submitTask); EventsIndex mediumDownsampled = EventsIndex.MEDIUM_DOWNSAMPLED; client.prepareSearch(mediumDownsampled.getName()) @@ -143,7 +143,7 @@ protected void doExecute(Task submitTask, GetStackTracesRequest request, ActionL mediumDownsampled, resampledIndex ); - log.debug("getResampledIndex took [" + (System.nanoTime() - start) / 1_000_000.0d + " ms]."); + log.debug(() -> watch.report()); searchEventGroupByStackTrace(client, request, resampledIndex, submitListener); }, e -> { // All profiling-events data streams are created lazily. 
In a relatively empty cluster it can happen that there are so few @@ -166,7 +166,7 @@ private void searchEventGroupByStackTrace( EventsIndex eventsIndex, ActionListener submitListener ) { - long start = System.nanoTime(); + StopWatch watch = new StopWatch("searchEventGroupByStackTrace"); GetStackTracesResponseBuilder responseBuilder = new GetStackTracesResponseBuilder(); responseBuilder.setSampleRate(eventsIndex.getSampleRate()); client.prepareSearch(eventsIndex.getName()) @@ -216,7 +216,7 @@ private void searchEventGroupByStackTrace( totalFinalCount, stackTraceEvents.size() ); - log.debug("searchEventGroupByStackTrace took [" + (System.nanoTime() - start) / 1_000_000.0d + " ms]."); + log.debug(() -> watch.report()); if (stackTraceEvents.isEmpty() == false) { responseBuilder.setStart(Instant.ofEpochMilli(minTime)); responseBuilder.setEnd(Instant.ofEpochMilli(maxTime)); @@ -287,7 +287,7 @@ private class StackTraceHandler { private final Set stackFrameIds = new ConcurrentSkipListSet<>(); private final Set executableIds = new ConcurrentSkipListSet<>(); private final AtomicInteger totalFrames = new AtomicInteger(); - private final long start = System.nanoTime(); + private final StopWatch watch = new StopWatch("retrieveStackTraces"); private StackTraceHandler( ClusterState clusterState, @@ -334,7 +334,7 @@ public void onResponse(MultiGetResponse multiGetItemResponses) { stackFrameIds.size(), executableIds.size() ); - log.debug("retrieveStackTraces took [" + (System.nanoTime() - start) / 1_000_000.0d + " ms]."); + log.debug(() -> watch.report()); retrieveStackTraceDetails( clusterState, client, @@ -409,7 +409,7 @@ private static class DetailsHandler { private final Map executables; private final Map stackFrames; private final AtomicInteger expectedSlices; - private final long start = System.nanoTime(); + private final StopWatch watch = new StopWatch("retrieveStackTraceDetails"); private DetailsHandler( GetStackTracesResponseBuilder builder, @@ -479,7 +479,7 @@ public 
void mayFinish() { builder.setExecutables(executables); builder.setStackFrames(stackFrames); log.debug("retrieveStackTraceDetails found [{}] stack frames, [{}] executables.", stackFrames.size(), executables.size()); - log.debug("retrieveStackTraceDetails took [" + (System.nanoTime() - start) / 1_000_000.0d + " ms]."); + log.debug(() -> watch.report()); submitListener.onResponse(builder.build()); } } diff --git a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/index/IndexResolver.java b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/index/IndexResolver.java index 0f887ef4fc105..291722f42ca94 100644 --- a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/index/IndexResolver.java +++ b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/index/IndexResolver.java @@ -651,8 +651,7 @@ public void resolveAsSeparateMappings( } private static GetAliasesRequest createGetAliasesRequest(FieldCapabilitiesResponse response, boolean includeFrozen) { - return new GetAliasesRequest().local(true) - .aliases("*") + return new GetAliasesRequest().aliases("*") .indices(response.getIndices()) .indicesOptions(includeFrozen ? 
FIELD_CAPS_FROZEN_INDICES_OPTIONS : FIELD_CAPS_INDICES_OPTIONS); } diff --git a/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRankCoordinatorContext.java b/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRankCoordinatorContext.java index d7b96ad439501..50f3646264a92 100644 --- a/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRankCoordinatorContext.java +++ b/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRankCoordinatorContext.java @@ -127,12 +127,25 @@ protected boolean lessThan(RRFRankDoc a, RRFRankDoc b) { } } - // sort the results based on rrf score, tiebreaker based on smaller shard then smaller doc id + // sort the results based on rrf score, tiebreaker based on + // larger individual query score from 1 to n, smaller shard then smaller doc id RRFRankDoc[] sortedResults = results.values().toArray(RRFRankDoc[]::new); Arrays.sort(sortedResults, (RRFRankDoc rrf1, RRFRankDoc rrf2) -> { if (rrf1.score != rrf2.score) { return rrf1.score < rrf2.score ? 1 : -1; } + assert rrf1.positions.length == rrf2.positions.length; + for (int qi = 0; qi < rrf1.positions.length; ++qi) { + if (rrf1.positions[qi] != NO_RANK && rrf2.positions[qi] != NO_RANK) { + if (rrf1.scores[qi] != rrf2.scores[qi]) { + return rrf1.scores[qi] < rrf2.scores[qi] ? 1 : -1; + } + } else if (rrf1.positions[qi] != NO_RANK) { + return -1; + } else if (rrf2.positions[qi] != NO_RANK) { + return 1; + } + } if (rrf1.shardIndex != rrf2.shardIndex) { return rrf1.shardIndex < rrf2.shardIndex ? 
-1 : 1; } diff --git a/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRankShardContext.java b/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRankShardContext.java index e251207bdcb2a..e22e328193700 100644 --- a/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRankShardContext.java +++ b/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRankShardContext.java @@ -17,6 +17,8 @@ import java.util.List; import java.util.Map; +import static org.elasticsearch.search.rank.RankDoc.NO_RANK; + /** * Executes queries and generates results on the shard for RRF. */ @@ -74,6 +76,18 @@ public RRFRankShardResult combine(List rankResults) { if (rrf1.score != rrf2.score) { return rrf1.score < rrf2.score ? 1 : -1; } + assert rrf1.positions.length == rrf2.positions.length; + for (int qi = 0; qi < rrf1.positions.length; ++qi) { + if (rrf1.positions[qi] != NO_RANK && rrf2.positions[qi] != NO_RANK) { + if (rrf1.scores[qi] != rrf2.scores[qi]) { + return rrf1.scores[qi] < rrf2.scores[qi] ? 1 : -1; + } + } else if (rrf1.positions[qi] != NO_RANK) { + return -1; + } else if (rrf2.positions[qi] != NO_RANK) { + return 1; + } + } return rrf1.doc < rrf2.doc ? 
-1 : 1; }); // trim the results to window size diff --git a/x-pack/plugin/rank-rrf/src/test/java/org/elasticsearch/xpack/rank/rrf/RRFRankContextTests.java b/x-pack/plugin/rank-rrf/src/test/java/org/elasticsearch/xpack/rank/rrf/RRFRankContextTests.java index f1f19a371ed07..5cb89c071c767 100644 --- a/x-pack/plugin/rank-rrf/src/test/java/org/elasticsearch/xpack/rank/rrf/RRFRankContextTests.java +++ b/x-pack/plugin/rank-rrf/src/test/java/org/elasticsearch/xpack/rank/rrf/RRFRankContextTests.java @@ -239,31 +239,338 @@ public void testCoordinatorRank() { expected.score = 0.6666667f; assertRDEquals(expected, (RRFRankDoc) std.scoreDocs()[0]); - expected = new RRFRankDoc(1, 1, 2); + expected = new RRFRankDoc(3, 1, 2); expected.rank = 2; + expected.positions[0] = 0; + expected.positions[1] = NO_RANK; + expected.scores[0] = 10.0f; + expected.scores[1] = 0.0f; + expected.score = 0.5f; + assertRDEquals(expected, (RRFRankDoc) std.scoreDocs()[1]); + + expected = new RRFRankDoc(1, 1, 2); + expected.rank = 3; expected.positions[0] = NO_RANK; expected.positions[1] = 0; expected.scores[0] = 0.0f; expected.scores[1] = 8.0f; expected.score = 0.5f; + assertRDEquals(expected, (RRFRankDoc) std.scoreDocs()[2]); + + expected = new RRFRankDoc(2, 2, 2); + expected.rank = 4; + expected.positions[0] = 3; + expected.positions[1] = 3; + expected.scores[0] = 8.5f; + expected.scores[1] = 6.5f; + expected.score = 0.4f; + assertRDEquals(expected, (RRFRankDoc) std.scoreDocs()[3]); + } + + public void testShardTieBreaker() { + RRFRankShardContext context = new RRFRankShardContext(null, 0, 10, 1); + + List topDocs = List.of( + new TopDocs(null, new ScoreDoc[] { new ScoreDoc(1, 10.0f, -1), new ScoreDoc(2, 9.0f, -1) }), + new TopDocs(null, new ScoreDoc[] { new ScoreDoc(2, 8.0f, -1), new ScoreDoc(1, 7.0f, -1) }) + ); + + RRFRankShardResult result = context.combine(topDocs); + assertEquals(2, result.queryCount); + assertEquals(2, result.rrfRankDocs.length); + + RRFRankDoc expected = new RRFRankDoc(1, -1, 
2); + expected.rank = 1; + expected.positions[0] = 0; + expected.positions[1] = 1; + expected.scores[0] = 10.0f; + expected.scores[1] = 7.0f; + expected.score = Float.NaN; + assertRDEquals(expected, result.rrfRankDocs[0]); + + expected = new RRFRankDoc(2, -1, 2); + expected.rank = 2; + expected.positions[0] = 1; + expected.positions[1] = 0; + expected.scores[0] = 9.0f; + expected.scores[1] = 8.0f; + expected.score = Float.NaN; + assertRDEquals(expected, result.rrfRankDocs[1]); + + topDocs = List.of( + new TopDocs(null, new ScoreDoc[] { new ScoreDoc(1, 10.0f, -1), new ScoreDoc(2, 9.0f, -1), new ScoreDoc(3, 9.0f, -1) }), + new TopDocs(null, new ScoreDoc[] { new ScoreDoc(4, 11.0f, -1), new ScoreDoc(3, 9.0f, -1), new ScoreDoc(2, 7.0f, -1) }) + ); + + result = context.combine(topDocs); + assertEquals(2, result.queryCount); + assertEquals(4, result.rrfRankDocs.length); + + expected = new RRFRankDoc(3, -1, 2); + expected.rank = 1; + expected.positions[0] = 2; + expected.positions[1] = 1; + expected.scores[0] = 9.0f; + expected.scores[1] = 9.0f; + expected.score = Float.NaN; + assertRDEquals(expected, result.rrfRankDocs[0]); + + expected = new RRFRankDoc(2, -1, 2); + expected.rank = 2; + expected.positions[0] = 1; + expected.positions[1] = 2; + expected.scores[0] = 9.0f; + expected.scores[1] = 7.0f; + expected.score = Float.NaN; + assertRDEquals(expected, result.rrfRankDocs[1]); + + expected = new RRFRankDoc(1, -1, 2); + expected.rank = 3; + expected.positions[0] = 0; + expected.positions[1] = -1; + expected.scores[0] = 10.0f; + expected.scores[1] = 0.0f; + expected.score = Float.NaN; + assertRDEquals(expected, result.rrfRankDocs[2]); + + expected = new RRFRankDoc(4, -1, 2); + expected.rank = 4; + expected.positions[0] = -1; + expected.positions[1] = 0; + expected.scores[0] = 0.0f; + expected.scores[1] = 11.0f; + expected.score = Float.NaN; + assertRDEquals(expected, result.rrfRankDocs[3]); + + topDocs = List.of( + new TopDocs(null, new ScoreDoc[] { new ScoreDoc(1, 10.0f, 
-1), new ScoreDoc(3, 3.0f, -1) }), + new TopDocs(null, new ScoreDoc[] { new ScoreDoc(2, 8.0f, -1), new ScoreDoc(4, 5.0f, -1) }) + ); + + result = context.combine(topDocs); + assertEquals(2, result.queryCount); + assertEquals(4, result.rrfRankDocs.length); + + expected = new RRFRankDoc(1, -1, 2); + expected.rank = 1; + expected.positions[0] = 0; + expected.positions[1] = -1; + expected.scores[0] = 10.0f; + expected.scores[1] = 0.0f; + expected.score = Float.NaN; + assertRDEquals(expected, result.rrfRankDocs[0]); + + expected = new RRFRankDoc(2, -1, 2); + expected.rank = 2; + expected.positions[0] = -1; + expected.positions[1] = 0; + expected.scores[0] = 0.0f; + expected.scores[1] = 8.0f; + expected.score = Float.NaN; + assertRDEquals(expected, result.rrfRankDocs[1]); + + expected = new RRFRankDoc(3, -1, 2); + expected.rank = 3; + expected.positions[0] = 1; + expected.positions[1] = -1; + expected.scores[0] = 3.0f; + expected.scores[1] = 0.0f; + expected.score = Float.NaN; + assertRDEquals(expected, result.rrfRankDocs[2]); + + expected = new RRFRankDoc(4, -1, 2); + expected.rank = 4; + expected.positions[0] = -1; + expected.positions[1] = 1; + expected.scores[0] = 0.0f; + expected.scores[1] = 5.0f; + expected.score = Float.NaN; + assertRDEquals(expected, result.rrfRankDocs[3]); + } + + public void testCoordinatorRankTieBreaker() { + RRFRankCoordinatorContext context = new RRFRankCoordinatorContext(4, 0, 5, 1); + + QuerySearchResult qsr0 = new QuerySearchResult(); + qsr0.setShardIndex(1); + RRFRankDoc rd11 = new RRFRankDoc(1, -1, 2); + rd11.positions[0] = 0; + rd11.positions[1] = 0; + rd11.scores[0] = 10.0f; + rd11.scores[1] = 7.0f; + qsr0.setRankShardResult(new RRFRankShardResult(2, new RRFRankDoc[] { rd11 })); + + QuerySearchResult qsr1 = new QuerySearchResult(); + qsr1.setShardIndex(2); + RRFRankDoc rd21 = new RRFRankDoc(1, -1, 2); + rd21.positions[0] = 0; + rd21.positions[1] = 0; + rd21.scores[0] = 9.0f; + rd21.scores[1] = 8.0f; + qsr1.setRankShardResult(new 
RRFRankShardResult(2, new RRFRankDoc[] { rd21 })); + + TopDocsStats tds = new TopDocsStats(0); + SortedTopDocs std = context.rank(List.of(qsr0, qsr1), tds); + + assertEquals(2, tds.fetchHits); + assertEquals(2, std.scoreDocs().length); + + RRFRankDoc expected = new RRFRankDoc(1, 1, 2); + expected.rank = 1; + expected.positions[0] = 0; + expected.positions[1] = 1; + expected.scores[0] = 10.0f; + expected.scores[1] = 7.0f; + expected.score = 0.8333333730697632f; + assertRDEquals(expected, (RRFRankDoc) std.scoreDocs()[0]); + + expected = new RRFRankDoc(1, 2, 2); + expected.rank = 2; + expected.positions[0] = 1; + expected.positions[1] = 0; + expected.scores[0] = 9.0f; + expected.scores[1] = 8.0f; + expected.score = 0.8333333730697632f; assertRDEquals(expected, (RRFRankDoc) std.scoreDocs()[1]); - expected = new RRFRankDoc(3, 1, 2); + qsr0 = new QuerySearchResult(); + qsr0.setShardIndex(1); + rd11 = new RRFRankDoc(1, -1, 2); + rd11.positions[0] = 0; + rd11.positions[1] = -1; + rd11.scores[0] = 10.0f; + rd11.scores[1] = 0.0f; + RRFRankDoc rd12 = new RRFRankDoc(2, -1, 2); + rd12.positions[0] = 0; + rd12.positions[1] = 1; + rd12.scores[0] = 9.0f; + rd12.scores[1] = 7.0f; + qsr0.setRankShardResult(new RRFRankShardResult(2, new RRFRankDoc[] { rd11, rd12 })); + + qsr1 = new QuerySearchResult(); + qsr1.setShardIndex(2); + rd21 = new RRFRankDoc(1, -1, 2); + rd21.positions[0] = -1; + rd21.positions[1] = 0; + rd21.scores[0] = 0.0f; + rd21.scores[1] = 11.0f; + RRFRankDoc rd22 = new RRFRankDoc(2, -1, 2); + rd22.positions[0] = 0; + rd22.positions[1] = 1; + rd22.scores[0] = 9.0f; + rd22.scores[1] = 9.0f; + qsr1.setRankShardResult(new RRFRankShardResult(2, new RRFRankDoc[] { rd21, rd22 })); + + tds = new TopDocsStats(0); + std = context.rank(List.of(qsr0, qsr1), tds); + + assertEquals(4, tds.fetchHits); + assertEquals(4, std.scoreDocs().length); + + expected = new RRFRankDoc(2, 2, 2); + expected.rank = 1; + expected.positions[0] = 2; + expected.positions[1] = 1; + expected.scores[0] = 
9.0f; + expected.scores[1] = 9.0f; + expected.score = 0.5833333730697632f; + assertRDEquals(expected, (RRFRankDoc) std.scoreDocs()[0]); + + expected = new RRFRankDoc(2, 1, 2); + expected.rank = 2; + expected.positions[0] = 1; + expected.positions[1] = 2; + expected.scores[0] = 9.0f; + expected.scores[1] = 7.0f; + expected.score = 0.5833333730697632f; + assertRDEquals(expected, (RRFRankDoc) std.scoreDocs()[1]); + + expected = new RRFRankDoc(1, 1, 2); expected.rank = 3; expected.positions[0] = 0; - expected.positions[1] = NO_RANK; + expected.positions[1] = -1; + expected.scores[0] = 10.0f; + expected.scores[1] = 0.0f; + expected.score = 0.5f; + assertRDEquals(expected, (RRFRankDoc) std.scoreDocs()[2]); + + expected = new RRFRankDoc(1, 2, 2); + expected.rank = 4; + expected.positions[0] = -1; + expected.positions[1] = 0; + expected.scores[0] = 0.0f; + expected.scores[1] = 11.0f; + expected.score = 0.5f; + assertRDEquals(expected, (RRFRankDoc) std.scoreDocs()[3]); + + qsr0 = new QuerySearchResult(); + qsr0.setShardIndex(1); + rd11 = new RRFRankDoc(1, -1, 2); + rd11.positions[0] = 0; + rd11.positions[1] = -1; + rd11.scores[0] = 10.0f; + rd11.scores[1] = 0.0f; + rd12 = new RRFRankDoc(2, -1, 2); + rd12.positions[0] = -1; + rd12.positions[1] = 0; + rd12.scores[0] = 0.0f; + rd12.scores[1] = 12.0f; + qsr0.setRankShardResult(new RRFRankShardResult(2, new RRFRankDoc[] { rd11, rd12 })); + + qsr1 = new QuerySearchResult(); + qsr1.setShardIndex(2); + rd21 = new RRFRankDoc(1, -1, 2); + rd21.positions[0] = 0; + rd21.positions[1] = -1; + rd21.scores[0] = 3.0f; + rd21.scores[1] = 0.0f; + rd22 = new RRFRankDoc(2, -1, 2); + rd22.positions[0] = -1; + rd22.positions[1] = 0; + rd22.scores[0] = 0.0f; + rd22.scores[1] = 5.0f; + qsr1.setRankShardResult(new RRFRankShardResult(2, new RRFRankDoc[] { rd21, rd22 })); + + tds = new TopDocsStats(0); + std = context.rank(List.of(qsr0, qsr1), tds); + + assertEquals(4, tds.fetchHits); + assertEquals(4, std.scoreDocs().length); + + expected = new 
RRFRankDoc(1, 1, 2); + expected.rank = 1; + expected.positions[0] = 0; + expected.positions[1] = -1; expected.scores[0] = 10.0f; expected.scores[1] = 0.0f; expected.score = 0.5f; + assertRDEquals(expected, (RRFRankDoc) std.scoreDocs()[0]); + + expected = new RRFRankDoc(2, 1, 2); + expected.rank = 2; + expected.positions[0] = -1; + expected.positions[1] = 0; + expected.scores[0] = 0.0f; + expected.scores[1] = 12.0f; + expected.score = 0.5f; + assertRDEquals(expected, (RRFRankDoc) std.scoreDocs()[1]); + + expected = new RRFRankDoc(1, 2, 2); + expected.rank = 3; + expected.positions[0] = 1; + expected.positions[1] = -1; + expected.scores[0] = 3.0f; + expected.scores[1] = 0.0f; + expected.score = 0.3333333333333333f; assertRDEquals(expected, (RRFRankDoc) std.scoreDocs()[2]); expected = new RRFRankDoc(2, 2, 2); expected.rank = 4; - expected.positions[0] = 3; - expected.positions[1] = 3; - expected.scores[0] = 8.5f; - expected.scores[1] = 6.5f; - expected.score = 0.4f; + expected.positions[0] = -1; + expected.positions[1] = 1; + expected.scores[0] = 0.0f; + expected.scores[1] = 5.0f; + expected.score = 0.3333333333333333f; assertRDEquals(expected, (RRFRankDoc) std.scoreDocs()[3]); } } diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupJobTaskTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupJobTaskTests.java index c315218bdaab4..5befaafba0f8a 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupJobTaskTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupJobTaskTests.java @@ -64,7 +64,7 @@ public class RollupJobTaskTests extends ESTestCase { private ThreadPool pool; @Before - public void createThreadPool() { + public void createSuiteThreadPool() { pool = new TestThreadPool("test"); } @@ -291,7 +291,8 @@ public void testStartWhenStopping() throws InterruptedException { final CountDownLatch block = new CountDownLatch(1); 
final CountDownLatch unblock = new CountDownLatch(1); - try (NoOpClient client = getEmptySearchResponseClient(block, unblock)) { + try (var threadPool = createThreadPool()) { + final var client = getEmptySearchResponseClient(threadPool, block, unblock); SchedulerEngine schedulerEngine = mock(SchedulerEngine.class); AtomicInteger counter = new AtomicInteger(0); @@ -949,7 +950,8 @@ public void testStopWhenStopping() throws InterruptedException { RollupJob job = new RollupJob(ConfigTestHelpers.randomRollupJobConfig(random()), Collections.emptyMap()); final CountDownLatch block = new CountDownLatch(1); final CountDownLatch unblock = new CountDownLatch(1); - try (NoOpClient client = getEmptySearchResponseClient(block, unblock)) { + try (var threadPool = createThreadPool()) { + final var client = getEmptySearchResponseClient(threadPool, block, unblock); SchedulerEngine schedulerEngine = mock(SchedulerEngine.class); AtomicInteger counter = new AtomicInteger(0); @@ -1118,8 +1120,8 @@ private static void assertUnblockIn10s(CountDownLatch latch) { } } - private NoOpClient getEmptySearchResponseClient(CountDownLatch unblock, CountDownLatch block) { - return new NoOpClient(getTestName()) { + private NoOpClient getEmptySearchResponseClient(ThreadPool threadPool, CountDownLatch unblock, CountDownLatch block) { + return new NoOpClient(threadPool) { @SuppressWarnings("unchecked") @Override protected void doExecute( diff --git a/x-pack/plugin/security/qa/jwt-realm/src/javaRestTest/java/org/elasticsearch/xpack/security/authc/jwt/JwtRestIT.java b/x-pack/plugin/security/qa/jwt-realm/src/javaRestTest/java/org/elasticsearch/xpack/security/authc/jwt/JwtRestIT.java index a3d4c94e91882..db59bea999852 100644 --- a/x-pack/plugin/security/qa/jwt-realm/src/javaRestTest/java/org/elasticsearch/xpack/security/authc/jwt/JwtRestIT.java +++ b/x-pack/plugin/security/qa/jwt-realm/src/javaRestTest/java/org/elasticsearch/xpack/security/authc/jwt/JwtRestIT.java @@ -709,11 +709,11 @@ private SignedJWT 
signHmacJwt(JWTClaimsSet claimsSet, String hmacPassphrase) thr } // JWT construction - private JWTClaimsSet buildJwt(Map claims, Instant issueTime) { + static JWTClaimsSet buildJwt(Map claims, Instant issueTime) { return buildJwt(claims, issueTime, true, true); } - private JWTClaimsSet buildJwt(Map claims, Instant issueTime, boolean includeSub, boolean includeAud) { + static JWTClaimsSet buildJwt(Map claims, Instant issueTime, boolean includeSub, boolean includeAud) { final JWTClaimsSet.Builder builder = new JWTClaimsSet.Builder(); builder.issuer(randomAlphaOfLengthBetween(4, 24)); if (includeSub) { @@ -743,7 +743,7 @@ private JWTClaimsSet buildJwt(Map claims, Instant issueTime, boo return builder.build(); } - private SignedJWT signJWT(JWSSigner signer, String algorithm, JWTClaimsSet claimsSet) throws JOSEException { + static SignedJWT signJWT(JWSSigner signer, String algorithm, JWTClaimsSet claimsSet) throws JOSEException { final JWSHeader.Builder builder = new JWSHeader.Builder(JWSAlgorithm.parse(algorithm)); if (randomBoolean()) { builder.type(JOSEObjectType.JWT); @@ -775,13 +775,13 @@ private TestSecurityClient getSecurityClient(Consumer co } // Utility methods - private Map assertMap(Map response, ParseField field) { + static Map assertMap(Map response, ParseField field) { assertThat(response, hasKey(field.getPreferredName())); assertThat(response, hasEntry(is(field.getPreferredName()), instanceOf(Map.class))); return (Map) response.get(field.getPreferredName()); } - private List assertList(Map response, ParseField field) { + static List assertList(Map response, ParseField field) { assertThat(response, hasKey(field.getPreferredName())); assertThat(response, hasEntry(is(field.getPreferredName()), instanceOf(List.class))); return (List) response.get(field.getPreferredName()); diff --git a/x-pack/plugin/security/qa/jwt-realm/src/javaRestTest/java/org/elasticsearch/xpack/security/authc/jwt/JwtWithUnavailableSecurityIndexRestIT.java 
b/x-pack/plugin/security/qa/jwt-realm/src/javaRestTest/java/org/elasticsearch/xpack/security/authc/jwt/JwtWithUnavailableSecurityIndexRestIT.java new file mode 100644 index 0000000000000..015c66aea6164 --- /dev/null +++ b/x-pack/plugin/security/qa/jwt-realm/src/javaRestTest/java/org/elasticsearch/xpack/security/authc/jwt/JwtWithUnavailableSecurityIndexRestIT.java @@ -0,0 +1,310 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.security.authc.jwt; + +import com.nimbusds.jose.JOSEException; +import com.nimbusds.jose.crypto.RSASSASigner; +import com.nimbusds.jose.jwk.JWK; +import com.nimbusds.jose.jwk.JWKSet; +import com.nimbusds.jose.jwk.RSAKey; +import com.nimbusds.jwt.JWTClaimsSet; +import com.nimbusds.jwt.SignedJWT; + +import org.elasticsearch.client.Request; +import org.elasticsearch.client.RequestOptions; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.core.PathUtils; +import org.elasticsearch.core.Strings; +import org.elasticsearch.test.TestSecurityClient; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.MutableSettingsProvider; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; +import org.elasticsearch.test.cluster.util.resource.Resource; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.core.security.user.User; +import org.hamcrest.Matchers; +import org.junit.BeforeClass; +import org.junit.ClassRule; + +import java.io.FileNotFoundException; +import 
java.io.IOException; +import java.net.URISyntaxException; +import java.net.URL; +import java.nio.file.Path; +import java.text.ParseException; +import java.time.Instant; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.hamcrest.Matchers.hasEntry; +import static org.hamcrest.Matchers.instanceOf; + +public class JwtWithUnavailableSecurityIndexRestIT extends ESRestTestCase { + + // Using this to first test without, then with caching. Since caching is controlled by a static setting, we need a + // MutableSettingsProvider instance + private static final MutableSettingsProvider mutableSettingsForLastLoadCache = new MutableSettingsProvider(); + + @ClassRule + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .nodes(1) + .distribution(DistributionType.DEFAULT) + .setting("xpack.ml.enabled", "false") + .setting("xpack.license.self_generated.type", "trial") + .setting("xpack.security.enabled", "true") + .setting("xpack.security.transport.ssl.enabled", "false") + .setting("xpack.security.authc.token.enabled", "true") + .setting("xpack.security.authc.api_key.enabled", "true") + .setting("xpack.security.http.ssl.enabled", "true") + .setting("xpack.security.http.ssl.certificate", "http.crt") + .setting("xpack.security.http.ssl.key", "http.key") + .setting("xpack.security.http.ssl.key_passphrase", "http-password") + .setting("xpack.security.http.ssl.certificate_authorities", "ca.crt") + .setting("xpack.security.http.ssl.client_authentication", "optional") + .setting("xpack.security.authc.realms.jwt.jwt1.order", "1") + .setting("xpack.security.authc.realms.jwt.jwt1.allowed_issuer", "https://issuer.example.com/") + .setting("xpack.security.authc.realms.jwt.jwt1.allowed_audiences", "https://audience.example.com/") + .setting("xpack.security.authc.realms.jwt.jwt1.claims.principal", "sub") + .setting("xpack.security.authc.realms.jwt.jwt1.claims.dn", "dn") + 
.setting("xpack.security.authc.realms.jwt.jwt1.required_claims.token_use", "id") + .setting("xpack.security.authc.realms.jwt.jwt1.required_claims.version", "2.0") + .setting("xpack.security.authc.realms.jwt.jwt1.client_authentication.type", "NONE") + .setting("xpack.security.authc.realms.jwt.jwt1.pkc_jwkset_path", "rsa.jwkset") + .settings(mutableSettingsForLastLoadCache) + .configFile("http.key", Resource.fromClasspath("ssl/http.key")) + .configFile("http.crt", Resource.fromClasspath("ssl/http.crt")) + .configFile("ca.crt", Resource.fromClasspath("ssl/ca.crt")) + .configFile("rsa.jwkset", Resource.fromClasspath("jwk/rsa-public-jwkset.json")) + .user("admin_user", "admin-password") + .build(); + + private static Path httpCertificateAuthority; + private TestSecurityClient adminSecurityClient; + + @BeforeClass + public static void findTrustStore() throws Exception { + httpCertificateAuthority = findResource("/ssl/ca.crt"); + } + + @Override + protected boolean preserveClusterUponCompletion() { + return true; + } + + private static Path findResource(String name) throws FileNotFoundException, URISyntaxException { + final URL resource = JwtWithUnavailableSecurityIndexRestIT.class.getResource(name); + if (resource == null) { + throw new FileNotFoundException("Cannot find classpath resource " + name); + } + return PathUtils.get(resource.toURI()); + } + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + + @Override + protected String getProtocol() { + return "https"; + } + + @Override + protected Settings restAdminSettings() { + String token = basicAuthHeaderValue("admin_user", new SecureString("admin-password".toCharArray())); + return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", token).put(restSslSettings()).build(); + } + + @Override + protected Settings restClientSettings() { + return Settings.builder().put(super.restClientSettings()).put(restSslSettings()).build(); + } + + private Settings 
restSslSettings() { + return Settings.builder().put(CERTIFICATE_AUTHORITIES, httpCertificateAuthority).build(); + } + + protected TestSecurityClient getAdminSecurityClient() { + if (adminSecurityClient == null) { + adminSecurityClient = new TestSecurityClient(adminClient()); + } + return adminSecurityClient; + } + + public void testRoleMappingWithoutCacheFailsWithoutAccessToSecurityIndex() throws Exception { + final String dn = randomDn(); + + final String rules = Strings.format(""" + { "all": [ + { "field": { "realm.name": "jwt1" } }, + { "field": { "dn": "%s" } } + ] } + """, dn); + + final List roles = randomRoles(); + final String roleMappingName = createRoleMapping(roles, rules); + final String principal = randomPrincipal(); + + try { + { + final SignedJWT jwt = buildAndSignJwt(principal, dn, Instant.now()); + + final Map response = getSecurityClient(jwt).authenticate(); + + assertAuthenticationHasUsernameAndRoles(response, principal, roles); + } + + makeSecurityIndexUnavailable(); + + { + final SignedJWT jwt = buildAndSignJwt(principal, dn, Instant.now()); + + final Map response = getSecurityClient(jwt).authenticate(); + + assertAuthenticationHasUsernameAndRoles(response, principal, List.of()); + } + + // Now enable caching (since the setting is not dynamic, this requires a cluster restart), and test caching + makeSecurityIndexAvailable(); + mutableSettingsForLastLoadCache.put("xpack.security.authz.store.role_mappings.last_load_cache.enabled", "true"); + restartClusterAndResetClients(); + + { + final SignedJWT jwt = buildAndSignJwt(principal, dn, Instant.now()); + + final Map response = getSecurityClient(jwt).authenticate(); + + assertAuthenticationHasUsernameAndRoles(response, principal, roles); + } + + makeSecurityIndexUnavailable(); + + { + final SignedJWT jwt = buildAndSignJwt(principal, dn, Instant.now()); + + final Map response = getSecurityClient(jwt).authenticate(); + + assertAuthenticationHasUsernameAndRoles(response, principal, roles); + } + } 
finally { + makeSecurityIndexAvailable(); + deleteRoleMapping(roleMappingName); + } + } + + private void restartClusterAndResetClients() throws IOException { + cluster.restart(false); + adminSecurityClient = null; + closeClients(); + initClient(); + } + + private void assertAuthenticationHasUsernameAndRoles( + Map response, + String expectedUsername, + List expectedRoles + ) { + final String description = "Authentication response [" + response + "]"; + assertThat(description, response, hasEntry(User.Fields.USERNAME.getPreferredName(), expectedUsername)); + assertThat( + description, + JwtRestIT.assertList(response, User.Fields.ROLES), + Matchers.containsInAnyOrder(expectedRoles.toArray(String[]::new)) + ); + } + + private void makeSecurityIndexUnavailable() throws IOException { + Request closeRequest = new Request("POST", "/.security/_close"); + closeRequest.setOptions(systemIndexWarningHandlerOptions(".security-7")); + assertOK(adminClient().performRequest(closeRequest)); + } + + private void makeSecurityIndexAvailable() throws IOException { + Request openRequest = new Request("POST", "/.security/_open"); + openRequest.setOptions(systemIndexWarningHandlerOptions(".security-7")); + assertOK(adminClient().performRequest(openRequest)); + } + + private RequestOptions.Builder systemIndexWarningHandlerOptions(String index) { + return RequestOptions.DEFAULT.toBuilder() + .setWarningsHandler( + w -> w.size() > 0 + && w.contains( + "this request accesses system indices: [" + + index + + "], but in a future major " + + "version, direct access to system indices will be prevented by default" + ) == false + ); + } + + private String randomPrincipal() { + // We append _test so that it cannot randomly conflict with builtin user + return randomAlphaOfLengthBetween(4, 12) + "_test"; + } + + private String randomDn() { + return "CN=" + randomPrincipal(); + } + + private List randomRoles() { + // We append _test so that it cannot randomly conflict with builtin roles + return 
randomList(1, 3, () -> randomAlphaOfLengthBetween(4, 12) + "_test"); + } + + private SignedJWT buildAndSignJwt(String principal, String dn, Instant issueTime) throws JOSEException, ParseException, IOException { + final JWTClaimsSet claimsSet = JwtRestIT.buildJwt( + Map.ofEntries( + Map.entry("iss", "https://issuer.example.com/"), + Map.entry("aud", "https://audience.example.com/"), + Map.entry("sub", principal), + Map.entry("dn", dn), + Map.entry("token_use", "id"), + Map.entry("version", "2.0") + ), + issueTime + ); + final RSASSASigner signer = loadRsaSigner(); + return JwtRestIT.signJWT(signer, "RS256", claimsSet); + } + + private RSASSASigner loadRsaSigner() throws IOException, ParseException, JOSEException { + try (var in = getDataInputStream("/jwk/rsa-private-jwkset.json")) { + final JWKSet jwkSet = JWKSet.load(in); + final JWK key = jwkSet.getKeyByKeyId("test-rsa-key"); + assertThat(key, instanceOf(RSAKey.class)); + return new RSASSASigner((RSAKey) key); + } + } + + private TestSecurityClient getSecurityClient(SignedJWT jwt) { + final String bearerHeader = "Bearer " + jwt.serialize(); + final RequestOptions.Builder options = RequestOptions.DEFAULT.toBuilder(); + options.addHeader("Authorization", bearerHeader); + return new TestSecurityClient(client(), options.build()); + } + + private String createRoleMapping(List roles, String rules) throws IOException { + Map mapping = new HashMap<>(); + mapping.put("enabled", true); + mapping.put("roles", roles); + mapping.put("rules", XContentHelper.convertToMap(XContentType.JSON.xContent(), rules, true)); + final String mappingName = "test-" + getTestName() + "-" + randomAlphaOfLength(8); + getAdminSecurityClient().putRoleMapping(mappingName, mapping); + return mappingName; + } + + private void deleteRoleMapping(String name) throws IOException { + getAdminSecurityClient().deleteRoleMapping(name); + } +} diff --git 
a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/ShrinkIndexWithSecurityTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/ShrinkIndexWithSecurityTests.java index c5efabfca13db..c3016a810c27f 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/ShrinkIndexWithSecurityTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/ShrinkIndexWithSecurityTests.java @@ -23,11 +23,6 @@ @ClusterScope(minNumDataNodes = 2) public class ShrinkIndexWithSecurityTests extends SecurityIntegTestCase { - @Override - protected final boolean ignoreExternalCluster() { - return true; - } - @Override protected int minimumNumberOfShards() { return 2; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java index 42b4c8c459eb0..02de32078469e 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java @@ -1198,6 +1198,7 @@ public static List> getSettings(List securityExten settingsList.add(CachingServiceAccountTokenStore.CACHE_HASH_ALGO_SETTING); settingsList.add(CachingServiceAccountTokenStore.CACHE_MAX_TOKENS_SETTING); settingsList.add(SimpleRole.CACHE_SIZE_SETTING); + settingsList.add(NativeRoleMappingStore.LAST_LOAD_CACHE_ENABLED_SETTING); // hide settings settingsList.add(Setting.stringListSetting(SecurityField.setting("hide_settings"), Property.NodeScope, Property.Filtered)); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealm.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealm.java index d56d8dd6b968f..dea471846b9f4 100644 --- 
a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealm.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealm.java @@ -11,6 +11,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.apache.lucene.util.BytesRef; import org.elasticsearch.action.ActionListener; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; @@ -283,7 +284,9 @@ public void authenticate(final AuthenticationToken authenticationToken, final Ac return; // FAILED (secret is missing or mismatched) } - final BytesArray jwtCacheKey = isCacheEnabled() ? new BytesArray(jwtAuthenticationToken.getUserCredentialsHash()) : null; + final BytesArray jwtCacheKey = isCacheEnabled() + ? new BytesArray(new BytesRef(jwtAuthenticationToken.getUserCredentialsHash()), true) + : null; if (jwtCacheKey != null) { final User cachedUser = tryAuthenticateWithCache(tokenPrincipal, jwtCacheKey); if (cachedUser != null) { @@ -483,6 +486,11 @@ private boolean isCacheEnabled() { return jwtCache != null && jwtCacheHelper != null; } + // package private for testing + Cache getJwtCache() { + return jwtCache; + } + /** * Format and filter JWT contents as user metadata. * @param claimsSet Claims are supported. Claim keys are prefixed by "jwt_claim_". 
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStore.java index 218e120e30941..ba28e2a9952cc 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStore.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStore.java @@ -8,6 +8,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.delete.DeleteResponse; @@ -17,9 +18,11 @@ import org.elasticsearch.common.CheckedBiConsumer; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.core.Nullable; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.script.ScriptService; @@ -52,6 +55,7 @@ import java.util.Objects; import java.util.Set; import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.atomic.AtomicReference; import java.util.function.Supplier; import java.util.stream.Collectors; @@ -87,17 +91,27 @@ public class NativeRoleMappingStore implements UserRoleMapper { private static final String ID_PREFIX = DOC_TYPE_ROLE_MAPPING + "_"; + public static final Setting LAST_LOAD_CACHE_ENABLED_SETTING = Setting.boolSetting( + "xpack.security.authz.store.role_mappings.last_load_cache.enabled", + false, + 
Setting.Property.NodeScope, + Setting.Property.Filtered + ); + private final Settings settings; private final Client client; private final SecurityIndexManager securityIndex; private final ScriptService scriptService; private final List realmsToRefresh = new CopyOnWriteArrayList<>(); + private final boolean lastLoadCacheEnabled; + private final AtomicReference> lastLoadRef = new AtomicReference<>(null); public NativeRoleMappingStore(Settings settings, Client client, SecurityIndexManager securityIndex, ScriptService scriptService) { this.settings = settings; this.client = client; this.securityIndex = securityIndex; this.scriptService = scriptService; + this.lastLoadCacheEnabled = LAST_LOAD_CACHE_ENABLED_SETTING.get(settings); } private static String getNameFromId(String id) { @@ -105,7 +119,8 @@ private static String getNameFromId(String id) { return id.substring(ID_PREFIX.length()); } - private static String getIdForName(String name) { + // package-private for testing + static String getIdForName(String name) { return ID_PREFIX + name; } @@ -139,6 +154,10 @@ protected void loadMappings(ActionListener> listener new ContextPreservingActionListener<>(supplier, ActionListener.wrap((Collection mappings) -> { final List mappingList = mappings.stream().filter(Objects::nonNull).toList(); logger.debug("successfully loaded [{}] role-mapping(s) from [{}]", mappingList.size(), securityIndex.aliasName()); + if (lastLoadCacheEnabled) { + logger.debug("caching loaded role-mapping(s)"); + lastLoadRef.set(List.copyOf(mappingList)); + } listener.onResponse(mappingList); }, ex -> { logger.error( @@ -294,19 +313,44 @@ public void getRoleMappings(Set names, ActionListener> listener) { final SecurityIndexManager frozenSecurityIndex = securityIndex.defensiveCopy(); if (frozenSecurityIndex.indexExists() == false) { - logger.debug("The security does not index exist - no role mappings can be loaded"); - listener.onResponse(Collections.emptyList()); - } else if 
(frozenSecurityIndex.indexIsClosed()) { - logger.debug("The security index exists but is closed - no role mappings can be loaded"); + logger.debug("The security index does not exist - no role mappings can be loaded"); listener.onResponse(Collections.emptyList()); + return; + } + final List lastLoad = lastLoadRef.get(); + if (frozenSecurityIndex.indexIsClosed()) { + if (lastLoad != null) { + assert lastLoadCacheEnabled; + logger.debug("The security index exists but is closed - returning previously cached role mappings"); + listener.onResponse(lastLoad); + } else { + logger.debug("The security index exists but is closed - no role mappings can be loaded"); + listener.onResponse(Collections.emptyList()); + } } else if (frozenSecurityIndex.isAvailable(SEARCH_SHARDS) == false) { - logger.debug("The security index exists but is not available - no role mappings can be loaded"); - listener.onFailure(frozenSecurityIndex.getUnavailableReason(SEARCH_SHARDS)); + final ElasticsearchException unavailableReason = frozenSecurityIndex.getUnavailableReason(SEARCH_SHARDS); + if (lastLoad != null) { + assert lastLoadCacheEnabled; + logger.debug( + "The security index exists but is not available - returning previously cached role mappings", + unavailableReason + ); + listener.onResponse(lastLoad); + } else { + logger.debug("The security index exists but is not available - no role mappings can be loaded"); + listener.onFailure(unavailableReason); + } } else { loadMappings(listener); } } + // package-private for testing + @Nullable + List getLastLoad() { + return lastLoadRef.get(); + } + /** * Provides usage statistics for this store. 
* The resulting map contains the keys @@ -317,7 +361,7 @@ private void getMappings(ActionListener> listener) { * */ public void usageStats(ActionListener> listener) { - if (securityIndex.isAvailable(SEARCH_SHARDS) == false) { + if (securityIndex.indexIsClosed() || securityIndex.isAvailable(SEARCH_SHARDS) == false) { reportStats(listener, Collections.emptyList()); } else { getMappings(ActionListener.wrap(mappings -> reportStats(listener, mappings), listener::onFailure)); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/role/PutRoleBuilderTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/role/PutRoleBuilderTests.java index efe7892daec70..984442e82be16 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/role/PutRoleBuilderTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/role/PutRoleBuilderTests.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.security.action.role; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.client.internal.Client; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.client.NoOpClient; @@ -26,7 +25,8 @@ public void testBWCFieldPermissions() throws Exception { Path path = getDataPath("roles2xformat.json"); byte[] bytes = Files.readAllBytes(path); String roleString = new String(bytes, Charset.defaultCharset()); - try (Client client = new NoOpClient("testBWCFieldPermissions")) { + try (var threadPool = createThreadPool()) { + final var client = new NoOpClient(threadPool); ElasticsearchParseException e = expectThrows( ElasticsearchParseException.class, () -> new PutRoleRequestBuilder(client).source("role1", new BytesArray(roleString), XContentType.JSON) diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmAuthenticateTests.java 
b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmAuthenticateTests.java index 7fd4a70f7505e..f75876a755557 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmAuthenticateTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmAuthenticateTests.java @@ -67,6 +67,26 @@ public void testJwtAuthcRealmAuthcAuthzWithEmptyRoles() throws Exception { doMultipleAuthcAuthzAndVerifySuccess(jwtIssuerAndRealm.realm(), user, jwt, clientSecret, jwtAuthcCount); } + public void testJwtCache() throws Exception { + jwtIssuerAndRealms = generateJwtIssuerRealmPairs(1, 1, 1, 1, 1, 1, 99, false); + JwtRealm realm = jwtIssuerAndRealms.get(0).realm(); + realm.expireAll(); + assertThat(realm.getJwtCache().count(), is(0)); + final JwtIssuerAndRealm jwtIssuerAndRealm = randomJwtIssuerRealmPair(); + final SecureString clientSecret = JwtRealmInspector.getClientAuthenticationSharedSecret(jwtIssuerAndRealm.realm()); + for (int i = 1; i <= randomIntBetween(2, 10); i++) { + User user = randomUser(jwtIssuerAndRealm.issuer()); + doMultipleAuthcAuthzAndVerifySuccess( + jwtIssuerAndRealm.realm(), + user, + randomJwt(jwtIssuerAndRealm, user), + clientSecret, + randomIntBetween(2, 10) + ); + assertThat(realm.getJwtCache().count(), is(i)); + } + } + /** * Test with no authz realms. 
* @throws Exception Unexpected test failure diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmTestCase.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmTestCase.java index ab1b7867ffa04..64f2444e0182d 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmTestCase.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmTestCase.java @@ -12,6 +12,8 @@ import com.nimbusds.openid.connect.sdk.Nonce; import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.cache.Cache; import org.elasticsearch.common.settings.MockSecureSettings; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; @@ -46,6 +48,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Date; +import java.util.HexFormat; import java.util.List; import java.util.Map; import java.util.TreeSet; @@ -290,7 +293,7 @@ protected JwtRealmSettingsBuilder createJwtRealmSettingsBuilder(final JwtIssuer if (randomBoolean()) { authcSettings.put( RealmSettings.getFullSettingKey(authcRealmName, JwtRealmSettings.JWT_CACHE_TTL), - randomIntBetween(10, 120) + randomFrom("s", "m", "h") + randomIntBetween(10, 120) + randomFrom("m", "h") ); } authcSettings.put(RealmSettings.getFullSettingKey(authcRealmName, JwtRealmSettings.JWT_CACHE_SIZE), jwtCacheSize); @@ -378,11 +381,12 @@ protected void doMultipleAuthcAuthzAndVerifySuccess( final int jwtAuthcRepeats ) { final List jwtRealmsList = jwtIssuerAndRealms.stream().map(p -> p.realm).toList(); - + BytesArray firstCacheKeyFound = null; // Select different test JWKs from the JWT realm, and generate test JWTs for the test user. Run the JWT through the chain. 
for (int authcRun = 1; authcRun <= jwtAuthcRepeats; authcRun++) { + final ThreadContext requestThreadContext = createThreadContext(jwt, sharedSecret); - logger.info("REQ[" + authcRun + "/" + jwtAuthcRepeats + "] HEADERS=" + requestThreadContext.getHeaders()); + logger.debug("REQ[" + authcRun + "/" + jwtAuthcRepeats + "] HEADERS=" + requestThreadContext.getHeaders()); // Any JWT realm can recognize and extract the request headers. final var jwtAuthenticationToken = (JwtAuthenticationToken) randomFrom(jwtRealmsList).token(requestThreadContext); @@ -393,11 +397,11 @@ protected void doMultipleAuthcAuthzAndVerifySuccess( // Loop through all authc/authz realms. Confirm user is returned with expected principal and roles. User authenticatedUser = null; realmLoop: for (final JwtRealm candidateJwtRealm : jwtRealmsList) { - logger.info("TRY AUTHC: expected=[" + jwtRealm.name() + "], candidate[" + candidateJwtRealm.name() + "]."); + logger.debug("TRY AUTHC: expected=[" + jwtRealm.name() + "], candidate[" + candidateJwtRealm.name() + "]."); final PlainActionFuture> authenticateFuture = PlainActionFuture.newFuture(); candidateJwtRealm.authenticate(jwtAuthenticationToken, authenticateFuture); final AuthenticationResult authenticationResult = authenticateFuture.actionGet(); - logger.info("Authentication result with realm [{}]: [{}]", candidateJwtRealm.name(), authenticationResult); + logger.debug("Authentication result with realm [{}]: [{}]", candidateJwtRealm.name(), authenticationResult); switch (authenticationResult.getStatus()) { case SUCCESS: assertThat("Unexpected realm SUCCESS status", candidateJwtRealm.name(), equalTo(jwtRealm.name())); @@ -430,20 +434,41 @@ protected void doMultipleAuthcAuthzAndVerifySuccess( equalTo(Map.of("jwt_token_type", JwtRealmInspector.getTokenType(jwtRealm).value())) ); } + // if the cache is enabled ensure the cache is used and does not change for the provided jwt + if (jwtRealm.getJwtCache() != null) { + Cache cache = jwtRealm.getJwtCache(); + 
if (firstCacheKeyFound == null) { + assertNotNull("could not find cache keys", cache.keys()); + firstCacheKeyFound = cache.keys().iterator().next(); + } + jwtAuthenticationToken.clearCredentials(); // simulates the realm's context closing which clears the credential + boolean foundInCache = false; + for (BytesArray key : cache.keys()) { + logger.trace("cache key: " + HexFormat.of().formatHex(key.array())); + if (key.equals(firstCacheKeyFound)) { + foundInCache = true; + } + assertFalse( + "cache key should not be nulled out", + IntStream.range(0, key.array().length).map(idx -> key.array()[idx]).allMatch(b -> b == 0) + ); + } + assertTrue("cache key was not found in cache", foundInCache); + } } - logger.info("Test succeeded"); + logger.debug("Test succeeded"); } protected User randomUser(final JwtIssuer jwtIssuer) { final User user = randomFrom(jwtIssuer.principals.values()); - logger.info("USER[" + user.principal() + "]: roles=[" + String.join(",", user.roles()) + "]."); + logger.debug("USER[" + user.principal() + "]: roles=[" + String.join(",", user.roles()) + "]."); return user; } protected SecureString randomJwt(final JwtIssuerAndRealm jwtIssuerAndRealm, User user) throws Exception { final JwtIssuer.AlgJwkPair algJwkPair = randomFrom(jwtIssuerAndRealm.issuer.algAndJwksAll); final JWK jwk = algJwkPair.jwk(); - logger.info( + logger.debug( "ALG[" + algJwkPair.alg() + "]. JWK: kty=[" @@ -491,7 +516,7 @@ protected void printJwtRealmAndIssuer(JwtIssuerAndRealm jwtIssuerAndRealm) throw } protected void printJwtRealm(final JwtRealm jwtRealm) { - logger.info( + logger.debug( "REALM[" + jwtRealm.name() + "," @@ -527,15 +552,15 @@ protected void printJwtRealm(final JwtRealm jwtRealm) { + "]." 
); for (final JWK jwk : JwtRealmInspector.getJwksAlgsHmac(jwtRealm).jwks()) { - logger.info("REALM HMAC: jwk=[{}]", jwk); + logger.debug("REALM HMAC: jwk=[{}]", jwk); } for (final JWK jwk : JwtRealmInspector.getJwksAlgsPkc(jwtRealm).jwks()) { - logger.info("REALM PKC: jwk=[{}]", jwk); + logger.debug("REALM PKC: jwk=[{}]", jwk); } } protected void printJwtIssuer(final JwtIssuer jwtIssuer) { - logger.info( + logger.debug( "ISSUER: iss=[" + jwtIssuer.issuerClaimValue + "], aud=[" @@ -549,13 +574,13 @@ protected void printJwtIssuer(final JwtIssuer jwtIssuer) { + "]." ); if (jwtIssuer.algAndJwkHmacOidc != null) { - logger.info("ISSUER HMAC OIDC: alg=[{}] jwk=[{}]", jwtIssuer.algAndJwkHmacOidc.alg(), jwtIssuer.encodedKeyHmacOidc); + logger.debug("ISSUER HMAC OIDC: alg=[{}] jwk=[{}]", jwtIssuer.algAndJwkHmacOidc.alg(), jwtIssuer.encodedKeyHmacOidc); } for (final JwtIssuer.AlgJwkPair pair : jwtIssuer.algAndJwksHmac) { - logger.info("ISSUER HMAC: alg=[{}] jwk=[{}]", pair.alg(), pair.jwk()); + logger.debug("ISSUER HMAC: alg=[{}] jwk=[{}]", pair.alg(), pair.jwk()); } for (final JwtIssuer.AlgJwkPair pair : jwtIssuer.algAndJwksPkc) { - logger.info("ISSUER PKC: alg=[{}] jwk=[{}]", pair.alg(), pair.jwk()); + logger.debug("ISSUER PKC: alg=[{}] jwk=[{}]", pair.alg(), pair.jwk()); } } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java index 16ef229ed5436..efc97ca30cd1a 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java @@ -6,13 +6,19 @@ */ package org.elasticsearch.xpack.security.authc.support.mapper; +import org.apache.lucene.search.TotalHits; import 
org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.search.SearchAction; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchRequestBuilder; +import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.env.Environment; @@ -20,8 +26,14 @@ import org.elasticsearch.script.ScriptModule; import org.elasticsearch.script.ScriptService; import org.elasticsearch.script.mustache.MustacheScriptEngine; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.SearchHits; +import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xcontent.ToXContent; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.json.JsonXContent; import org.elasticsearch.xpack.core.security.action.realm.ClearRealmCacheAction; import org.elasticsearch.xpack.core.security.action.realm.ClearRealmCacheRequest; import org.elasticsearch.xpack.core.security.action.realm.ClearRealmCacheResponse; @@ -40,6 +52,8 @@ import org.elasticsearch.xpack.security.authc.support.CachingUsernamePasswordRealm; import org.elasticsearch.xpack.security.support.SecurityIndexManager; import org.hamcrest.Matchers; +import org.junit.Before; +import org.mockito.Mockito; import java.time.Instant; import java.util.Arrays; @@ -48,14 +62,22 @@ import java.util.Locale; import java.util.Map; 
import java.util.Set; +import java.util.concurrent.ExecutionException; import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Collectors; import static org.elasticsearch.test.ActionListenerUtils.anyActionListener; +import static org.elasticsearch.xpack.security.support.SecuritySystemIndices.SECURITY_MAIN_ALIAS; +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; public class NativeRoleMappingStoreTests extends ESTestCase { @@ -64,6 +86,20 @@ public class NativeRoleMappingStoreTests extends ESTestCase { TestRestrictedIndices.INTERNAL_SECURITY_MAIN_INDEX_7 ); + private ScriptService scriptService; + private SecurityIndexManager securityIndex; + + @Before + public void setup() { + scriptService = new ScriptService( + Settings.EMPTY, + Collections.singletonMap(MustacheScriptEngine.NAME, new MustacheScriptEngine()), + ScriptModule.CORE_CONTEXTS, + () -> 1L + ); + securityIndex = mockHealthySecurityIndex(); + } + public void testResolveRoles() throws Exception { // Does match DN final ExpressionRoleMapping mapping1 = new ExpressionRoleMapping( @@ -118,17 +154,6 @@ public void testResolveRoles() throws Exception { ); final Client client = mock(Client.class); - SecurityIndexManager securityIndex = mock(SecurityIndexManager.class); - ScriptService scriptService = new ScriptService( - Settings.EMPTY, - Collections.singletonMap(MustacheScriptEngine.NAME, new MustacheScriptEngine()), - ScriptModule.CORE_CONTEXTS, - () -> 1L - ); - when(securityIndex.isAvailable(SecurityIndexManager.Availability.PRIMARY_SHARDS)).thenReturn(true); - 
when(securityIndex.isAvailable(SecurityIndexManager.Availability.SEARCH_SHARDS)).thenReturn(true); - when(securityIndex.indexExists()).thenReturn(true); - when(securityIndex.defensiveCopy()).thenReturn(securityIndex); final NativeRoleMappingStore store = new NativeRoleMappingStore(Settings.EMPTY, client, securityIndex, scriptService) { @Override @@ -161,6 +186,218 @@ protected void loadMappings(ActionListener> listener store.resolveRoles(user, future); final Set roles = future.get(); assertThat(roles, Matchers.containsInAnyOrder("dept_h", "defence", "flight")); + assertThat(store.getLastLoad(), is(nullValue())); + } + + public void testResolveRolesDoesNotUseLastLoadCacheWhenSecurityIndexAvailable() throws Exception { + final Client client = mock(Client.class); + final ThreadPool mockThreadPool = mock(ThreadPool.class); + when(mockThreadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY)); + when(client.threadPool()).thenReturn(mockThreadPool); + when(client.prepareSearch(eq(SECURITY_MAIN_ALIAS))).thenReturn( + Mockito.spy(new SearchRequestBuilder(client, SearchAction.INSTANCE)) + ); + final ExpressionRoleMapping mapping = new ExpressionRoleMapping( + "mapping", + new FieldExpression("dn", Collections.singletonList(new FieldValue("*"))), + List.of("role"), + Collections.emptyList(), + Collections.emptyMap(), + true + ); + doAnswerWithSearchResult(client, mapping); + + final NativeRoleMappingStore store = new NativeRoleMappingStore( + Settings.builder().put("xpack.security.authz.store.role_mappings.last_load_cache.enabled", "true").build(), + client, + securityIndex, + scriptService + ); + + final UserRoleMapper.UserData user = new UserRoleMapper.UserData( + "user", + randomiseDn("cn=user,ou=people,dc=org"), + List.of(), + Map.of(), + mock(RealmConfig.class) + ); + assertThat(store.getLastLoad(), is(nullValue())); + + assertThat(resolveRoles(store, user), Matchers.containsInAnyOrder("role")); + assertThat(store.getLastLoad(), contains(mapping)); + 
verify(client, times(1)).search(any(SearchRequest.class), anyActionListener()); + + // when security index is available, we still run a search + assertThat(resolveRoles(store, user), Matchers.containsInAnyOrder("role")); + assertThat(store.getLastLoad(), contains(mapping)); + verify(client, times(2)).search(any(SearchRequest.class), anyActionListener()); + } + + public void testResolveRolesUsesLastLoadCacheWhenSecurityIndexUnavailable() throws Exception { + final Client client = mock(Client.class); + final ThreadPool mockThreadPool = mock(ThreadPool.class); + when(mockThreadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY)); + when(client.threadPool()).thenReturn(mockThreadPool); + when(client.prepareSearch(eq(SECURITY_MAIN_ALIAS))).thenReturn( + Mockito.spy(new SearchRequestBuilder(client, SearchAction.INSTANCE)) + ); + final ExpressionRoleMapping mapping = new ExpressionRoleMapping( + "mapping", + new FieldExpression("dn", Collections.singletonList(new FieldValue("*"))), + List.of("role"), + Collections.emptyList(), + Collections.emptyMap(), + true + ); + doAnswerWithSearchResult(client, mapping); + + final NativeRoleMappingStore store = new NativeRoleMappingStore( + Settings.builder().put("xpack.security.authz.store.role_mappings.last_load_cache.enabled", "true").build(), + client, + securityIndex, + scriptService + ); + + final UserRoleMapper.UserData user = new UserRoleMapper.UserData( + "user", + randomiseDn("cn=user,ou=people,dc=org"), + List.of(), + Map.of(), + mock(RealmConfig.class) + ); + assertThat(store.getLastLoad(), is(nullValue())); + + assertThat(resolveRoles(store, user), Matchers.containsInAnyOrder("role")); + assertThat(store.getLastLoad(), contains(mapping)); + verify(client, times(1)).search(any(SearchRequest.class), anyActionListener()); + + final boolean indexAvailable = randomBoolean(); + when(securityIndex.isAvailable(SecurityIndexManager.Availability.SEARCH_SHARDS)).thenReturn(indexAvailable); + final boolean 
indexClosed = indexAvailable || randomBoolean(); + when(securityIndex.indexIsClosed()).thenReturn(indexClosed); + assertThat(resolveRoles(store, user), Matchers.containsInAnyOrder(mapping.getRoles().toArray())); + assertThat(store.getLastLoad(), contains(mapping)); + // index was unavailable, so we returned result from cache; no new search + verify(client, times(1)).search(any(SearchRequest.class), anyActionListener()); + + // new search result from index overwrites previous + when(securityIndex.indexExists()).thenReturn(true); + when(securityIndex.isAvailable(SecurityIndexManager.Availability.SEARCH_SHARDS)).thenReturn(true); + when(securityIndex.indexIsClosed()).thenReturn(false); + final ExpressionRoleMapping mapping2 = new ExpressionRoleMapping( + "mapping2", + new FieldExpression("dn", Collections.singletonList(new FieldValue("*"))), + List.of("role2"), + Collections.emptyList(), + Collections.emptyMap(), + true + ); + doAnswerWithSearchResult(client, mapping2); + assertThat(resolveRoles(store, user), Matchers.containsInAnyOrder(mapping2.getRoles().toArray())); + assertThat(store.getLastLoad(), contains(mapping2)); + } + + public void testResolveRolesDoesNotUseLastLoadCacheWhenSecurityIndexDoesNotExist() throws Exception { + final Client client = mock(Client.class); + final ThreadPool mockThreadPool = mock(ThreadPool.class); + when(mockThreadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY)); + when(client.threadPool()).thenReturn(mockThreadPool); + when(client.prepareSearch(eq(SECURITY_MAIN_ALIAS))).thenReturn( + Mockito.spy(new SearchRequestBuilder(client, SearchAction.INSTANCE)) + ); + final ExpressionRoleMapping mapping = new ExpressionRoleMapping( + "mapping", + new FieldExpression("dn", Collections.singletonList(new FieldValue("*"))), + List.of("role"), + Collections.emptyList(), + Collections.emptyMap(), + true + ); + doAnswerWithSearchResult(client, mapping); + + final NativeRoleMappingStore store = new NativeRoleMappingStore( + 
Settings.builder().put("xpack.security.authz.store.role_mappings.last_load_cache.enabled", "true").build(), + client, + securityIndex, + scriptService + ); + + final UserRoleMapper.UserData user = new UserRoleMapper.UserData( + "user", + randomiseDn("cn=user,ou=people,dc=org"), + List.of(), + Map.of(), + mock(RealmConfig.class) + ); + assertThat(store.getLastLoad(), is(nullValue())); + + assertThat(resolveRoles(store, user), Matchers.containsInAnyOrder("role")); + assertThat(store.getLastLoad(), contains(mapping)); + verify(client, times(1)).search(any(SearchRequest.class), anyActionListener()); + + when(securityIndex.indexExists()).thenReturn(false); + assertThat(resolveRoles(store, user), is(empty())); + assertThat(store.getLastLoad(), contains(mapping)); + } + + private SecurityIndexManager mockHealthySecurityIndex() { + final SecurityIndexManager securityIndex = mock(SecurityIndexManager.class); + when(securityIndex.isAvailable(SecurityIndexManager.Availability.PRIMARY_SHARDS)).thenReturn(true); + when(securityIndex.isAvailable(SecurityIndexManager.Availability.SEARCH_SHARDS)).thenReturn(true); + when(securityIndex.indexExists()).thenReturn(true); + when(securityIndex.isIndexUpToDate()).thenReturn(true); + when(securityIndex.defensiveCopy()).thenReturn(securityIndex); + return securityIndex; + } + + private void doAnswerWithSearchResult(Client client, ExpressionRoleMapping mapping) { + doAnswer(invocation -> { + @SuppressWarnings("unchecked") + final var listener = (ActionListener) invocation.getArguments()[1]; + final var searchHit = new SearchHit( + randomIntBetween(0, Integer.MAX_VALUE), + NativeRoleMappingStore.getIdForName(mapping.getName()) + ); + try (XContentBuilder builder = JsonXContent.contentBuilder()) { + mapping.toXContent(builder, ToXContent.EMPTY_PARAMS); + searchHit.sourceRef(BytesReference.bytes(builder)); + } + final var internalSearchResponse = new InternalSearchResponse( + new SearchHits( + new SearchHit[] { searchHit }, + new TotalHits(1, 
TotalHits.Relation.EQUAL_TO), + randomFloat(), + null, + null, + null + ), + null, + null, + null, + false, + null, + 0 + ); + final var searchResponse = new SearchResponse( + internalSearchResponse, + randomAlphaOfLengthBetween(3, 8), + 1, + 1, + 0, + 10, + null, + null + ); + listener.onResponse(searchResponse); + return null; + }).when(client).search(any(SearchRequest.class), anyActionListener()); + } + + private Set resolveRoles(NativeRoleMappingStore store, UserRoleMapper.UserData user) throws InterruptedException, + ExecutionException { + final PlainActionFuture> future = new PlainActionFuture<>(); + store.resolveRoles(user, future); + return future.get(); } private String randomiseDn(String dn) { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreTests.java index 01d3ca6db354e..ecc69e957d8ba 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreTests.java @@ -38,7 +38,6 @@ import org.elasticsearch.test.ClusterServiceUtils; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.client.NoOpClient; -import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentType; @@ -108,7 +107,8 @@ public class NativePrivilegeStoreTests extends ESTestCase { public void setup() { requests = new ArrayList<>(); listener = new AtomicReference<>(); - client = new NoOpClient(getTestName()) { + threadPool = createThreadPool(); + client = new NoOpClient(threadPool) { @Override @SuppressWarnings("unchecked") protected void doExecute( @@ -144,7 +144,6 @@ public void 
searchScroll(SearchScrollRequest request, ActionListener void doExecute( ActionType action, @@ -103,17 +103,15 @@ public void doE listener.onFailure(new ElasticsearchSecurityException("encountered an error while creating API key")); } } - }) { - final RestCreateApiKeyAction restCreateApiKeyAction = new RestCreateApiKeyAction(Settings.EMPTY, mockLicenseState); - restCreateApiKeyAction.handleRequest(restRequest, restChannel, client); + }; + final RestCreateApiKeyAction restCreateApiKeyAction = new RestCreateApiKeyAction(Settings.EMPTY, mockLicenseState); + restCreateApiKeyAction.handleRequest(restRequest, restChannel, client); - final RestResponse restResponse = responseSetOnce.get(); - assertNotNull(restResponse); - assertThat( - CreateApiKeyResponse.fromXContent(createParser(XContentType.JSON.xContent(), restResponse.content())), - equalTo(expected) - ); - } + final RestResponse restResponse = responseSetOnce.get(); + assertNotNull(restResponse); + assertThat( + CreateApiKeyResponse.fromXContent(createParser(XContentType.JSON.xContent(), restResponse.content())), + equalTo(expected) + ); } - } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGetApiKeyActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGetApiKeyActionTests.java index f4c293edb59d3..e842dd8588fa9 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGetApiKeyActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestGetApiKeyActionTests.java @@ -122,7 +122,7 @@ public void sendResponse(RestResponse restResponse) { ) ); - try (NodeClient client = new NodeClient(Settings.EMPTY, threadPool) { + final var client = new NodeClient(Settings.EMPTY, threadPool) { @SuppressWarnings("unchecked") @Override public void doExecute( @@ -149,44 +149,37 @@ public void doE listener.onFailure(new 
ElasticsearchSecurityException("encountered an error while creating API key")); } } - }) { - final RestGetApiKeyAction restGetApiKeyAction = new RestGetApiKeyAction(Settings.EMPTY, mockLicenseState); + }; + final RestGetApiKeyAction restGetApiKeyAction = new RestGetApiKeyAction(Settings.EMPTY, mockLicenseState); - restGetApiKeyAction.handleRequest(restRequest, restChannel, client); + restGetApiKeyAction.handleRequest(restRequest, restChannel, client); - final RestResponse restResponse = responseSetOnce.get(); - assertNotNull(restResponse); + final RestResponse restResponse = responseSetOnce.get(); + assertNotNull(restResponse); + assertThat(restResponse.status(), (replyEmptyResponse && params.get("id") != null) ? is(RestStatus.NOT_FOUND) : is(RestStatus.OK)); + final GetApiKeyResponse actual = GetApiKeyResponse.fromXContent(createParser(XContentType.JSON.xContent(), restResponse.content())); + if (replyEmptyResponse) { + assertThat(actual.getApiKeyInfos().length, is(0)); + } else { assertThat( - restResponse.status(), - (replyEmptyResponse && params.get("id") != null) ? 
is(RestStatus.NOT_FOUND) : is(RestStatus.OK) - ); - final GetApiKeyResponse actual = GetApiKeyResponse.fromXContent( - createParser(XContentType.JSON.xContent(), restResponse.content()) - ); - if (replyEmptyResponse) { - assertThat(actual.getApiKeyInfos().length, is(0)); - } else { - assertThat( - actual.getApiKeyInfos(), - arrayContaining( - new ApiKey( - "api-key-name-1", - "api-key-id-1", - type, - creation, - expiration, - false, - "user-x", - "realm-1", - metadata, - roleDescriptors, - limitedByRoleDescriptors - ) + actual.getApiKeyInfos(), + arrayContaining( + new ApiKey( + "api-key-name-1", + "api-key-id-1", + type, + creation, + expiration, + false, + "user-x", + "realm-1", + metadata, + roleDescriptors, + limitedByRoleDescriptors ) - ); - } + ) + ); } - } public void testGetApiKeyOwnedByCurrentAuthenticatedUser() throws Exception { @@ -253,7 +246,7 @@ public void sendResponse(RestResponse restResponse) { final GetApiKeyResponse getApiKeyResponseExpectedWhenOwnerFlagIsTrue = new GetApiKeyResponse(Collections.singletonList(apiKey1)); final GetApiKeyResponse getApiKeyResponseExpectedWhenOwnerFlagIsFalse = new GetApiKeyResponse(List.of(apiKey1, apiKey2)); - try (NodeClient client = new NodeClient(Settings.EMPTY, threadPool) { + final var client = new NodeClient(Settings.EMPTY, threadPool) { @SuppressWarnings("unchecked") @Override public void doExecute( @@ -274,24 +267,21 @@ public void doE listener.onResponse((Response) getApiKeyResponseExpectedWhenOwnerFlagIsFalse); } } - }) { - final RestGetApiKeyAction restGetApiKeyAction = new RestGetApiKeyAction(Settings.EMPTY, mockLicenseState); + }; + final RestGetApiKeyAction restGetApiKeyAction = new RestGetApiKeyAction(Settings.EMPTY, mockLicenseState); - restGetApiKeyAction.handleRequest(restRequest, restChannel, client); + restGetApiKeyAction.handleRequest(restRequest, restChannel, client); - final RestResponse restResponse = responseSetOnce.get(); - assertNotNull(restResponse); - assertThat(restResponse.status(), 
is(RestStatus.OK)); - final GetApiKeyResponse actual = GetApiKeyResponse.fromXContent( - createParser(XContentType.JSON.xContent(), restResponse.content()) - ); - if (isGetRequestForOwnedKeysOnly) { - assertThat(actual.getApiKeyInfos().length, is(1)); - assertThat(actual.getApiKeyInfos(), arrayContaining(apiKey1)); - } else { - assertThat(actual.getApiKeyInfos().length, is(2)); - assertThat(actual.getApiKeyInfos(), arrayContaining(apiKey1, apiKey2)); - } + final RestResponse restResponse = responseSetOnce.get(); + assertNotNull(restResponse); + assertThat(restResponse.status(), is(RestStatus.OK)); + final GetApiKeyResponse actual = GetApiKeyResponse.fromXContent(createParser(XContentType.JSON.xContent(), restResponse.content())); + if (isGetRequestForOwnedKeysOnly) { + assertThat(actual.getApiKeyInfos().length, is(1)); + assertThat(actual.getApiKeyInfos(), arrayContaining(apiKey1)); + } else { + assertThat(actual.getApiKeyInfos().length, is(2)); + assertThat(actual.getApiKeyInfos(), arrayContaining(apiKey1, apiKey2)); } } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestInvalidateApiKeyActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestInvalidateApiKeyActionTests.java index e008a674b28fb..3c0e24da32763 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestInvalidateApiKeyActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestInvalidateApiKeyActionTests.java @@ -88,7 +88,7 @@ public void sendResponse(RestResponse restResponse) { null ); - try (NodeClient client = new NodeClient(Settings.EMPTY, threadPool) { + final var client = new NodeClient(Settings.EMPTY, threadPool) { @Override @SuppressWarnings("unchecked") public void doExecute( @@ -112,24 +112,19 @@ public void doE listener.onFailure(new ElasticsearchSecurityException("encountered 
an error while creating API key")); } } - }) { - final RestInvalidateApiKeyAction restInvalidateApiKeyAction = new RestInvalidateApiKeyAction(Settings.EMPTY, mockLicenseState); - - restInvalidateApiKeyAction.handleRequest(restRequest, restChannel, client); - - final RestResponse restResponse = responseSetOnce.get(); - assertNotNull(restResponse); - final InvalidateApiKeyResponse actual = InvalidateApiKeyResponse.fromXContent( - createParser(XContentType.JSON.xContent(), restResponse.content()) - ); - assertThat(actual.getInvalidatedApiKeys(), equalTo(invalidateApiKeyResponseExpected.getInvalidatedApiKeys())); - assertThat( - actual.getPreviouslyInvalidatedApiKeys(), - equalTo(invalidateApiKeyResponseExpected.getPreviouslyInvalidatedApiKeys()) - ); - assertThat(actual.getErrors(), equalTo(invalidateApiKeyResponseExpected.getErrors())); - } + }; + final RestInvalidateApiKeyAction restInvalidateApiKeyAction = new RestInvalidateApiKeyAction(Settings.EMPTY, mockLicenseState); + + restInvalidateApiKeyAction.handleRequest(restRequest, restChannel, client); + final RestResponse restResponse = responseSetOnce.get(); + assertNotNull(restResponse); + final InvalidateApiKeyResponse actual = InvalidateApiKeyResponse.fromXContent( + createParser(XContentType.JSON.xContent(), restResponse.content()) + ); + assertThat(actual.getInvalidatedApiKeys(), equalTo(invalidateApiKeyResponseExpected.getInvalidatedApiKeys())); + assertThat(actual.getPreviouslyInvalidatedApiKeys(), equalTo(invalidateApiKeyResponseExpected.getPreviouslyInvalidatedApiKeys())); + assertThat(actual.getErrors(), equalTo(invalidateApiKeyResponseExpected.getErrors())); } public void testInvalidateApiKeyOwnedByCurrentAuthenticatedUser() throws Exception { @@ -165,7 +160,7 @@ public void sendResponse(RestResponse restResponse) { null ); - try (NodeClient client = new NodeClient(Settings.EMPTY, threadPool) { + final var client = new NodeClient(Settings.EMPTY, threadPool) { @SuppressWarnings("unchecked") @Override 
public void doExecute( @@ -186,25 +181,23 @@ public void doE listener.onResponse((Response) invalidateApiKeyResponseExpectedWhenOwnerFlagIsFalse); } } - }) { - final RestInvalidateApiKeyAction restInvalidateApiKeyAction = new RestInvalidateApiKeyAction(Settings.EMPTY, mockLicenseState); - - restInvalidateApiKeyAction.handleRequest(restRequest, restChannel, client); - - final RestResponse restResponse = responseSetOnce.get(); - assertNotNull(restResponse); - assertThat(restResponse.status(), is(RestStatus.OK)); - final InvalidateApiKeyResponse actual = InvalidateApiKeyResponse.fromXContent( - createParser(XContentType.JSON.xContent(), restResponse.content()) - ); - if (isInvalidateRequestForOwnedKeysOnly) { - assertThat(actual.getInvalidatedApiKeys().size(), is(1)); - assertThat(actual.getInvalidatedApiKeys(), containsInAnyOrder("api-key-id-1")); - } else { - assertThat(actual.getInvalidatedApiKeys().size(), is(2)); - assertThat(actual.getInvalidatedApiKeys(), containsInAnyOrder("api-key-id-1", "api-key-id-2")); - } - } + }; + final RestInvalidateApiKeyAction restInvalidateApiKeyAction = new RestInvalidateApiKeyAction(Settings.EMPTY, mockLicenseState); + + restInvalidateApiKeyAction.handleRequest(restRequest, restChannel, client); + final RestResponse restResponse = responseSetOnce.get(); + assertNotNull(restResponse); + assertThat(restResponse.status(), is(RestStatus.OK)); + final InvalidateApiKeyResponse actual = InvalidateApiKeyResponse.fromXContent( + createParser(XContentType.JSON.xContent(), restResponse.content()) + ); + if (isInvalidateRequestForOwnedKeysOnly) { + assertThat(actual.getInvalidatedApiKeys().size(), is(1)); + assertThat(actual.getInvalidatedApiKeys(), containsInAnyOrder("api-key-id-1")); + } else { + assertThat(actual.getInvalidatedApiKeys().size(), is(2)); + assertThat(actual.getInvalidatedApiKeys(), containsInAnyOrder("api-key-id-1", "api-key-id-2")); + } } } diff --git 
a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestQueryApiKeyActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestQueryApiKeyActionTests.java index 83e8dbb96a41e..67d2ab006eb22 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestQueryApiKeyActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/apikey/RestQueryApiKeyActionTests.java @@ -102,7 +102,7 @@ public void sendResponse(RestResponse restResponse) { } }; - try (NodeClient client = new NodeClient(Settings.EMPTY, threadPool) { + final var client = new NodeClient(Settings.EMPTY, threadPool) { @SuppressWarnings("unchecked") @Override public void doExecute( @@ -127,10 +127,9 @@ public void doE assertThat(((PrefixQueryBuilder) shouldQueryBuilder).fieldName(), equalTo("metadata.environ")); listener.onResponse((Response) new QueryApiKeyResponse(0, List.of())); } - }) { - final RestQueryApiKeyAction restQueryApiKeyAction = new RestQueryApiKeyAction(Settings.EMPTY, mockLicenseState); - restQueryApiKeyAction.handleRequest(restRequest, restChannel, client); - } + }; + final RestQueryApiKeyAction restQueryApiKeyAction = new RestQueryApiKeyAction(Settings.EMPTY, mockLicenseState); + restQueryApiKeyAction.handleRequest(restRequest, restChannel, client); assertNotNull(responseSetOnce.get()); } @@ -160,7 +159,7 @@ public void sendResponse(RestResponse restResponse) { } }; - try (NodeClient client = new NodeClient(Settings.EMPTY, threadPool) { + final var client = new NodeClient(Settings.EMPTY, threadPool) { @SuppressWarnings("unchecked") @Override public void doExecute( @@ -192,10 +191,10 @@ public void doE listener.onResponse((Response) new QueryApiKeyResponse(0, List.of())); } - }) { - final RestQueryApiKeyAction restQueryApiKeyAction = new RestQueryApiKeyAction(Settings.EMPTY, mockLicenseState); - 
restQueryApiKeyAction.handleRequest(restRequest, restChannel, client); - } + }; + + final RestQueryApiKeyAction restQueryApiKeyAction = new RestQueryApiKeyAction(Settings.EMPTY, mockLicenseState); + restQueryApiKeyAction.handleRequest(restRequest, restChannel, client); assertNotNull(responseSetOnce.get()); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/role/RestPutRoleActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/role/RestPutRoleActionTests.java index 7a842fd310fea..01219a97e5905 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/role/RestPutRoleActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/role/RestPutRoleActionTests.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.security.rest.action.role; -import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.license.License; @@ -42,7 +41,8 @@ public void testFailureWhenNativeRolesDisabled() throws Exception { .build(); final FakeRestChannel channel = new FakeRestChannel(request, true, 1); - try (NodeClient nodeClient = new NoOpNodeClient(this.getTestName())) { + try (var threadPool = createThreadPool()) { + final var nodeClient = new NoOpNodeClient(threadPool); action.handleRequest(request, channel, nodeClient); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/user/RestGetUserPrivilegesActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/user/RestGetUserPrivilegesActionTests.java index af48949f2d318..8423d89f000af 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/user/RestGetUserPrivilegesActionTests.java +++ 
b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/user/RestGetUserPrivilegesActionTests.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.security.rest.action.user; -import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.settings.Settings; @@ -55,7 +54,8 @@ public void testSecurityDisabled() throws Exception { ); final FakeRestRequest request = new FakeRestRequest(); final FakeRestChannel channel = new FakeRestChannel(request, true, 1); - try (NodeClient nodeClient = new NoOpNodeClient(this.getTestName())) { + try (var threadPool = createThreadPool()) { + final var nodeClient = new NoOpNodeClient(threadPool); action.handleRequest(request, channel, nodeClient); } assertThat(channel.capturedResponse(), notNullValue()); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/user/RestHasPrivilegesActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/user/RestHasPrivilegesActionTests.java index d96446eec25c5..56eeb3405875c 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/user/RestHasPrivilegesActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/user/RestHasPrivilegesActionTests.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.security.rest.action.user; import org.elasticsearch.ElasticsearchSecurityException; -import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.license.License; @@ -43,10 +42,8 @@ public class RestHasPrivilegesActionTests extends ESTestCase { public void testBodyConsumed() throws Exception { final XPackLicenseState licenseState = mock(XPackLicenseState.class); final 
RestHasPrivilegesAction action = new RestHasPrivilegesAction(Settings.EMPTY, mock(SecurityContext.class), licenseState); - try ( - XContentBuilder bodyBuilder = JsonXContent.contentBuilder().startObject().endObject(); - NodeClient client = new NoOpNodeClient(this.getTestName()) - ) { + try (XContentBuilder bodyBuilder = JsonXContent.contentBuilder().startObject().endObject(); var threadPool = createThreadPool()) { + final var client = new NoOpNodeClient(threadPool); final RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withPath("/_security/user/_has_privileges/") .withContent(new BytesArray(bodyBuilder.toString()), XContentType.JSON) .build(); @@ -68,10 +65,8 @@ public void testSecurityDisabled() throws Exception { mock(SecurityContext.class), licenseState ); - try ( - XContentBuilder bodyBuilder = JsonXContent.contentBuilder().startObject().endObject(); - NodeClient client = new NoOpNodeClient(this.getTestName()) - ) { + try (XContentBuilder bodyBuilder = JsonXContent.contentBuilder().startObject().endObject(); var threadPool = createThreadPool()) { + final var client = new NoOpNodeClient(threadPool); final RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withPath("/_security/user/_has_privileges/") .withContent(new BytesArray(bodyBuilder.toString()), XContentType.JSON) .build(); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/user/RestPutUserActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/user/RestPutUserActionTests.java index 603e37440da0c..a2a9cbb7d092d 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/user/RestPutUserActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/action/user/RestPutUserActionTests.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.security.rest.action.user; -import 
org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.license.License; @@ -42,7 +41,8 @@ public void testFailureWhenNativeUsersDisabled() throws Exception { .build(); final FakeRestChannel channel = new FakeRestChannel(request, true, 1); - try (NodeClient nodeClient = new NoOpNodeClient(this.getTestName())) { + try (var threadPool = createThreadPool()) { + final var nodeClient = new NoOpNodeClient(threadPool); action.handleRequest(request, channel, nodeClient); } diff --git a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTaskTests.java b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTaskTests.java index 0ab2adfc44639..729cb8ef47292 100644 --- a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTaskTests.java +++ b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTaskTests.java @@ -106,13 +106,11 @@ public void testSkipCreatingSnapshotWhenJobDoesNotMatch() { Settings.EMPTY, Sets.union(ClusterSettings.BUILT_IN_CLUSTER_SETTINGS, Set.of(SLM_HISTORY_INDEX_ENABLED_SETTING)) ); - try ( - ClusterService clusterService = ClusterServiceUtils.createClusterService(state, threadPool, settings); + try (ClusterService clusterService = ClusterServiceUtils.createClusterService(state, threadPool, settings)) { VerifyingClient client = new VerifyingClient(threadPool, (a, r, l) -> { fail("should not have tried to take a snapshot"); return null; - }) - ) { + }); SnapshotHistoryStore historyStore = new VerifyingHistoryStore( null, clusterService, @@ -173,8 +171,7 @@ public void testCreateSnapshotOnTrigger() { final AtomicBoolean clientCalled = new AtomicBoolean(false); final SetOnce snapshotName = new SetOnce<>(); - try ( - ClusterService clusterService = ClusterServiceUtils.createClusterService(state, threadPool, settings); + try (ClusterService 
clusterService = ClusterServiceUtils.createClusterService(state, threadPool, settings)) { // This verifying client will verify that we correctly invoked // client.admin().createSnapshot(...) with the appropriate // request. It also returns a mock real response @@ -202,8 +199,7 @@ public void testCreateSnapshotOnTrigger() { fail("failed to parse snapshot response"); return null; } - }) - ) { + }); final AtomicBoolean historyStoreCalled = new AtomicBoolean(false); SnapshotHistoryStore historyStore = new VerifyingHistoryStore(null, clusterService, item -> { assertFalse(historyStoreCalled.getAndSet(true)); @@ -247,8 +243,7 @@ public void testPartialFailureSnapshot() throws Exception { ); final AtomicBoolean clientCalled = new AtomicBoolean(false); final SetOnce snapshotName = new SetOnce<>(); - try ( - ClusterService clusterService = ClusterServiceUtils.createClusterService(state, threadPool, settings); + try (ClusterService clusterService = ClusterServiceUtils.createClusterService(state, threadPool, settings)) { VerifyingClient client = new VerifyingClient(threadPool, (action, request, listener) -> { assertFalse(clientCalled.getAndSet(true)); assertThat(action, instanceOf(CreateSnapshotAction.class)); @@ -285,8 +280,8 @@ public void testPartialFailureSnapshot() throws Exception { Collections.emptyMap() ) ); - }) - ) { + }); + final AtomicBoolean historyStoreCalled = new AtomicBoolean(false); SnapshotHistoryStore historyStore = new VerifyingHistoryStore(null, clusterService, item -> { assertFalse(historyStoreCalled.getAndSet(true)); diff --git a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotRetentionTaskTests.java b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotRetentionTaskTests.java index 5934123ca1bae..5ba343de28752 100644 --- a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotRetentionTaskTests.java +++ b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotRetentionTaskTests.java @@ 
-135,9 +135,9 @@ private void retentionTaskTest(final boolean deletionSuccess) throws Exception { ); try ( ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool, settings); - Client noOpClient = new NoOpClient("slm-test") + var clientThreadPool = createThreadPool() ) { - + final var noOpClient = new NoOpClient(clientThreadPool); final String policyId = "policy"; final String repoId = "repo"; SnapshotLifecyclePolicy policy = new SnapshotLifecyclePolicy( @@ -232,7 +232,9 @@ public void testErrStillRunsFailureHandlerWhenRetrieving() throws Exception { final String repoId = "repo"; try ( ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool, settings); - Client noOpClient = new NoOpClient("slm-test") { + var clientThreadPool = createThreadPool() + ) { + final var noOpClient = new NoOpClient(clientThreadPool) { @Override @SuppressWarnings("unchecked") @@ -248,8 +250,7 @@ protected void super.doExecute(action, request, listener); } } - } - ) { + }; SnapshotLifecyclePolicy policy = new SnapshotLifecyclePolicy( policyId, "snap", @@ -307,8 +308,9 @@ public void testErrStillRunsFailureHandlerWhenDeleting() throws Exception { ); try ( ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool, settings); - Client noOpClient = new NoOpClient("slm-test") { - + var clientThreadPool = createThreadPool() + ) { + final var noOpClient = new NoOpClient(clientThreadPool) { @Override @SuppressWarnings("unchecked") protected void doExecute( @@ -323,8 +325,7 @@ protected void super.doExecute(action, request, listener); } } - } - ) { + }; final String policyId = "policy"; final String repoId = "repo"; SnapshotLifecyclePolicy policy = new SnapshotLifecyclePolicy( @@ -393,8 +394,9 @@ private void doTestSkipDuringMode(OperationMode mode) throws Exception { ); try ( ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool, settings); - Client noOpClient = new NoOpClient("slm-test") 
+ var clientThreadPool = createThreadPool() ) { + final var noOpClient = new NoOpClient(clientThreadPool); final String policyId = "policy"; final String repoId = "repo"; SnapshotLifecyclePolicy policy = new SnapshotLifecyclePolicy( @@ -449,8 +451,9 @@ private void doTestRunManuallyDuringMode(OperationMode mode) throws Exception { ); try ( ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool, settings); - Client noOpClient = new NoOpClient("slm-test") + var clientThreadPool = createThreadPool() ) { + final var noOpClient = new NoOpClient(clientThreadPool); final String policyId = "policy"; final String repoId = "repo"; SnapshotLifecyclePolicy policy = new SnapshotLifecyclePolicy( diff --git a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/history/SnapshotHistoryStoreTests.java b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/history/SnapshotHistoryStoreTests.java index cb31c282b73e4..6b2e23594ec3f 100644 --- a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/history/SnapshotHistoryStoreTests.java +++ b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/history/SnapshotHistoryStoreTests.java @@ -68,7 +68,6 @@ public void setup() { public void tearDown() throws Exception { super.tearDown(); clusterService.stop(); - client.close(); threadPool.shutdownNow(); } diff --git a/x-pack/plugin/sql/src/internalClusterTest/java/org/elasticsearch/xpack/sql/action/RestSqlCancellationIT.java b/x-pack/plugin/sql/src/internalClusterTest/java/org/elasticsearch/xpack/sql/action/RestSqlCancellationIT.java index 0100634766bfe..48ee5b05ffe0e 100644 --- a/x-pack/plugin/sql/src/internalClusterTest/java/org/elasticsearch/xpack/sql/action/RestSqlCancellationIT.java +++ b/x-pack/plugin/sql/src/internalClusterTest/java/org/elasticsearch/xpack/sql/action/RestSqlCancellationIT.java @@ -183,8 +183,4 @@ private static String queryAsJson(String query) throws IOException { return out.bytes().utf8ToString(); } - @Override - 
protected boolean ignoreExternalCluster() { - return true; - } } diff --git a/x-pack/plugin/sql/src/internalClusterTest/java/org/elasticsearch/xpack/sql/action/SqlLicenseIT.java b/x-pack/plugin/sql/src/internalClusterTest/java/org/elasticsearch/xpack/sql/action/SqlLicenseIT.java index 3e0841873b6db..0374818d7e3b5 100644 --- a/x-pack/plugin/sql/src/internalClusterTest/java/org/elasticsearch/xpack/sql/action/SqlLicenseIT.java +++ b/x-pack/plugin/sql/src/internalClusterTest/java/org/elasticsearch/xpack/sql/action/SqlLicenseIT.java @@ -36,10 +36,6 @@ @LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/37320") public class SqlLicenseIT extends AbstractLicensesIntegrationTestCase { - @Override - protected boolean ignoreExternalCluster() { - return true; - } @Before public void resetLicensing() throws Exception { diff --git a/x-pack/plugin/transform/src/internalClusterTest/java/org/elasticsearch/xpack/transform/checkpoint/TransformCheckpointServiceNodeTests.java b/x-pack/plugin/transform/src/internalClusterTest/java/org/elasticsearch/xpack/transform/checkpoint/TransformCheckpointServiceNodeTests.java index 9b6b67e76c01c..366f3e6f917bf 100644 --- a/x-pack/plugin/transform/src/internalClusterTest/java/org/elasticsearch/xpack/transform/checkpoint/TransformCheckpointServiceNodeTests.java +++ b/x-pack/plugin/transform/src/internalClusterTest/java/org/elasticsearch/xpack/transform/checkpoint/TransformCheckpointServiceNodeTests.java @@ -48,6 +48,8 @@ import org.elasticsearch.tasks.TaskId; import org.elasticsearch.tasks.TaskManager; import org.elasticsearch.test.client.NoOpClient; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.ActionNotFoundTransportException; import org.elasticsearch.xpack.core.transform.action.GetCheckpointAction; import org.elasticsearch.xpack.core.transform.transforms.TransformCheckpoint; @@ -82,6 +84,7 @@ public class 
TransformCheckpointServiceNodeTests extends TransformSingleNodeTest // re-use the mock client for the whole test suite as the underlying thread pool and the // corresponding context if recreated cause unreliable test execution // see https://github.com/elastic/elasticsearch/issues/45238 and https://github.com/elastic/elasticsearch/issues/42577 + private static TestThreadPool threadPool; private static MockClientForCheckpointing mockClientForCheckpointing = null; private IndexBasedTransformConfigManager transformsConfigManager; @@ -96,11 +99,10 @@ private class MockClientForCheckpointing extends NoOpClient { /** * Mock client for checkpointing * - * @param testName name of the test, used for naming the threadpool * @param supportTransformCheckpointApi whether to mock the checkpoint API, if false throws action not found */ - MockClientForCheckpointing(String testName, boolean supportTransformCheckpointApi) { - super(testName); + MockClientForCheckpointing(ThreadPool threadPool, boolean supportTransformCheckpointApi) { + super(threadPool); this.supportTransformCheckpointApi = supportTransformCheckpointApi; } @@ -156,8 +158,11 @@ protected void @Before public void createComponents() { // it's not possible to run it as @BeforeClass as clients aren't initialized + if (threadPool == null) { + threadPool = new TestThreadPool("TransformCheckpointServiceNodeTests"); + } if (mockClientForCheckpointing == null) { - mockClientForCheckpointing = new MockClientForCheckpointing("TransformCheckpointServiceNodeTests", randomBoolean()); + mockClientForCheckpointing = new MockClientForCheckpointing(threadPool, randomBoolean()); } ClusterService clusterService = mock(ClusterService.class); transformsConfigManager = new IndexBasedTransformConfigManager( @@ -185,8 +190,9 @@ public void createComponents() { @AfterClass public static void tearDownClient() { - mockClientForCheckpointing.close(); mockClientForCheckpointing = null; + threadPool.close(); + threadPool = null; } public void 
testCreateReadDeleteCheckpoint() throws InterruptedException { diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/action/TransformPrivilegeCheckerTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/action/TransformPrivilegeCheckerTests.java index 5dd01a32d5c43..cab7377695b0a 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/action/TransformPrivilegeCheckerTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/action/TransformPrivilegeCheckerTests.java @@ -23,7 +23,6 @@ import org.elasticsearch.indices.TestIndexNameExpressionResolver; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.client.NoOpClient; -import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.security.SecurityContext; import org.elasticsearch.xpack.core.security.action.user.HasPrivilegesRequest; @@ -90,11 +89,8 @@ public class TransformPrivilegeCheckerTests extends ESTestCase { @Before public void setupClient() { - if (client != null) { - client.close(); - } - client = new MyMockClient(getTestName()); - threadPool = new TestThreadPool("transform_privilege_checker_tests"); + threadPool = createThreadPool(); + client = new MyMockClient(threadPool); securityContext = new SecurityContext(Settings.EMPTY, threadPool.getThreadContext()) { public User getUser() { return new User(USER_NAME); @@ -104,7 +100,6 @@ public User getUser() { @After public void tearDownClient() { - client.close(); threadPool.shutdown(); } @@ -404,8 +399,8 @@ private static class MyMockClient extends NoOpClient { emptyMap() ); - MyMockClient(String testName) { - super(testName); + MyMockClient(ThreadPool threadPool) { + super(threadPool); } @SuppressWarnings("unchecked") diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/action/TransformUpdaterTests.java 
b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/action/TransformUpdaterTests.java index 27ca18d0e1d2f..fa957a2ac89cf 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/action/TransformUpdaterTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/action/TransformUpdaterTests.java @@ -26,6 +26,8 @@ import org.elasticsearch.indices.TestIndexNameExpressionResolver; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.client.NoOpClient; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.indexing.IndexerState; import org.elasticsearch.xpack.core.security.SecurityContext; @@ -73,6 +75,7 @@ public class TransformUpdaterTests extends ESTestCase { private static final String JOHN = "john"; private final SecurityContext johnSecurityContext = newSecurityContextFor(JOHN); private final IndexNameExpressionResolver indexNameExpressionResolver = TestIndexNameExpressionResolver.newInstance(); + private TestThreadPool threadPool; private Client client; private ClusterService clusterService = mock(ClusterService.class); private TransformAuditor auditor = new MockTransformAuditor(clusterService); @@ -81,8 +84,8 @@ public class TransformUpdaterTests extends ESTestCase { private static class MyMockClient extends NoOpClient { - MyMockClient(String testName) { - super(testName); + MyMockClient(ThreadPool threadPool) { + super(threadPool); } @SuppressWarnings("unchecked") @@ -116,17 +119,18 @@ protected void @Before public void setupClient() { - if (client != null) { - client.close(); + if (threadPool != null) { + threadPool.close(); } - client = new MyMockClient(getTestName()); + threadPool = createThreadPool(); + client = new MyMockClient(threadPool); clusterService = mock(ClusterService.class); auditor = new 
MockTransformAuditor(clusterService); } @After public void tearDownClient() { - client.close(); + threadPool.close(); } public void testTransformUpdateNoAction() throws InterruptedException { diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexerTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexerTests.java index a850c7beef7dd..06de37af346d2 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexerTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexerTests.java @@ -127,7 +127,8 @@ public void testPitInjection() throws InterruptedException { new SettingsConfig.Builder().setUsePit(true).build() ).build(); - try (PitMockClient client = new PitMockClient(getTestName(), true)) { + try (var threadPool = createThreadPool()) { + final var client = new PitMockClient(threadPool, true); MockClientTransformIndexer indexer = new MockClientTransformIndexer( mock(ThreadPool.class), new TransformServices( @@ -220,7 +221,8 @@ public void testPitInjectionIfPitNotSupported() throws InterruptedException { new SettingsConfig.Builder().setUsePit(true).build() ).build(); - try (PitMockClient client = new PitMockClient(getTestName(), false)) { + try (var threadPool = createThreadPool()) { + final var client = new PitMockClient(threadPool, false); MockClientTransformIndexer indexer = new MockClientTransformIndexer( mock(ThreadPool.class), new TransformServices( @@ -296,7 +298,8 @@ public void testDisablePit() throws InterruptedException { TransformConfig config = TransformConfigTests.randomTransformConfig(); boolean pitEnabled = config.getSettings().getUsePit() == null || config.getSettings().getUsePit(); - try (PitMockClient client = new PitMockClient(getTestName(), true)) { + try (var threadPool = createThreadPool()) { + final var 
client = new PitMockClient(threadPool, true); MockClientTransformIndexer indexer = new MockClientTransformIndexer( mock(ThreadPool.class), new TransformServices( @@ -359,7 +362,8 @@ public void testDisablePitWhenThereIsRemoteIndexInSource() throws InterruptedExc .build(); boolean pitEnabled = config.getSettings().getUsePit() == null || config.getSettings().getUsePit(); - try (PitMockClient client = new PitMockClient(getTestName(), true)) { + try (var threadPool = createThreadPool()) { + final var client = new PitMockClient(threadPool, true); MockClientTransformIndexer indexer = new MockClientTransformIndexer( mock(ThreadPool.class), new TransformServices( @@ -413,7 +417,8 @@ public void testDisablePitWhenThereIsRemoteIndexInSource() throws InterruptedExc public void testHandlePitIndexNotFound() throws InterruptedException { // simulate a deleted index due to ILM - try (PitMockClient client = new PitMockClient(getTestName(), true)) { + try (var threadPool = createThreadPool()) { + final var client = new PitMockClient(threadPool, true); ClientTransformIndexer indexer = createTestIndexer(new ParentTaskAssigningClient(client, new TaskId("dummy-node:123456"))); SearchRequest searchRequest = new SearchRequest("deleted-index"); searchRequest.source().pointInTimeBuilder(new PointInTimeBuilder("the_pit_id")); @@ -425,7 +430,8 @@ public void testHandlePitIndexNotFound() throws InterruptedException { } // simulate a deleted index that is essential, search must fail (after a retry without pit) - try (PitMockClient client = new PitMockClient(getTestName(), true)) { + try (var threadPool = createThreadPool()) { + final var client = new PitMockClient(threadPool, true); ClientTransformIndexer indexer = createTestIndexer(new ParentTaskAssigningClient(client, new TaskId("dummy-node:123456"))); SearchRequest searchRequest = new SearchRequest("essential-deleted-index"); searchRequest.source().pointInTimeBuilder(new PointInTimeBuilder("the_pit_id")); @@ -483,8 +489,8 @@ private static 
class PitMockClient extends NoOpClient { private final boolean pitSupported; private AtomicLong pitContextCounter = new AtomicLong(); - PitMockClient(String testName, boolean pitSupported) { - super(testName); + PitMockClient(ThreadPool threadPool, boolean pitSupported) { + super(threadPool); this.pitSupported = pitSupported; } diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerFailureHandlingTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerFailureHandlingTests.java index 161f364b6e7e4..f59aaab33f0f1 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerFailureHandlingTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerFailureHandlingTests.java @@ -35,7 +35,6 @@ import org.elasticsearch.search.suggest.Suggest; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.client.NoOpClient; -import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.common.notifications.Level; import org.elasticsearch.xpack.core.indexing.IndexerState; @@ -274,13 +273,12 @@ protected void persistState(TransformState state, ActionListener listener) @Before public void setUpMocks() { - client = new NoOpClient(getTestName()); - threadPool = new TestThreadPool(getTestName()); + threadPool = createThreadPool(); + client = new NoOpClient(threadPool); } @After public void tearDownClient() { - client.close(); ThreadPool.terminate(threadPool, 30, TimeUnit.SECONDS); } diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerFailureOnStatePersistenceTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerFailureOnStatePersistenceTests.java index 
aeb94cd2c2f66..33ced92a8fa19 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerFailureOnStatePersistenceTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerFailureOnStatePersistenceTests.java @@ -10,7 +10,6 @@ import org.elasticsearch.ElasticsearchTimeoutException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.LatchedActionListener; -import org.elasticsearch.client.internal.Client; import org.elasticsearch.client.internal.ParentTaskAssigningClient; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Tuple; @@ -209,7 +208,8 @@ public void fail(String failureMessage, ActionListener listener) { ? new VersionConflictEngineException(new ShardId("index", "indexUUID", 42), "some_id", 45L, 44L, 43L, 42L) : new ElasticsearchTimeoutException("timeout"); TransformConfigManager configManager = new FailingToPutStoredDocTransformConfigManager(Set.of(0, 1, 2, 3), exceptionToThrow); - try (Client client = new NoOpClient(getTestName())) { + try (var threadPool = createThreadPool()) { + final var client = new NoOpClient(threadPool); MockClientTransformIndexer indexer = new MockClientTransformIndexer( mock(ThreadPool.class), @@ -292,7 +292,8 @@ public void fail(String failureMessage, ActionListener listener) { ? 
new VersionConflictEngineException(new ShardId("index", "indexUUID", 42), "some_id", 45L, 44L, 43L, 42L) : new ElasticsearchTimeoutException("timeout"); TransformConfigManager configManager = new FailingToPutStoredDocTransformConfigManager(Set.of(0, 2, 3, 4), exceptionToThrow); - try (Client client = new NoOpClient(getTestName())) { + try (var threadPool = createThreadPool()) { + final var client = new NoOpClient(threadPool); MockClientTransformIndexer indexer = new MockClientTransformIndexer( mock(ThreadPool.class), new TransformServices( @@ -422,7 +423,8 @@ public void fail(String failureMessage, ActionListener listener) { TransformContext context = new TransformContext(state.get(), null, 0, contextListener); TransformConfigManager configManager = new SeqNoCheckingTransformConfigManager(); - try (Client client = new NoOpClient(getTestName())) { + try (var threadPool = createThreadPool()) { + final var client = new NoOpClient(threadPool); MockClientTransformIndexer indexer = new MockClientTransformIndexer( mock(ThreadPool.class), new TransformServices( diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerStateTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerStateTests.java index 33a72fd1e0181..638a66fa3fb0d 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerStateTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerStateTests.java @@ -351,13 +351,12 @@ public void initialize() { public void setUpMocks() { auditor = MockTransformAuditor.createMockAuditor(); transformConfigManager = new InMemoryTransformConfigManager(); - client = new NoOpClient(getTestName()); threadPool = new TestThreadPool(ThreadPool.Names.GENERIC); + client = new NoOpClient(threadPool); } @After public void tearDownClient() { - client.close(); 
ThreadPool.terminate(threadPool, 30, TimeUnit.SECONDS); } diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerTests.java index 41fcd84af2fcc..6406308312f04 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerTests.java @@ -280,13 +280,12 @@ void validate(ActionListener listener) { public void setUpMocks() { auditor = MockTransformAuditor.createMockAuditor(); transformConfigManager = new InMemoryTransformConfigManager(); - client = new NoOpClient(getTestName()); threadPool = new TestThreadPool(ThreadPool.Names.GENERIC); + client = new NoOpClient(threadPool); } @After public void tearDownClient() { - client.close(); ThreadPool.terminate(threadPool, 30, TimeUnit.SECONDS); } diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformTaskTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformTaskTests.java index 4d5807913636d..277553cd9f4ec 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformTaskTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformTaskTests.java @@ -29,6 +29,7 @@ import org.elasticsearch.tasks.TaskManager; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.client.NoOpClient; +import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.indexing.IndexerState; import org.elasticsearch.xpack.core.transform.TransformConfigVersion; @@ -69,19 +70,21 @@ public class TransformTaskTests extends ESTestCase { + private 
TestThreadPool threadPool; private Client client; @Before public void setupClient() { - if (client != null) { - client.close(); + if (threadPool != null) { + threadPool.close(); } - client = new NoOpClient(getTestName()); + threadPool = createThreadPool(); + client = new NoOpClient(threadPool); } @After public void tearDownClient() { - client.close(); + threadPool.close(); } // see https://github.com/elastic/elasticsearch/issues/48957 diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/AggregationSchemaAndResultTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/AggregationSchemaAndResultTests.java index b93c2544da456..a2dda2a1603f1 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/AggregationSchemaAndResultTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/AggregationSchemaAndResultTests.java @@ -25,6 +25,8 @@ import org.elasticsearch.search.aggregations.metrics.Percentile; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.client.NoOpClient; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.transform.transforms.pivot.AggregationConfig; import org.elasticsearch.xpack.core.transform.transforms.pivot.GroupConfig; import org.elasticsearch.xpack.core.transform.transforms.pivot.GroupConfigTests; @@ -48,25 +50,27 @@ public class AggregationSchemaAndResultTests extends ESTestCase { + private TestThreadPool threadPool; private Client client; @Before public void setupClient() { - if (client != null) { - client.close(); + if (threadPool != null) { + threadPool.close(); } - client = new MyMockClient(getTestName()); + threadPool = createThreadPool(); + client = new MyMockClient(threadPool); } @After public void tearDownClient() { - client.close(); + 
threadPool.close(); } private class MyMockClient extends NoOpClient { - MyMockClient(String testName) { - super(testName); + MyMockClient(ThreadPool threadPool) { + super(threadPool); } @SuppressWarnings("unchecked") diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/PivotTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/PivotTests.java index 40af93c35cd39..37bee4a4eb999 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/PivotTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/PivotTests.java @@ -30,6 +30,8 @@ import org.elasticsearch.search.aggregations.bucket.composite.CompositeAggregation; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.client.NoOpClient; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentParserConfiguration; @@ -82,6 +84,7 @@ public class PivotTests extends ESTestCase { private NamedXContentRegistry namedXContentRegistry; + private TestThreadPool threadPool; private Client client; // exclude aggregations from the analytics module as we don't have parser for it here @@ -102,15 +105,16 @@ public void registerAggregationNamedObjects() throws Exception { @Before public void setupClient() { - if (client != null) { - client.close(); + if (threadPool != null) { + threadPool.close(); } - client = new MyMockClient(getTestName()); + threadPool = createThreadPool(); + client = new MyMockClient(threadPool); } @After public void tearDownClient() { - client.close(); + threadPool.close(); } @Override @@ -279,17 +283,17 @@ public void testPreviewForEmptyAggregation() throws Exception { final AtomicReference exceptionHolder = new 
AtomicReference<>(); final AtomicReference>> responseHolder = new AtomicReference<>(); - Client emptyAggregationClient = new MyMockClientWithEmptyAggregation("empty aggregation test for preview"); - pivot.preview(emptyAggregationClient, null, new HashMap<>(), new SourceConfig("test"), null, 1, ActionListener.wrap(r -> { - responseHolder.set(r); - latch.countDown(); - }, e -> { - exceptionHolder.set(e); - latch.countDown(); - })); - assertTrue(latch.await(100, TimeUnit.MILLISECONDS)); - emptyAggregationClient.close(); - + try (var threadPool = createThreadPool()) { + final var emptyAggregationClient = new MyMockClientWithEmptyAggregation(threadPool); + pivot.preview(emptyAggregationClient, null, new HashMap<>(), new SourceConfig("test"), null, 1, ActionListener.wrap(r -> { + responseHolder.set(r); + latch.countDown(); + }, e -> { + exceptionHolder.set(e); + latch.countDown(); + })); + assertTrue(latch.await(100, TimeUnit.MILLISECONDS)); + } assertThat(exceptionHolder.get(), is(nullValue())); assertThat(responseHolder.get(), is(empty())); } @@ -306,16 +310,17 @@ public void testPreviewForCompositeAggregation() throws Exception { final AtomicReference exceptionHolder = new AtomicReference<>(); final AtomicReference>> responseHolder = new AtomicReference<>(); - Client compositeAggregationClient = new MyMockClientWithCompositeAggregation("composite aggregation test for preview"); - pivot.preview(compositeAggregationClient, null, new HashMap<>(), new SourceConfig("test"), null, 1, ActionListener.wrap(r -> { - responseHolder.set(r); - latch.countDown(); - }, e -> { - exceptionHolder.set(e); - latch.countDown(); - })); - assertTrue(latch.await(100, TimeUnit.MILLISECONDS)); - compositeAggregationClient.close(); + try (var threadPool = createThreadPool()) { + final var compositeAggregationClient = new MyMockClientWithCompositeAggregation(threadPool); + pivot.preview(compositeAggregationClient, null, new HashMap<>(), new SourceConfig("test"), null, 1, ActionListener.wrap(r -> 
{ + responseHolder.set(r); + latch.countDown(); + }, e -> { + exceptionHolder.set(e); + latch.countDown(); + })); + assertTrue(latch.await(100, TimeUnit.MILLISECONDS)); + } assertThat(exceptionHolder.get(), is(nullValue())); assertThat(responseHolder.get(), is(empty())); @@ -328,8 +333,8 @@ private static SearchResponse searchResponseFromAggs(Aggregations aggs) { } private class MyMockClient extends NoOpClient { - MyMockClient(String testName) { - super(testName); + MyMockClient(ThreadPool threadPool) { + super(threadPool); } @SuppressWarnings("unchecked") @@ -383,8 +388,8 @@ protected void } private class MyMockClientWithEmptyAggregation extends NoOpClient { - MyMockClientWithEmptyAggregation(String testName) { - super(testName); + MyMockClientWithEmptyAggregation(ThreadPool threadPool) { + super(threadPool); } @SuppressWarnings("unchecked") @@ -401,8 +406,8 @@ protected void } private class MyMockClientWithCompositeAggregation extends NoOpClient { - MyMockClientWithCompositeAggregation(String testName) { - super(testName); + MyMockClientWithCompositeAggregation(ThreadPool threadPool) { + super(threadPool); } @SuppressWarnings("unchecked") diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/SchemaUtilTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/SchemaUtilTests.java index 37206f20d1269..778ca4bf7767d 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/SchemaUtilTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/SchemaUtilTests.java @@ -16,10 +16,10 @@ import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest; import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse; import org.elasticsearch.action.support.ActionTestUtils; -import org.elasticsearch.client.internal.Client; import org.elasticsearch.common.Strings; import 
org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.client.NoOpClient; +import org.elasticsearch.threadpool.ThreadPool; import java.math.BigInteger; import java.util.Collections; @@ -96,7 +96,8 @@ public void testConvertToIntegerTypeIfNeeded() { } public void testGetSourceFieldMappings() throws InterruptedException { - try (Client client = new FieldCapsMockClient(getTestName())) { + try (var threadPool = createThreadPool()) { + final var client = new FieldCapsMockClient(threadPool); // fields is null this.>assertAsync( listener -> SchemaUtil.getSourceFieldMappings( @@ -188,7 +189,8 @@ public void testGetSourceFieldMappingsWithRuntimeMappings() throws InterruptedEx put("field-3", singletonMap("type", "boolean")); } }; - try (Client client = new FieldCapsMockClient(getTestName())) { + try (var threadPool = createThreadPool()) { + final var client = new FieldCapsMockClient(threadPool); this.>assertAsync( listener -> SchemaUtil.getSourceFieldMappings( client, @@ -210,8 +212,8 @@ public void testGetSourceFieldMappingsWithRuntimeMappings() throws InterruptedEx } private static class FieldCapsMockClient extends NoOpClient { - FieldCapsMockClient(String testName) { - super(testName); + FieldCapsMockClient(ThreadPool threadPool) { + super(threadPool); } @SuppressWarnings("unchecked") diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java index a46b42e2153bd..a32edf1a0545c 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java @@ -19,6 +19,7 @@ import org.elasticsearch.bootstrap.BootstrapCheck; import org.elasticsearch.client.internal.Client; import org.elasticsearch.client.internal.OriginSettingClient; +import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.cluster.metadata.IndexMetadata; 
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.IndexTemplateMetadata; @@ -423,7 +424,7 @@ public void afterBulk(long executionId, BulkRequest request, BulkResponse respon .collect(Collectors.toMap(BulkItemResponse::getId, BulkItemResponse::getFailureMessage)); Map historyFailures = Arrays.stream(response.getItems()) .filter(BulkItemResponse::isFailed) - .filter(r -> r.getIndex().startsWith(HistoryStoreField.INDEX_PREFIX)) + .filter(r -> r.getIndex().startsWith(DataStream.BACKING_INDEX_PREFIX + HistoryStoreField.DATA_STREAM)) .collect(Collectors.toMap(BulkItemResponse::getId, BulkItemResponse::getFailureMessage)); if (triggeredFailures.isEmpty() == false) { String failure = String.join(", ", triggeredFailures.values()); @@ -444,7 +445,7 @@ public void afterBulk(long executionId, BulkRequest request, BulkResponse respon Map overwrittenIds = Arrays.stream(response.getItems()) .filter(BulkItemResponse::isFailed) - .filter(r -> r.getIndex().startsWith(HistoryStoreField.INDEX_PREFIX)) + .filter(r -> r.getIndex().startsWith(DataStream.BACKING_INDEX_PREFIX + HistoryStoreField.DATA_STREAM)) .filter(r -> r.getVersion() > 1) .collect(Collectors.toMap(BulkItemResponse::getId, BulkItemResponse::getFailureMessage)); if (overwrittenIds.isEmpty() == false) { diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/bench/WatcherScheduleEngineBenchmark.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/bench/WatcherScheduleEngineBenchmark.java index cbe233f0d911d..f0fc8686840e1 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/bench/WatcherScheduleEngineBenchmark.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/bench/WatcherScheduleEngineBenchmark.java @@ -107,44 +107,43 @@ public static void main(String[] args) throws Exception { ) ).start() ) { - try (Client client = node.client()) { - 
ClusterHealthResponse response = client.admin().cluster().prepareHealth().setWaitForNodes("2").get(); - if (response.getNumberOfNodes() != 2 && response.getNumberOfDataNodes() != 1) { - throw new IllegalStateException("This benchmark needs one extra data only node running outside this benchmark"); - } + final Client client = node.client(); + ClusterHealthResponse response = client.admin().cluster().prepareHealth().setWaitForNodes("2").get(); + if (response.getNumberOfNodes() != 2 && response.getNumberOfDataNodes() != 1) { + throw new IllegalStateException("This benchmark needs one extra data only node running outside this benchmark"); + } - client.admin().indices().prepareDelete("_all").get(); - client.admin().indices().prepareCreate("test").get(); - client.prepareIndex().setIndex("test").setId("1").setSource("{}", XContentType.JSON).get(); - - System.out.println("===============> indexing [" + numWatches + "] watches"); - for (int i = 0; i < numWatches; i++) { - final String id = "_id_" + i; - client.prepareIndex() - .setIndex(Watch.INDEX) - .setId(id) - .setSource( - new WatchSourceBuilder().trigger(schedule(interval(interval + "s"))) - .input(searchInput(templateRequest(new SearchSourceBuilder(), "test"))) - .condition( - new ScriptCondition( - new Script( - ScriptType.INLINE, - Script.DEFAULT_SCRIPT_LANG, - "ctx.payload.hits.total.value > 0", - emptyMap() - ) + client.admin().indices().prepareDelete("_all").get(); + client.admin().indices().prepareCreate("test").get(); + client.prepareIndex().setIndex("test").setId("1").setSource("{}", XContentType.JSON).get(); + + System.out.println("===============> indexing [" + numWatches + "] watches"); + for (int i = 0; i < numWatches; i++) { + final String id = "_id_" + i; + client.prepareIndex() + .setIndex(Watch.INDEX) + .setId(id) + .setSource( + new WatchSourceBuilder().trigger(schedule(interval(interval + "s"))) + .input(searchInput(templateRequest(new SearchSourceBuilder(), "test"))) + .condition( + new 
ScriptCondition( + new Script( + ScriptType.INLINE, + Script.DEFAULT_SCRIPT_LANG, + "ctx.payload.hits.total.value > 0", + emptyMap() ) ) - .addAction("logging", ActionBuilders.loggingAction("test").setLevel(LoggingLevel.TRACE)) - .buildAsBytes(XContentType.JSON), - XContentType.JSON - ) - .get(); - } - client.admin().indices().prepareFlush(Watch.INDEX, "test").get(); - System.out.println("===============> indexed [" + numWatches + "] watches"); + ) + .addAction("logging", ActionBuilders.loggingAction("test").setLevel(LoggingLevel.TRACE)) + .buildAsBytes(XContentType.JSON), + XContentType.JSON + ) + .get(); } + client.admin().indices().prepareFlush(Watch.INDEX, "test").get(); + System.out.println("===============> indexed [" + numWatches + "] watches"); } // Now for each scheduler impl run the benchmark @@ -160,90 +159,89 @@ public static void main(String[] args) throws Exception { .put("node.data", false) .build(); try (Node node = new MockNode(settings, Arrays.asList(LocalStateWatcher.class))) { - try (Client client = node.client()) { - client.admin().cluster().prepareHealth().setWaitForNodes("2").get(); - client.admin().indices().prepareDelete(HistoryStoreField.DATA_STREAM + "*").get(); - client.admin().cluster().prepareHealth(Watch.INDEX, "test").setWaitForYellowStatus().get(); - - Clock clock = node.injector().getInstance(Clock.class); - while (new WatcherStatsRequestBuilder(client).get() - .getNodes() - .stream() - .allMatch(r -> r.getWatcherState() == WatcherState.STARTED) == false) { - Thread.sleep(100); - } - long actualLoadedWatches = new WatcherStatsRequestBuilder(client).get().getWatchesCount(); - if (actualLoadedWatches != numWatches) { - throw new IllegalStateException( - "Expected [" - + numWatches - + "] watched to be loaded, but only [" - + actualLoadedWatches - + "] watches were actually loaded" - ); - } - long startTime = clock.millis(); - System.out.println("==> watcher started, waiting [" + benchTime + "] seconds now..."); - - final 
AtomicBoolean start = new AtomicBoolean(true); - final MeanMetric jvmUsedHeapSpace = new MeanMetric(); - Thread sampleThread = new Thread(new Runnable() { - @Override - public void run() { - try { - while (start.get()) { - NodesStatsResponse response = client.admin().cluster().prepareNodesStats("_master").setJvm(true).get(); - ByteSizeValue heapUsed = response.getNodes().get(0).getJvm().getMem().getHeapUsed(); - jvmUsedHeapSpace.inc(heapUsed.getBytes()); - Thread.sleep(1000); - } - } catch (InterruptedException ignored) {} - } - }); - sampleThread.start(); - Thread.sleep(benchTime); - long endTime = clock.millis(); - start.set(false); - sampleThread.join(); - - NodesStatsResponse response = client.admin().cluster().prepareNodesStats().setThreadPool(true).get(); - for (NodeStats nodeStats : response.getNodes()) { - for (ThreadPoolStats.Stats threadPoolStats : nodeStats.getThreadPool()) { - if ("watcher".equals(threadPoolStats.name())) { - stats.setWatcherThreadPoolStats(threadPoolStats); - } - } - } - client.admin().indices().prepareRefresh(HistoryStoreField.DATA_STREAM + "*").get(); - Script script = new Script( - ScriptType.INLINE, - Script.DEFAULT_SCRIPT_LANG, - "doc['trigger_event.schedule.triggered_time'].value - doc['trigger_event.schedule.scheduled_time'].value", - emptyMap() + final Client client = node.client(); + client.admin().cluster().prepareHealth().setWaitForNodes("2").get(); + client.admin().indices().prepareDelete(HistoryStoreField.DATA_STREAM + "*").get(); + client.admin().cluster().prepareHealth(Watch.INDEX, "test").setWaitForYellowStatus().get(); + + Clock clock = node.injector().getInstance(Clock.class); + while (new WatcherStatsRequestBuilder(client).get() + .getNodes() + .stream() + .allMatch(r -> r.getWatcherState() == WatcherState.STARTED) == false) { + Thread.sleep(100); + } + long actualLoadedWatches = new WatcherStatsRequestBuilder(client).get().getWatchesCount(); + if (actualLoadedWatches != numWatches) { + throw new 
IllegalStateException( + "Expected [" + + numWatches + + "] watched to be loaded, but only [" + + actualLoadedWatches + + "] watches were actually loaded" ); - SearchResponse searchResponse = client.prepareSearch(HistoryStoreField.DATA_STREAM + "*") - .setQuery(QueryBuilders.rangeQuery("trigger_event.schedule.scheduled_time").gte(startTime).lte(endTime)) - .addAggregation(terms("state").field("state")) - .addAggregation(histogram("delay").script(script).interval(10)) - .addAggregation(percentiles("percentile_delay").script(script).percentiles(1.0, 20.0, 50.0, 80.0, 99.0)) - .get(); - Terms terms = searchResponse.getAggregations().get("state"); - stats.setStateStats(terms); - Histogram histogram = searchResponse.getAggregations().get("delay"); - stats.setDelayStats(histogram); - System.out.println("===> State"); - for (Terms.Bucket bucket : terms.getBuckets()) { - System.out.println("\t" + bucket.getKey() + "=" + bucket.getDocCount()); + } + long startTime = clock.millis(); + System.out.println("==> watcher started, waiting [" + benchTime + "] seconds now..."); + + final AtomicBoolean start = new AtomicBoolean(true); + final MeanMetric jvmUsedHeapSpace = new MeanMetric(); + Thread sampleThread = new Thread(new Runnable() { + @Override + public void run() { + try { + while (start.get()) { + NodesStatsResponse response = client.admin().cluster().prepareNodesStats("_master").setJvm(true).get(); + ByteSizeValue heapUsed = response.getNodes().get(0).getJvm().getMem().getHeapUsed(); + jvmUsedHeapSpace.inc(heapUsed.getBytes()); + Thread.sleep(1000); + } + } catch (InterruptedException ignored) {} } - System.out.println("===> Delay"); - for (Histogram.Bucket bucket : histogram.getBuckets()) { - System.out.println("\t" + bucket.getKey() + "=" + bucket.getDocCount()); + }); + sampleThread.start(); + Thread.sleep(benchTime); + long endTime = clock.millis(); + start.set(false); + sampleThread.join(); + + NodesStatsResponse response = 
client.admin().cluster().prepareNodesStats().setThreadPool(true).get(); + for (NodeStats nodeStats : response.getNodes()) { + for (ThreadPoolStats.Stats threadPoolStats : nodeStats.getThreadPool()) { + if ("watcher".equals(threadPoolStats.name())) { + stats.setWatcherThreadPoolStats(threadPoolStats); + } } - Percentiles percentiles = searchResponse.getAggregations().get("percentile_delay"); - stats.setDelayPercentiles(percentiles); - stats.setAvgJvmUsed(jvmUsedHeapSpace); - new WatcherServiceRequestBuilder(client).stop().get(); } + client.admin().indices().prepareRefresh(HistoryStoreField.DATA_STREAM + "*").get(); + Script script = new Script( + ScriptType.INLINE, + Script.DEFAULT_SCRIPT_LANG, + "doc['trigger_event.schedule.triggered_time'].value - doc['trigger_event.schedule.scheduled_time'].value", + emptyMap() + ); + SearchResponse searchResponse = client.prepareSearch(HistoryStoreField.DATA_STREAM + "*") + .setQuery(QueryBuilders.rangeQuery("trigger_event.schedule.scheduled_time").gte(startTime).lte(endTime)) + .addAggregation(terms("state").field("state")) + .addAggregation(histogram("delay").script(script).interval(10)) + .addAggregation(percentiles("percentile_delay").script(script).percentiles(1.0, 20.0, 50.0, 80.0, 99.0)) + .get(); + Terms terms = searchResponse.getAggregations().get("state"); + stats.setStateStats(terms); + Histogram histogram = searchResponse.getAggregations().get("delay"); + stats.setDelayStats(histogram); + System.out.println("===> State"); + for (Terms.Bucket bucket : terms.getBuckets()) { + System.out.println("\t" + bucket.getKey() + "=" + bucket.getDocCount()); + } + System.out.println("===> Delay"); + for (Histogram.Bucket bucket : histogram.getBuckets()) { + System.out.println("\t" + bucket.getKey() + "=" + bucket.getDocCount()); + } + Percentiles percentiles = searchResponse.getAggregations().get("percentile_delay"); + stats.setDelayPercentiles(percentiles); + stats.setAvgJvmUsed(jvmUsedHeapSpace); + new 
WatcherServiceRequestBuilder(client).stop().get(); } } diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/MlAssignmentPlannerUpgradeIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/MlAssignmentPlannerUpgradeIT.java new file mode 100644 index 0000000000000..fc78bf36c72fb --- /dev/null +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/MlAssignmentPlannerUpgradeIT.java @@ -0,0 +1,289 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.upgrades; + +import org.elasticsearch.Version; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.core.Strings; +import org.elasticsearch.logging.LogManager; +import org.elasticsearch.logging.Logger; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Base64; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; + +import static org.elasticsearch.client.WarningsHandler.PERMISSIVE; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; + +public class MlAssignmentPlannerUpgradeIT extends AbstractUpgradeTestCase { + + private Logger logger = LogManager.getLogger(MlAssignmentPlannerUpgradeIT.class); + + // See PyTorchModelIT for how this model was created + static final String BASE_64_ENCODED_MODEL = + "UEsDBAAACAgAAAAAAAAAAAAAAAAAAAAAAAAUAA4Ac2ltcGxlbW9kZWwvZGF0YS5wa2xGQgoAWlpaWlpaWlpaWoACY19fdG9yY2hfXwp" + + "TdXBlclNpbXBsZQpxACmBfShYCAAAAHRyYWluaW5ncQGIdWJxAi5QSwcIXOpBBDQAAAA0AAAAUEsDBBQACAgIAAAAAAAAAAAAAAAAAA" + + 
"AAAAAdAEEAc2ltcGxlbW9kZWwvY29kZS9fX3RvcmNoX18ucHlGQj0AWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaW" + + "lpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWnWOMWvDMBCF9/yKI5MMrnHTQsHgjt2aJdlCEIp9SgWSTpykFvfXV1htaYds0nfv473Jqhjh" + + "kAPywbhgUbzSnC02wwZAyqBYOUzIUUoY4XRe6SVr/Q8lVsYbf4UBLkS2kBk1aOIPxbOIaPVQtEQ8vUnZ/WlrSxTA+JCTNHMc4Ig+Ele" + + "s+Jod+iR3N/jDDf74wxu4e/5+DmtE9mUyhdgFNq7bZ3ekehbruC6aTxS/c1rom6Z698WrEfIYxcn4JGTftLA7tzCnJeD41IJVC+U07k" + + "umUHw3E47Vqh+xnULeFisYLx064mV8UTZibWFMmX0p23wBUEsHCE0EGH3yAAAAlwEAAFBLAwQUAAgICAAAAAAAAAAAAAAAAAAAAAAAJ" + + "wA5AHNpbXBsZW1vZGVsL2NvZGUvX190b3JjaF9fLnB5LmRlYnVnX3BrbEZCNQBaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpa" + + "WlpaWlpaWlpaWlpaWlpaWlpaWlpaWrWST0+DMBiHW6bOod/BGS94kKpo2Mwyox5x3pbgiXSAFtdR/nQu3IwHiZ9oX88CaeGu9tL0efq" + + "+v8P7fmiGA1wgTgoIcECZQqe6vmYD6G4hAJOcB1E8NazTm+ELyzY4C3Q0z8MsRwF+j4JlQUPEEo5wjH0WB9hCNFqgpOCExZY5QnnEw7" + + "ME+0v8GuaIs8wnKI7RigVrKkBzm0lh2OdjkeHllG28f066vK6SfEypF60S+vuYt4gjj2fYr/uPrSvRv356TepfJ9iWJRN0OaELQSZN3" + + "FRPNbcP1PTSntMr0x0HzLZQjPYIEo3UaFeiISRKH0Mil+BE/dyT1m7tCBLwVO1MX4DK3bbuTlXuy8r71j5Aoho66udAoseOnrdVzx28" + + "UFW6ROuO/lT6QKKyo79VU54emj9QSwcInsUTEDMBAAAFAwAAUEsDBAAACAgAAAAAAAAAAAAAAAAAAAAAAAAZAAYAc2ltcGxlbW9kZWw" + + "vY29uc3RhbnRzLnBrbEZCAgBaWoACKS5QSwcIbS8JVwQAAAAEAAAAUEsDBAAACAgAAAAAAAAAAAAAAAAAAAAAAAATADsAc2ltcGxlbW" + + "9kZWwvdmVyc2lvbkZCNwBaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaWlpaMwpQSwcI0" + + "Z5nVQIAAAACAAAAUEsBAgAAAAAICAAAAAAAAFzqQQQ0AAAANAAAABQAAAAAAAAAAAAAAAAAAAAAAHNpbXBsZW1vZGVsL2RhdGEucGts" + + "UEsBAgAAFAAICAgAAAAAAE0EGH3yAAAAlwEAAB0AAAAAAAAAAAAAAAAAhAAAAHNpbXBsZW1vZGVsL2NvZGUvX190b3JjaF9fLnB5UEs" + + "BAgAAFAAICAgAAAAAAJ7FExAzAQAABQMAACcAAAAAAAAAAAAAAAAAAgIAAHNpbXBsZW1vZGVsL2NvZGUvX190b3JjaF9fLnB5LmRlYn" + + "VnX3BrbFBLAQIAAAAACAgAAAAAAABtLwlXBAAAAAQAAAAZAAAAAAAAAAAAAAAAAMMDAABzaW1wbGVtb2RlbC9jb25zdGFudHMucGtsU" + + "EsBAgAAAAAICAAAAAAAANGeZ1UCAAAAAgAAABMAAAAAAAAAAAAAAAAAFAQAAHNpbXBsZW1vZGVsL3ZlcnNpb25QSwYGLAAAAAAAAAAe" + + 
"Ay0AAAAAAAAAAAAFAAAAAAAAAAUAAAAAAAAAagEAAAAAAACSBAAAAAAAAFBLBgcAAAAA/AUAAAAAAAABAAAAUEsFBgAAAAAFAAUAagE" + + "AAJIEAAAAAA=="; + static final long RAW_MODEL_SIZE; // size of the model before base64 encoding + static { + RAW_MODEL_SIZE = Base64.getDecoder().decode(BASE_64_ENCODED_MODEL).length; + } + + public void testMlAssignmentPlannerUpgrade() throws Exception { + assumeTrue("NLP model deployments added in 8.0", isOriginalClusterVersionAtLeast(Version.V_8_0_0)); + + logger.info("Starting testMlAssignmentPlannerUpgrade, model size {}", RAW_MODEL_SIZE); + + switch (CLUSTER_TYPE) { + case OLD -> { + // setup deployments using old and new memory format + setupDeployments(); + + waitForDeploymentStarted("old_memory_format"); + waitForDeploymentStarted("new_memory_format"); + + // assert correct memory format is used + assertOldMemoryFormat("old_memory_format"); + if (isOriginalClusterVersionAtLeast(Version.V_8_11_0)) { + assertNewMemoryFormat("new_memory_format"); + } else { + assertOldMemoryFormat("new_memory_format"); + } + } + case MIXED -> { + ensureHealth(".ml-inference-*,.ml-config*", (request -> { + request.addParameter("wait_for_status", "yellow"); + request.addParameter("timeout", "70s"); + })); + waitForDeploymentStarted("old_memory_format"); + waitForDeploymentStarted("new_memory_format"); + + // assert correct memory format is used + assertOldMemoryFormat("old_memory_format"); + if (isOriginalClusterVersionAtLeast(Version.V_8_11_0)) { + assertNewMemoryFormat("new_memory_format"); + } else { + assertOldMemoryFormat("new_memory_format"); + } + + } + case UPGRADED -> { + ensureHealth(".ml-inference-*,.ml-config*", (request -> { + request.addParameter("wait_for_status", "yellow"); + request.addParameter("timeout", "70s"); + })); + waitForDeploymentStarted("old_memory_format"); + waitForDeploymentStarted("new_memory_format"); + + // assert correct memory format is used + assertOldMemoryFormat("old_memory_format"); + assertNewMemoryFormat("new_memory_format"); + 
+ cleanupDeployments(); + } + } + } + + @SuppressWarnings("unchecked") + private void waitForDeploymentStarted(String modelId) throws Exception { + assertBusy(() -> { + var response = getTrainedModelStats(modelId); + Map<String, Object> map = entityAsMap(response); + List<Map<String, Object>> stats = (List<Map<String, Object>>) map.get("trained_model_stats"); + assertThat(stats, hasSize(1)); + var stat = stats.get(0); + assertThat(stat.toString(), XContentMapValues.extractValue("deployment_stats.state", stat), equalTo("started")); + }, 30, TimeUnit.SECONDS); + } + + @SuppressWarnings("unchecked") + private void assertOldMemoryFormat(String modelId) throws Exception { + // There was a change in the MEMORY_OVERHEAD value in 8.3.0, see #86416 + long memoryOverheadMb = Version.fromString(UPGRADE_FROM_VERSION).onOrAfter(Version.V_8_2_1) ? 240 : 270; + var response = getTrainedModelStats(modelId); + Map<String, Object> map = entityAsMap(response); + List<Map<String, Object>> stats = (List<Map<String, Object>>) map.get("trained_model_stats"); + assertThat(stats, hasSize(1)); + var stat = stats.get(0); + Long expectedMemoryUsage = ByteSizeValue.ofMb(memoryOverheadMb).getBytes() + RAW_MODEL_SIZE * 2; + Integer actualMemoryUsage = (Integer) XContentMapValues.extractValue("model_size_stats.required_native_memory_bytes", stat); + assertThat( + Strings.format("Memory usage mismatch for the model %s in cluster state %s", modelId, CLUSTER_TYPE.toString()), + actualMemoryUsage, + equalTo(expectedMemoryUsage.intValue()) + ); + } + + @SuppressWarnings("unchecked") + private void assertNewMemoryFormat(String modelId) throws Exception { + var response = getTrainedModelStats(modelId); + Map<String, Object> map = entityAsMap(response); + List<Map<String, Object>> stats = (List<Map<String, Object>>) map.get("trained_model_stats"); + assertThat(stats, hasSize(1)); + var stat = stats.get(0); + Long expectedMemoryUsage = ByteSizeValue.ofMb(300).getBytes() + RAW_MODEL_SIZE + ByteSizeValue.ofMb(10).getBytes(); + Integer actualMemoryUsage = (Integer) XContentMapValues.extractValue("model_size_stats.required_native_memory_bytes", stat); + 
assertThat(stat.toString(), actualMemoryUsage.toString(), equalTo(expectedMemoryUsage.toString())); + } + + private Response getTrainedModelStats(String modelId) throws IOException { + Request request = new Request("GET", "/_ml/trained_models/" + modelId + "/_stats"); + request.setOptions(request.getOptions().toBuilder().setWarningsHandler(PERMISSIVE).build()); + var response = client().performRequest(request); + assertOK(response); + return response; + } + + private Response infer(String input, String modelId) throws IOException { + Request request = new Request("POST", "/_ml/trained_models/" + modelId + "/deployment/_infer"); + request.setJsonEntity(Strings.format(""" + { "docs": [{"input":"%s"}] } + """, input)); + request.setOptions(request.getOptions().toBuilder().setWarningsHandler(PERMISSIVE).build()); + var response = client().performRequest(request); + assertOK(response); + return response; + } + + private void putModelDefinition(String modelId) throws IOException { + Request request = new Request("PUT", "_ml/trained_models/" + modelId + "/definition/0"); + request.setJsonEntity(Strings.format(""" + {"total_definition_length":%s,"definition": "%s","total_parts": 1}""", RAW_MODEL_SIZE, BASE_64_ENCODED_MODEL)); + client().performRequest(request); + } + + private void putVocabulary(List<String> vocabulary, String modelId) throws IOException { + List<String> vocabularyWithPad = new ArrayList<>(); + vocabularyWithPad.add("[PAD]"); + vocabularyWithPad.add("[UNK]"); + vocabularyWithPad.addAll(vocabulary); + String quotedWords = vocabularyWithPad.stream().map(s -> "\"" + s + "\"").collect(Collectors.joining(",")); + + Request request = new Request("PUT", "_ml/trained_models/" + modelId + "/vocabulary"); + request.setJsonEntity(Strings.format(""" + { "vocabulary": [%s] } + """, quotedWords)); + client().performRequest(request); + } + + private void setupDeployments() throws Exception { + createTrainedModel("old_memory_format", 0, 0); + putModelDefinition("old_memory_format"); + 
putVocabulary(List.of("these", "are", "my", "words"), "old_memory_format"); + startDeployment("old_memory_format"); + + createTrainedModel("new_memory_format", ByteSizeValue.ofMb(300).getBytes(), ByteSizeValue.ofMb(10).getBytes()); + putModelDefinition("new_memory_format"); + putVocabulary(List.of("these", "are", "my", "words"), "new_memory_format"); + startDeployment("new_memory_format"); + } + + private void cleanupDeployments() throws IOException { + stopDeployment("old_memory_format"); + deleteTrainedModel("old_memory_format"); + stopDeployment("new_memory_format"); + deleteTrainedModel("new_memory_format"); + } + + private void createTrainedModel(String modelId, long perDeploymentMemoryBytes, long perAllocationMemoryBytes) throws IOException { + Request request = new Request("PUT", "/_ml/trained_models/" + modelId); + if (perAllocationMemoryBytes > 0 && perDeploymentMemoryBytes > 0) { + request.setJsonEntity(Strings.format(""" + { + "description": "simple model for testing", + "model_type": "pytorch", + "inference_config": { + "pass_through": { + "tokenization": { + "bert": { + "with_special_tokens": false + } + } + } + }, + "metadata": { + "per_deployment_memory_bytes": %s, + "per_allocation_memory_bytes": %s + } + }""", perDeploymentMemoryBytes, perAllocationMemoryBytes)); + } else { + request.setJsonEntity(""" + { + "description": "simple model for testing", + "model_type": "pytorch", + "inference_config": { + "pass_through": { + "tokenization": { + "bert": { + "with_special_tokens": false + } + } + } + } + }"""); + } + client().performRequest(request); + } + + private void deleteTrainedModel(String modelId) throws IOException { + Request request = new Request("DELETE", "_ml/trained_models/" + modelId); + client().performRequest(request); + } + + private Response startDeployment(String modelId) throws IOException { + return startDeployment(modelId, "started"); + } + + private Response startDeployment(String modelId, String waitForState) throws IOException { 
+ Request request = new Request( + "POST", + "/_ml/trained_models/" + + modelId + + "/deployment/_start?timeout=40s&wait_for=" + + waitForState + + "&inference_threads=1&model_threads=1" + ); + request.setOptions(request.getOptions().toBuilder().setWarningsHandler(PERMISSIVE).build()); + var response = client().performRequest(request); + assertOK(response); + return response; + } + + private void stopDeployment(String modelId) throws IOException { + String endpoint = "/_ml/trained_models/" + modelId + "/deployment/_stop"; + Request request = new Request("POST", endpoint); + client().performRequest(request); + } +} diff --git a/x-pack/qa/xpack-prefix-rest-compat/build.gradle b/x-pack/qa/xpack-prefix-rest-compat/build.gradle index caca3b63d4951..8b91aae21ff73 100644 --- a/x-pack/qa/xpack-prefix-rest-compat/build.gradle +++ b/x-pack/qa/xpack-prefix-rest-compat/build.gradle @@ -34,10 +34,11 @@ tasks.named("copyRestCompatTestTask").configure { task -> task.dependsOn(configurations.compatXpackTests); task.setXpackConfig(configurations.compatXpackTests); task.getIncludeXpack().set(List.of("license", "migration", "ml", "rollup", "sql", "ssl")); - task.getOutputResourceDir().set(project.getLayout().getBuildDirectory().dir("restResources/v${compatVersion}/yamlTests/original")); + def fileOperations = task.getFileOperations() + task.getOutputResourceDir().set(project.getLayout().getBuildDirectory().dir("restResources/v${compatVersion}/yamlTests/original")) task.setXpackConfigToFileTree( - config -> fileTree( - config.getSingleFile().toPath() + config -> fileOperations.fileTree( + config.getSingleFile() ) ) }