diff --git a/.buildkite/pipelines/periodic.yml b/.buildkite/pipelines/periodic.yml index 3ce048533d131..3928354e54e8f 100644 --- a/.buildkite/pipelines/periodic.yml +++ b/.buildkite/pipelines/periodic.yml @@ -1301,7 +1301,7 @@ steps: image: family/elasticsearch-ubuntu-2004 machineType: n2-standard-8 buildDirectory: /dev/shm/bk - if: build.branch == "main" || build.branch =~ /^[0-9]+\.[0-9]+\$/ + if: build.branch == "main" || build.branch == "7.17" - label: Check branch consistency command: .ci/scripts/run-gradle.sh branchConsistency timeout_in_minutes: 15 diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index a5e959e795c07..64ad5c5c851e3 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -26,3 +26,16 @@ x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/sto # APM Data index templates, etc. x-pack/plugin/apm-data/src/main/resources @elastic/apm-server x-pack/plugin/apm-data/src/yamlRestTest/resources @elastic/apm-server + +# Delivery +gradle @elastic/es-delivery +build-conventions @elastic/es-delivery +build-tools @elastic/es-delivery +build-tools-internal @elastic/es-delivery +*.gradle @elastic/es-delivery +.buildkite @elastic/es-delivery +.ci @elastic/es-delivery +.idea @elastic/es-delivery +distribution/src @elastic/es-delivery +distribution/packages/src @elastic/es-delivery +distribution/docker/src @elastic/es-delivery diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/AggregatorBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/AggregatorBenchmark.java index 7b1efb82cd1f0..e6f27211525f5 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/AggregatorBenchmark.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/AggregatorBenchmark.java @@ -27,11 +27,9 @@ import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.BooleanBlock; import org.elasticsearch.compute.data.BytesRefBlock; -import org.elasticsearch.compute.data.DoubleArrayVector; import org.elasticsearch.compute.data.DoubleBlock; import org.elasticsearch.compute.data.ElementType; import org.elasticsearch.compute.data.IntBlock; -import org.elasticsearch.compute.data.LongArrayVector; import org.elasticsearch.compute.data.LongBlock; import org.elasticsearch.compute.data.Page; import org.elasticsearch.compute.operator.AggregationOperator; @@ -67,6 +65,10 @@ public class AggregatorBenchmark { private static final int GROUPS = 5; private static final BigArrays BIG_ARRAYS = BigArrays.NON_RECYCLING_INSTANCE; // TODO real big arrays? 
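Note on the AggregatorBenchmark hunks that follow: they replace direct block construction (LongArrayVector, LongBlock.newBlockBuilder, and friends) with one shared BlockFactory threaded through the DriverContext, so every block allocation in the benchmark flows through a single factory. A minimal sketch of that pattern, assuming only the factory wiring and builder calls visible in this diff (the class and method names of the sketch itself are illustrative):

    import org.elasticsearch.common.breaker.NoopCircuitBreaker;
    import org.elasticsearch.common.util.BigArrays;
    import org.elasticsearch.compute.data.BlockFactory;
    import org.elasticsearch.compute.data.LongBlock;

    class BlockFactorySketch {
        // A factory over a no-op breaker: nothing is limited or recycled, but all
        // block allocations still go through one place, as in these benchmarks.
        private static final BlockFactory FACTORY = BlockFactory.getInstance(
            new NoopCircuitBreaker("noop"),
            BigArrays.NON_RECYCLING_INSTANCE
        );

        static LongBlock longs(int length) {
            // Builders come from the factory now, not from static Block methods.
            var builder = FACTORY.newLongBlockBuilder(length);
            for (int i = 0; i < length; i++) {
                builder.appendLong(i);
            }
            return builder.build();
        }
    }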
+ private static final BlockFactory blockFactory = BlockFactory.getInstance( + new NoopCircuitBreaker("noop"), + BigArrays.NON_RECYCLING_INSTANCE + ); private static final String LONGS = "longs"; private static final String INTS = "ints"; @@ -116,8 +118,7 @@ public class AggregatorBenchmark { @Param({ VECTOR_LONGS, HALF_NULL_LONGS, VECTOR_DOUBLES, HALF_NULL_DOUBLES }) public String blockType; - private static Operator operator(String grouping, String op, String dataType) { - DriverContext driverContext = driverContext(); + private static Operator operator(DriverContext driverContext, String grouping, String op, String dataType) { if (grouping.equals("none")) { return new AggregationOperator( List.of(supplier(op, dataType, 0).aggregatorFactory(AggregatorMode.SINGLE).apply(driverContext)), @@ -432,8 +433,8 @@ private static void checkUngrouped(String prefix, String op, String dataType, Pa } } - private static Page page(String grouping, String blockType) { - Block dataBlock = dataBlock(blockType); + private static Page page(BlockFactory blockFactory, String grouping, String blockType) { + Block dataBlock = dataBlock(blockFactory, blockType); if (grouping.equals("none")) { return new Page(dataBlock); } @@ -441,15 +442,15 @@ private static Page page(String grouping, String blockType) { return new Page(Stream.concat(blocks.stream(), Stream.of(dataBlock)).toArray(Block[]::new)); } - private static Block dataBlock(String blockType) { + private static Block dataBlock(BlockFactory blockFactory, String blockType) { return switch (blockType) { - case VECTOR_LONGS -> new LongArrayVector(LongStream.range(0, BLOCK_LENGTH).toArray(), BLOCK_LENGTH).asBlock(); - case VECTOR_DOUBLES -> new DoubleArrayVector( + case VECTOR_LONGS -> blockFactory.newLongArrayVector(LongStream.range(0, BLOCK_LENGTH).toArray(), BLOCK_LENGTH).asBlock(); + case VECTOR_DOUBLES -> blockFactory.newDoubleArrayVector( LongStream.range(0, BLOCK_LENGTH).mapToDouble(l -> Long.valueOf(l).doubleValue()).toArray(), BLOCK_LENGTH ).asBlock(); case MULTIVALUED_LONGS -> { - var builder = LongBlock.newBlockBuilder(BLOCK_LENGTH); + var builder = blockFactory.newLongBlockBuilder(BLOCK_LENGTH); builder.beginPositionEntry(); for (int i = 0; i < BLOCK_LENGTH; i++) { builder.appendLong(i); @@ -462,7 +463,7 @@ private static Block dataBlock(String blockType) { yield builder.build(); } case HALF_NULL_LONGS -> { - var builder = LongBlock.newBlockBuilder(BLOCK_LENGTH); + var builder = blockFactory.newLongBlockBuilder(BLOCK_LENGTH); for (int i = 0; i < BLOCK_LENGTH; i++) { builder.appendLong(i); builder.appendNull(); @@ -470,7 +471,7 @@ private static Block dataBlock(String blockType) { yield builder.build(); } case HALF_NULL_DOUBLES -> { - var builder = DoubleBlock.newBlockBuilder(BLOCK_LENGTH); + var builder = blockFactory.newDoubleBlockBuilder(BLOCK_LENGTH); for (int i = 0; i < BLOCK_LENGTH; i++) { builder.appendDouble(i); builder.appendNull(); @@ -502,7 +503,7 @@ private static Block groupingBlock(String grouping, String blockType) { }; return switch (grouping) { case LONGS -> { - var builder = LongBlock.newBlockBuilder(BLOCK_LENGTH); + var builder = blockFactory.newLongBlockBuilder(BLOCK_LENGTH); for (int i = 0; i < BLOCK_LENGTH; i++) { for (int v = 0; v < valuesPerGroup; v++) { builder.appendLong(i % GROUPS); @@ -511,7 +512,7 @@ private static Block groupingBlock(String grouping, String blockType) { yield builder.build(); } case INTS -> { - var builder = IntBlock.newBlockBuilder(BLOCK_LENGTH); + var builder = 
blockFactory.newIntBlockBuilder(BLOCK_LENGTH); for (int i = 0; i < BLOCK_LENGTH; i++) { for (int v = 0; v < valuesPerGroup; v++) { builder.appendInt(i % GROUPS); @@ -520,7 +521,7 @@ private static Block groupingBlock(String grouping, String blockType) { yield builder.build(); } case DOUBLES -> { - var builder = DoubleBlock.newBlockBuilder(BLOCK_LENGTH); + var builder = blockFactory.newDoubleBlockBuilder(BLOCK_LENGTH); for (int i = 0; i < BLOCK_LENGTH; i++) { for (int v = 0; v < valuesPerGroup; v++) { builder.appendDouble(i % GROUPS); @@ -529,7 +530,7 @@ private static Block groupingBlock(String grouping, String blockType) { yield builder.build(); } case BOOLEANS -> { - BooleanBlock.Builder builder = BooleanBlock.newBlockBuilder(BLOCK_LENGTH); + BooleanBlock.Builder builder = blockFactory.newBooleanBlockBuilder(BLOCK_LENGTH); for (int i = 0; i < BLOCK_LENGTH; i++) { for (int v = 0; v < valuesPerGroup; v++) { builder.appendBoolean(i % 2 == 1); @@ -538,7 +539,7 @@ private static Block groupingBlock(String grouping, String blockType) { yield builder.build(); } case BYTES_REFS -> { - BytesRefBlock.Builder builder = BytesRefBlock.newBlockBuilder(BLOCK_LENGTH); + BytesRefBlock.Builder builder = blockFactory.newBytesRefBlockBuilder(BLOCK_LENGTH); for (int i = 0; i < BLOCK_LENGTH; i++) { for (int v = 0; v < valuesPerGroup; v++) { builder.appendBytesRef(bytesGroup(i % GROUPS)); @@ -574,8 +575,9 @@ private static void run(String grouping, String op, String blockType, int opCoun default -> throw new IllegalArgumentException(); }; - Operator operator = operator(grouping, op, dataType); - Page page = page(grouping, blockType); + DriverContext driverContext = driverContext(); + Operator operator = operator(driverContext, grouping, op, dataType); + Page page = page(driverContext.blockFactory(), grouping, blockType); for (int i = 0; i < opCount; i++) { operator.addInput(page); } @@ -584,9 +586,6 @@ private static void run(String grouping, String op, String blockType, int opCoun } static DriverContext driverContext() { - return new DriverContext( - BigArrays.NON_RECYCLING_INSTANCE, - BlockFactory.getInstance(new NoopCircuitBreaker("noop"), BigArrays.NON_RECYCLING_INSTANCE) - ); + return new DriverContext(BigArrays.NON_RECYCLING_INSTANCE, blockFactory); } } diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/BlockBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/BlockBenchmark.java new file mode 100644 index 0000000000000..e0281dbb856d4 --- /dev/null +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/BlockBenchmark.java @@ -0,0 +1,849 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.benchmark.compute.operator; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.breaker.NoopCircuitBreaker; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.BitArray; +import org.elasticsearch.common.util.BytesRefArray; +import org.elasticsearch.common.util.DoubleArray; +import org.elasticsearch.common.util.IntArray; +import org.elasticsearch.common.util.LongArray; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.BooleanBigArrayBlock; +import org.elasticsearch.compute.data.BooleanBigArrayVector; +import org.elasticsearch.compute.data.BooleanBlock; +import org.elasticsearch.compute.data.BooleanVector; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.BytesRefVector; +import org.elasticsearch.compute.data.DoubleBigArrayBlock; +import org.elasticsearch.compute.data.DoubleBigArrayVector; +import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.compute.data.DoubleVector; +import org.elasticsearch.compute.data.IntBigArrayBlock; +import org.elasticsearch.compute.data.IntBigArrayVector; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.LongBigArrayBlock; +import org.elasticsearch.compute.data.LongBigArrayVector; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.data.LongVector; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.Fork; +import org.openjdk.jmh.annotations.Level; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.Mode; +import org.openjdk.jmh.annotations.OperationsPerInvocation; +import org.openjdk.jmh.annotations.OutputTimeUnit; +import org.openjdk.jmh.annotations.Param; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.TearDown; +import org.openjdk.jmh.annotations.Warmup; + +import java.util.ArrayList; +import java.util.BitSet; +import java.util.Collections; +import java.util.List; +import java.util.Random; +import java.util.concurrent.TimeUnit; +import java.util.stream.IntStream; + +@Warmup(iterations = 5) +@Measurement(iterations = 7) +@BenchmarkMode(Mode.AverageTime) +@OutputTimeUnit(TimeUnit.NANOSECONDS) +@State(Scope.Thread) +@Fork(1) +public class BlockBenchmark { + + /** + * All data type/block kind combinations to be loaded before the benchmark. + * It is important to be exhaustive here so that all implementers of {@link IntBlock#getInt(int)} are actually loaded when we benchmark + * {@link IntBlock}s etc. + */ + // We could also consider DocBlocks/DocVectors but they do not implement any of the typed block interfaces like IntBlock etc. 
+ public static final String[] RELEVANT_TYPE_BLOCK_COMBINATIONS = { + "boolean/array", + "boolean/array-multivalue-null", + "boolean/big-array", + "boolean/big-array-multivalue-null", + "boolean/vector", + "boolean/vector-big-array", + "boolean/vector-const", + "BytesRef/array", + "BytesRef/array-multivalue-null", + "BytesRef/vector", + "BytesRef/vector-const", + "double/array", + "double/array-multivalue-null", + "double/big-array", + "double/big-array-multivalue-null", + "double/vector", + "double/vector-big-array", + "double/vector-const", + "int/array", + "int/array-multivalue-null", + "int/big-array", + "int/big-array-multivalue-null", + "int/vector", + "int/vector-big-array", + "int/vector-const", + "long/array", + "long/array-multivalue-null", + "long/big-array", + "long/big-array-multivalue-null", + "long/vector", + "long/vector-big-array", + "long/vector-const" }; + public static final int NUM_BLOCKS_PER_ITERATION = 1024; + public static final int BLOCK_TOTAL_POSITIONS = 8096; + + private static final double MV_PERCENTAGE = 0.3; + private static final double NULL_PERCENTAGE = 0.1; + private static final int MAX_MV_ELEMENTS = 100; + private static final int MAX_BYTES_REF_LENGTH = 255; + + private static final Random random = new Random(); + + private static final BlockFactory blockFactory = BlockFactory.getInstance( + new NoopCircuitBreaker("noop"), + BigArrays.NON_RECYCLING_INSTANCE + ); + + static { + // Smoke test all the expected values and force loading subclasses more like prod + int totalPositions = 10; + long[] actualCheckSums = new long[NUM_BLOCKS_PER_ITERATION]; + + for (String paramString : RELEVANT_TYPE_BLOCK_COMBINATIONS) { + String[] params = paramString.split("/"); + String dataType = params[0]; + String blockKind = params[1]; + + BenchmarkBlocks data = buildBlocks(dataType, blockKind, totalPositions); + int[][] traversalOrders = createTraversalOrders(data.blocks, false); + run(dataType, data, traversalOrders, actualCheckSums); + assertCheckSums(data, actualCheckSums); + } + } + + private record BenchmarkBlocks(Block[] blocks, long[] checkSums) {}; + + private static BenchmarkBlocks buildBlocks(String dataType, String blockKind, int totalPositions) { + Block[] blocks = new Block[NUM_BLOCKS_PER_ITERATION]; + long[] checkSums = new long[NUM_BLOCKS_PER_ITERATION]; + + switch (dataType) { + case "boolean" -> { + for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) { + if (blockKind.equalsIgnoreCase("vector-const")) { + BooleanVector vector = blockFactory.newConstantBooleanVector(random.nextBoolean(), totalPositions); + blocks[blockIndex] = vector.asBlock(); + continue; + } + + boolean[] values = new boolean[totalPositions]; + for (int i = 0; i < totalPositions; i++) { + values[i] = random.nextBoolean(); + } + + switch (blockKind) { + case "array" -> { + blocks[blockIndex] = blockFactory.newBooleanArrayBlock( + values, + totalPositions, + null, + null, + Block.MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING + ); + } + case "array-multivalue-null" -> { + int[] firstValueIndexes = randomFirstValueIndexes(totalPositions); + int positionCount = firstValueIndexes.length - 1; + BitSet nulls = randomNulls(positionCount); + + blocks[blockIndex] = blockFactory.newBooleanArrayBlock( + values, + positionCount, + firstValueIndexes, + nulls, + Block.MvOrdering.UNORDERED + ); + } + case "big-array" -> { + BitArray valuesBigArray = new BitArray(totalPositions, BigArrays.NON_RECYCLING_INSTANCE); + for (int i = 0; i < values.length; i++) { + if (values[i]) { + 
valuesBigArray.set(i); + } + } + + blocks[blockIndex] = new BooleanBigArrayBlock( + valuesBigArray, + totalPositions, + null, + null, + Block.MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING, + blockFactory + ); + } + case "big-array-multivalue-null" -> { + int[] firstValueIndexes = randomFirstValueIndexes(totalPositions); + int positionCount = firstValueIndexes.length - 1; + BitSet nulls = randomNulls(positionCount); + BitArray valuesBigArray = new BitArray(totalPositions, BigArrays.NON_RECYCLING_INSTANCE); + for (int i = 0; i < values.length; i++) { + if (values[i]) { + valuesBigArray.set(i); + } + } + + blocks[blockIndex] = new BooleanBigArrayBlock( + valuesBigArray, + positionCount, + firstValueIndexes, + nulls, + Block.MvOrdering.UNORDERED, + blockFactory + ); + } + case "vector" -> { + BooleanVector vector = blockFactory.newBooleanArrayVector(values, totalPositions); + blocks[blockIndex] = vector.asBlock(); + } + case "vector-big-array" -> { + BitArray valuesBigArray = new BitArray(totalPositions, BigArrays.NON_RECYCLING_INSTANCE); + for (int i = 0; i < values.length; i++) { + if (values[i]) { + valuesBigArray.set(i); + } + } + BooleanVector vector = new BooleanBigArrayVector(valuesBigArray, totalPositions, blockFactory); + blocks[blockIndex] = vector.asBlock(); + } + default -> { + throw new IllegalStateException("illegal block kind [" + blockKind + "]"); + } + } + } + + for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) { + BooleanBlock block = (BooleanBlock) blocks[blockIndex]; + checkSums[blockIndex] = computeBooleanCheckSum(block, IntStream.range(0, block.getPositionCount()).toArray()); + } + } + case "BytesRef" -> { + for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) { + if (blockKind.equalsIgnoreCase("vector-const")) { + byte[] bytes = new byte[random.nextInt(MAX_BYTES_REF_LENGTH)]; + random.nextBytes(bytes); + + BytesRefVector vector = blockFactory.newConstantBytesRefVector(new BytesRef(bytes), totalPositions); + blocks[blockIndex] = vector.asBlock(); + continue; + } + + BytesRefArray values = new BytesRefArray(totalPositions, BigArrays.NON_RECYCLING_INSTANCE); + byte[] bytes; + for (int i = 0; i < totalPositions; i++) { + bytes = new byte[random.nextInt(MAX_BYTES_REF_LENGTH)]; + random.nextBytes(bytes); + values.append(new BytesRef(bytes)); + } + + switch (blockKind) { + case "array" -> { + blocks[blockIndex] = blockFactory.newBytesRefArrayBlock( + values, + totalPositions, + null, + null, + Block.MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING + ); + } + case "array-multivalue-null" -> { + int[] firstValueIndexes = randomFirstValueIndexes(totalPositions); + int positionCount = firstValueIndexes.length - 1; + BitSet nulls = randomNulls(positionCount); + + blocks[blockIndex] = blockFactory.newBytesRefArrayBlock( + values, + positionCount, + firstValueIndexes, + nulls, + Block.MvOrdering.UNORDERED + ); + } + case "vector" -> { + BytesRefVector vector = blockFactory.newBytesRefArrayVector(values, totalPositions); + blocks[blockIndex] = vector.asBlock(); + } + default -> { + throw new IllegalStateException("illegal block kind [" + blockKind + "]"); + } + } + } + + for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) { + BytesRefBlock block = (BytesRefBlock) blocks[blockIndex]; + checkSums[blockIndex] = computeBytesRefCheckSum(block, IntStream.range(0, block.getPositionCount()).toArray()); + } + } + case "double" -> { + for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) { + if 
(blockKind.equalsIgnoreCase("vector-const")) { + DoubleVector vector = blockFactory.newConstantDoubleVector(random.nextDouble() * 1000000.0, totalPositions); + blocks[blockIndex] = vector.asBlock(); + continue; + } + + double[] values = new double[totalPositions]; + for (int i = 0; i < totalPositions; i++) { + values[i] = random.nextDouble() * 1000000.0; + } + + switch (blockKind) { + case "array" -> { + blocks[blockIndex] = blockFactory.newDoubleArrayBlock( + values, + totalPositions, + null, + null, + Block.MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING + ); + } + case "array-multivalue-null" -> { + int[] firstValueIndexes = randomFirstValueIndexes(totalPositions); + int positionCount = firstValueIndexes.length - 1; + BitSet nulls = randomNulls(positionCount); + + blocks[blockIndex] = blockFactory.newDoubleArrayBlock( + values, + positionCount, + firstValueIndexes, + nulls, + Block.MvOrdering.UNORDERED + ); + } + case "big-array" -> { + DoubleArray valuesBigArray = blockFactory.bigArrays().newDoubleArray(totalPositions, false); + for (int i = 0; i < values.length; i++) { + valuesBigArray.set(i, values[i]); + } + + blocks[blockIndex] = new DoubleBigArrayBlock( + valuesBigArray, + totalPositions, + null, + null, + Block.MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING, + blockFactory + ); + } + case "big-array-multivalue-null" -> { + int[] firstValueIndexes = randomFirstValueIndexes(totalPositions); + int positionCount = firstValueIndexes.length - 1; + BitSet nulls = randomNulls(positionCount); + DoubleArray valuesBigArray = blockFactory.bigArrays().newDoubleArray(totalPositions, false); + for (int i = 0; i < values.length; i++) { + valuesBigArray.set(i, values[i]); + } + + blocks[blockIndex] = new DoubleBigArrayBlock( + valuesBigArray, + positionCount, + firstValueIndexes, + nulls, + Block.MvOrdering.UNORDERED, + blockFactory + ); + } + case "vector" -> { + DoubleVector vector = blockFactory.newDoubleArrayVector(values, totalPositions); + blocks[blockIndex] = vector.asBlock(); + } + case "vector-big-array" -> { + DoubleArray valuesBigArray = blockFactory.bigArrays().newDoubleArray(totalPositions, false); + for (int i = 0; i < values.length; i++) { + valuesBigArray.set(i, values[i]); + } + DoubleVector vector = new DoubleBigArrayVector(valuesBigArray, totalPositions, blockFactory); + blocks[blockIndex] = vector.asBlock(); + } + default -> { + throw new IllegalStateException("illegal block kind [" + blockKind + "]"); + } + } + } + + for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) { + DoubleBlock block = (DoubleBlock) blocks[blockIndex]; + checkSums[blockIndex] = computeDoubleCheckSum(block, IntStream.range(0, block.getPositionCount()).toArray()); + } + } + case "int" -> { + for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) { + if (blockKind.equalsIgnoreCase("vector-const")) { + IntVector vector = blockFactory.newConstantIntVector(random.nextInt(), totalPositions); + blocks[blockIndex] = vector.asBlock(); + continue; + } + + int[] values = new int[totalPositions]; + for (int i = 0; i < totalPositions; i++) { + values[i] = random.nextInt(); + } + + switch (blockKind) { + case "array" -> { + blocks[blockIndex] = blockFactory.newIntArrayBlock( + values, + totalPositions, + null, + null, + Block.MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING + ); + } + case "array-multivalue-null" -> { + int[] firstValueIndexes = randomFirstValueIndexes(totalPositions); + int positionCount = firstValueIndexes.length - 1; + BitSet nulls = 
randomNulls(positionCount); + + blocks[blockIndex] = blockFactory.newIntArrayBlock( + values, + positionCount, + firstValueIndexes, + nulls, + Block.MvOrdering.UNORDERED + ); + } + case "big-array" -> { + IntArray valuesBigArray = blockFactory.bigArrays().newIntArray(totalPositions, false); + for (int i = 0; i < values.length; i++) { + valuesBigArray.set(i, values[i]); + } + + blocks[blockIndex] = new IntBigArrayBlock( + valuesBigArray, + totalPositions, + null, + null, + Block.MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING, + blockFactory + ); + } + case "big-array-multivalue-null" -> { + int[] firstValueIndexes = randomFirstValueIndexes(totalPositions); + int positionCount = firstValueIndexes.length - 1; + BitSet nulls = randomNulls(positionCount); + IntArray valuesBigArray = blockFactory.bigArrays().newIntArray(totalPositions, false); + for (int i = 0; i < values.length; i++) { + valuesBigArray.set(i, values[i]); + } + + blocks[blockIndex] = new IntBigArrayBlock( + valuesBigArray, + positionCount, + firstValueIndexes, + nulls, + Block.MvOrdering.UNORDERED, + blockFactory + ); + } + case "vector" -> { + IntVector vector = blockFactory.newIntArrayVector(values, totalPositions); + blocks[blockIndex] = vector.asBlock(); + } + case "vector-big-array" -> { + IntArray valuesBigArray = blockFactory.bigArrays().newIntArray(totalPositions, false); + for (int i = 0; i < values.length; i++) { + valuesBigArray.set(i, values[i]); + } + IntVector vector = new IntBigArrayVector(valuesBigArray, totalPositions, blockFactory); + blocks[blockIndex] = vector.asBlock(); + } + default -> { + throw new IllegalStateException("illegal block kind [" + blockKind + "]"); + } + } + } + + for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) { + IntBlock block = (IntBlock) blocks[blockIndex]; + checkSums[blockIndex] = computeIntCheckSum(block, IntStream.range(0, block.getPositionCount()).toArray()); + } + } + case "long" -> { + for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) { + if (blockKind.equalsIgnoreCase("vector-const")) { + LongVector vector = blockFactory.newConstantLongVector(random.nextLong(), totalPositions); + blocks[blockIndex] = vector.asBlock(); + continue; + } + + long[] values = new long[totalPositions]; + for (int i = 0; i < totalPositions; i++) { + values[i] = random.nextLong(); + } + + switch (blockKind) { + case "array" -> { + blocks[blockIndex] = blockFactory.newLongArrayBlock( + values, + totalPositions, + null, + null, + Block.MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING + ); + } + case "array-multivalue-null" -> { + int[] firstValueIndexes = randomFirstValueIndexes(totalPositions); + int positionCount = firstValueIndexes.length - 1; + BitSet nulls = randomNulls(positionCount); + + blocks[blockIndex] = blockFactory.newLongArrayBlock( + values, + positionCount, + firstValueIndexes, + nulls, + Block.MvOrdering.UNORDERED + ); + } + case "big-array" -> { + LongArray valuesBigArray = blockFactory.bigArrays().newLongArray(totalPositions, false); + for (int i = 0; i < values.length; i++) { + valuesBigArray.set(i, values[i]); + } + + blocks[blockIndex] = new LongBigArrayBlock( + valuesBigArray, + totalPositions, + null, + null, + Block.MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING, + blockFactory + ); + } + case "big-array-multivalue-null" -> { + int[] firstValueIndexes = randomFirstValueIndexes(totalPositions); + int positionCount = firstValueIndexes.length - 1; + BitSet nulls = randomNulls(positionCount); + LongArray valuesBigArray = 
blockFactory.bigArrays().newLongArray(totalPositions, false); + for (int i = 0; i < values.length; i++) { + valuesBigArray.set(i, values[i]); + } + + blocks[blockIndex] = new LongBigArrayBlock( + valuesBigArray, + positionCount, + firstValueIndexes, + nulls, + Block.MvOrdering.UNORDERED, + blockFactory + ); + } + case "vector" -> { + LongVector vector = blockFactory.newLongArrayVector(values, totalPositions); + blocks[blockIndex] = vector.asBlock(); + } + case "vector-big-array" -> { + LongArray valuesBigArray = blockFactory.bigArrays().newLongArray(totalPositions, false); + for (int i = 0; i < values.length; i++) { + valuesBigArray.set(i, values[i]); + } + LongVector vector = new LongBigArrayVector(valuesBigArray, totalPositions, blockFactory); + blocks[blockIndex] = vector.asBlock(); + } + default -> { + throw new IllegalStateException("illegal block kind [" + blockKind + "]"); + } + } + } + + for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) { + LongBlock block = (LongBlock) blocks[blockIndex]; + checkSums[blockIndex] = computeLongCheckSum(block, IntStream.range(0, block.getPositionCount()).toArray()); + } + } + default -> { + throw new IllegalStateException("illegal data type [" + dataType + "]"); + } + } + + return new BenchmarkBlocks(blocks, checkSums); + } + + private static int[][] createTraversalOrders(Block[] blocks, boolean randomized) { + int[][] orders = new int[blocks.length][]; + + for (int i = 0; i < blocks.length; i++) { + IntStream positionsStream = IntStream.range(0, blocks[i].getPositionCount()); + + if (randomized) { + List<Integer> positions = new java.util.ArrayList<>(positionsStream.boxed().toList()); + Collections.shuffle(positions, random); + orders[i] = positions.stream().mapToInt(x -> x).toArray(); + } else { + orders[i] = positionsStream.toArray(); + } + } + + return orders; + } + + private static int[] randomFirstValueIndexes(int totalPositions) { + ArrayList<Integer> firstValueIndexes = new ArrayList<>(); + firstValueIndexes.add(0); + + int currentPosition = 0; + int nextPosition; + while (currentPosition < totalPositions) { + if (random.nextDouble() < MV_PERCENTAGE) { + nextPosition = Math.min(currentPosition + 1 + random.nextInt(MAX_MV_ELEMENTS), totalPositions); + } else { + nextPosition = currentPosition + 1; + } + firstValueIndexes.add(nextPosition); + currentPosition = nextPosition; + } + + return firstValueIndexes.stream().mapToInt(x -> x).toArray(); + } + + private static BitSet randomNulls(int positionCount) { + BitSet nulls = new BitSet(positionCount); + for (int i = 0; i < positionCount; i++) { + if (random.nextDouble() < NULL_PERCENTAGE) { + nulls.set(i); + } + } + + return nulls; + } + + private static void run(String dataType, BenchmarkBlocks data, int[][] traversalOrders, long[] resultCheckSums) { + switch (dataType) { + case "boolean" -> { + for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) { + BooleanBlock block = (BooleanBlock) data.blocks[blockIndex]; + + resultCheckSums[blockIndex] = computeBooleanCheckSum(block, traversalOrders[blockIndex]); + } + } + case "BytesRef" -> { + for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) { + BytesRefBlock block = (BytesRefBlock) data.blocks[blockIndex]; + + resultCheckSums[blockIndex] = computeBytesRefCheckSum(block, traversalOrders[blockIndex]); + } + } + case "double" -> { + for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) { + DoubleBlock block = (DoubleBlock) data.blocks[blockIndex]; + + 
resultCheckSums[blockIndex] = computeDoubleCheckSum(block, traversalOrders[blockIndex]); + } + } + case "int" -> { + for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) { + IntBlock block = (IntBlock) data.blocks[blockIndex]; + + resultCheckSums[blockIndex] = computeIntCheckSum(block, traversalOrders[blockIndex]); + } + } + case "long" -> { + for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) { + LongBlock block = (LongBlock) data.blocks[blockIndex]; + + resultCheckSums[blockIndex] = computeLongCheckSum(block, traversalOrders[blockIndex]); + } + } + default -> { + throw new IllegalStateException(); + } + } + } + + private static void assertCheckSums(BenchmarkBlocks data, long[] actualCheckSums) { + for (int blockIndex = 0; blockIndex < NUM_BLOCKS_PER_ITERATION; blockIndex++) { + if (actualCheckSums[blockIndex] != data.checkSums[blockIndex]) { + throw new AssertionError("checksums do not match for block [" + blockIndex + "]"); + } + } + } + + private static long computeBooleanCheckSum(BooleanBlock block, int[] traversalOrder) { + long sum = 0; + + for (int position : traversalOrder) { + if (block.isNull(position)) { + continue; + } + int start = block.getFirstValueIndex(position); + int end = start + block.getValueCount(position); + for (int i = start; i < end; i++) { + sum += block.getBoolean(i) ? 1 : 0; + } + } + + return sum; + } + + private static long computeBytesRefCheckSum(BytesRefBlock block, int[] traversalOrder) { + long sum = 0; + BytesRef currentValue = new BytesRef(); + + for (int position : traversalOrder) { + if (block.isNull(position)) { + continue; + } + int start = block.getFirstValueIndex(position); + int end = start + block.getValueCount(position); + for (int i = start; i < end; i++) { + block.getBytesRef(i, currentValue); + sum += currentValue.length > 0 ? 
currentValue.bytes[0] : 0; + } + } + + return sum; + } + + private static long computeDoubleCheckSum(DoubleBlock block, int[] traversalOrder) { + double sum = 0; + + for (int position : traversalOrder) { + if (block.isNull(position)) { + continue; + } + int start = block.getFirstValueIndex(position); + int end = start + block.getValueCount(position); + for (int i = start; i < end; i++) { + sum += block.getDouble(i); + } + } + + return (long) sum; + } + + private static long computeIntCheckSum(IntBlock block, int[] traversalOrder) { + int sum = 0; + + for (int position : traversalOrder) { + if (block.isNull(position)) { + continue; + } + int start = block.getFirstValueIndex(position); + int end = start + block.getValueCount(position); + for (int i = start; i < end; i++) { + sum += block.getInt(i); + } + } + + return sum; + } + + private static long computeLongCheckSum(LongBlock block, int[] traversalOrder) { + long sum = 0; + + for (int position : traversalOrder) { + if (block.isNull(position)) { + continue; + } + int start = block.getFirstValueIndex(position); + int end = start + block.getValueCount(position); + for (int i = start; i < end; i++) { + sum += block.getLong(i); + } + } + + return sum; + } + + private static boolean isRandom(String accessType) { + return accessType.equalsIgnoreCase("random"); + } + + /** + * Must be a subset of {@link BlockBenchmark#RELEVANT_TYPE_BLOCK_COMBINATIONS} + */ + @Param( + { + "boolean/array", + "boolean/array-multivalue-null", + "boolean/big-array", + "boolean/big-array-multivalue-null", + "boolean/vector", + "boolean/vector-big-array", + "boolean/vector-const", + "BytesRef/array", + "BytesRef/array-multivalue-null", + "BytesRef/vector", + "BytesRef/vector-const", + "double/array", + "double/array-multivalue-null", + "double/big-array", + "double/big-array-multivalue-null", + "double/vector", + "double/vector-big-array", + "double/vector-const", + "int/array", + "int/array-multivalue-null", + "int/big-array", + "int/big-array-multivalue-null", + "int/vector", + "int/vector-big-array", + "int/vector-const", + "long/array", + "long/array-multivalue-null", + "long/big-array", + "long/big-array-multivalue-null", + "long/vector", + "long/vector-big-array", + "long/vector-const" } + ) + public String dataTypeAndBlockKind; + + @Param({ "sequential", "random" }) + public String accessType; + + private BenchmarkBlocks data; + + private int[][] traversalOrders; + + private final long[] actualCheckSums = new long[NUM_BLOCKS_PER_ITERATION]; + + @Setup + public void setup() { + String[] params = dataTypeAndBlockKind.split("/"); + String dataType = params[0]; + String blockKind = params[1]; + + data = buildBlocks(dataType, blockKind, BLOCK_TOTAL_POSITIONS); + traversalOrders = createTraversalOrders(data.blocks, isRandom(accessType)); + } + + @Benchmark + @OperationsPerInvocation(NUM_BLOCKS_PER_ITERATION * BLOCK_TOTAL_POSITIONS) + public void run() { + String[] params = dataTypeAndBlockKind.split("/"); + String dataType = params[0]; + + run(dataType, data, traversalOrders, actualCheckSums); + } + + @TearDown(Level.Iteration) + public void assertCheckSums() { + assertCheckSums(data, actualCheckSums); + } +} diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/EvalBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/EvalBenchmark.java index 3a1142ad87d2f..1765897ba35e7 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/EvalBenchmark.java +++ 
b/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/EvalBenchmark.java @@ -14,7 +14,6 @@ import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.BooleanBlock; import org.elasticsearch.compute.data.BooleanVector; -import org.elasticsearch.compute.data.IntBlock; import org.elasticsearch.compute.data.LongBlock; import org.elasticsearch.compute.data.LongVector; import org.elasticsearch.compute.data.Page; @@ -59,6 +58,12 @@ @State(Scope.Thread) @Fork(1) public class EvalBenchmark { + private static final BigArrays BIG_ARRAYS = BigArrays.NON_RECYCLING_INSTANCE; // TODO real big arrays? + private static final BlockFactory blockFactory = BlockFactory.getInstance( + new NoopCircuitBreaker("noop"), + BigArrays.NON_RECYCLING_INSTANCE + ); + private static final int BLOCK_LENGTH = 8 * 1024; static final DriverContext driverContext = new DriverContext( @@ -207,15 +212,15 @@ private static void checkExpected(String operation, Page actual) { private static Page page(String operation) { return switch (operation) { case "abs", "add", "date_trunc", "equal_to_const" -> { - var builder = LongBlock.newBlockBuilder(BLOCK_LENGTH); + var builder = blockFactory.newLongBlockBuilder(BLOCK_LENGTH); for (int i = 0; i < BLOCK_LENGTH; i++) { builder.appendLong(i * 100_000); } yield new Page(builder.build()); } case "long_equal_to_long" -> { - var lhs = LongBlock.newBlockBuilder(BLOCK_LENGTH); - var rhs = LongBlock.newBlockBuilder(BLOCK_LENGTH); + var lhs = blockFactory.newLongBlockBuilder(BLOCK_LENGTH); + var rhs = blockFactory.newLongBlockBuilder(BLOCK_LENGTH); for (int i = 0; i < BLOCK_LENGTH; i++) { lhs.appendLong(i * 100_000); rhs.appendLong(i * 100_000); @@ -223,8 +228,8 @@ private static Page page(String operation) { yield new Page(lhs.build(), rhs.build()); } case "long_equal_to_int" -> { - var lhs = LongBlock.newBlockBuilder(BLOCK_LENGTH); - var rhs = IntBlock.newBlockBuilder(BLOCK_LENGTH); + var lhs = blockFactory.newLongBlockBuilder(BLOCK_LENGTH); + var rhs = blockFactory.newIntBlockBuilder(BLOCK_LENGTH); for (int i = 0; i < BLOCK_LENGTH; i++) { lhs.appendLong(i * 100_000); rhs.appendInt(i * 100_000); @@ -232,7 +237,7 @@ private static Page page(String operation) { yield new Page(lhs.build(), rhs.build()); } case "mv_min", "mv_min_ascending" -> { - var builder = LongBlock.newBlockBuilder(BLOCK_LENGTH); + var builder = blockFactory.newLongBlockBuilder(BLOCK_LENGTH); if (operation.endsWith("ascending")) { builder.mvOrdering(Block.MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING); } diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/MultivalueDedupeBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/MultivalueDedupeBenchmark.java index 09cdc8b269ad3..c32aa1184ddaa 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/MultivalueDedupeBenchmark.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/MultivalueDedupeBenchmark.java @@ -10,6 +10,8 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.Randomness; +import org.elasticsearch.common.breaker.NoopCircuitBreaker; +import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.BooleanBlock; @@ -43,6 +45,12 @@ @State(Scope.Thread) @Fork(1) public class MultivalueDedupeBenchmark { + private static final BigArrays BIG_ARRAYS = 
BigArrays.NON_RECYCLING_INSTANCE; // TODO real big arrays? + private static final BlockFactory blockFactory = BlockFactory.getInstance( + new NoopCircuitBreaker("noop"), + BigArrays.NON_RECYCLING_INSTANCE + ); + @Param({ "BOOLEAN", "BYTES_REF", "DOUBLE", "INT", "LONG" }) private ElementType elementType; @@ -58,7 +66,7 @@ public class MultivalueDedupeBenchmark { public void setup() { this.block = switch (elementType) { case BOOLEAN -> { - BooleanBlock.Builder builder = BooleanBlock.newBlockBuilder(AggregatorBenchmark.BLOCK_LENGTH * (size + repeats)); + BooleanBlock.Builder builder = blockFactory.newBooleanBlockBuilder(AggregatorBenchmark.BLOCK_LENGTH * (size + repeats)); for (int p = 0; p < AggregatorBenchmark.BLOCK_LENGTH; p++) { List<Boolean> values = new ArrayList<>(); for (int i = 0; i < size; i++) { @@ -77,7 +85,7 @@ public void setup() { yield builder.build(); } case BYTES_REF -> { - BytesRefBlock.Builder builder = BytesRefBlock.newBlockBuilder(AggregatorBenchmark.BLOCK_LENGTH * (size + repeats)); + BytesRefBlock.Builder builder = blockFactory.newBytesRefBlockBuilder(AggregatorBenchmark.BLOCK_LENGTH * (size + repeats)); for (int p = 0; p < AggregatorBenchmark.BLOCK_LENGTH; p++) { List<BytesRef> values = new ArrayList<>(); for (int i = 0; i < size; i++) { @@ -96,7 +104,7 @@ public void setup() { yield builder.build(); } case DOUBLE -> { - DoubleBlock.Builder builder = DoubleBlock.newBlockBuilder(AggregatorBenchmark.BLOCK_LENGTH * (size + repeats)); + DoubleBlock.Builder builder = blockFactory.newDoubleBlockBuilder(AggregatorBenchmark.BLOCK_LENGTH * (size + repeats)); for (int p = 0; p < AggregatorBenchmark.BLOCK_LENGTH; p++) { List<Double> values = new ArrayList<>(); for (int i = 0; i < size; i++) { @@ -115,7 +123,7 @@ public void setup() { yield builder.build(); } case INT -> { - IntBlock.Builder builder = IntBlock.newBlockBuilder(AggregatorBenchmark.BLOCK_LENGTH * (size + repeats)); + IntBlock.Builder builder = blockFactory.newIntBlockBuilder(AggregatorBenchmark.BLOCK_LENGTH * (size + repeats)); for (int p = 0; p < AggregatorBenchmark.BLOCK_LENGTH; p++) { List<Integer> values = new ArrayList<>(); for (int i = 0; i < size; i++) { @@ -134,7 +142,7 @@ public void setup() { yield builder.build(); } case LONG -> { - LongBlock.Builder builder = LongBlock.newBlockBuilder(AggregatorBenchmark.BLOCK_LENGTH * (size + repeats)); + LongBlock.Builder builder = blockFactory.newLongBlockBuilder(AggregatorBenchmark.BLOCK_LENGTH * (size + repeats)); for (int p = 0; p < AggregatorBenchmark.BLOCK_LENGTH; p++) { List<Long> values = new ArrayList<>(); for (long i = 0; i < size; i++) { @@ -159,18 +167,18 @@ public void setup() { @Benchmark @OperationsPerInvocation(AggregatorBenchmark.BLOCK_LENGTH) public void adaptive() { - MultivalueDedupe.dedupeToBlockAdaptive(block, BlockFactory.getNonBreakingInstance()).close(); + MultivalueDedupe.dedupeToBlockAdaptive(block, blockFactory).close(); } @Benchmark @OperationsPerInvocation(AggregatorBenchmark.BLOCK_LENGTH) public void copyAndSort() { - MultivalueDedupe.dedupeToBlockUsingCopyAndSort(block, BlockFactory.getNonBreakingInstance()).close(); + MultivalueDedupe.dedupeToBlockUsingCopyAndSort(block, blockFactory).close(); } @Benchmark @OperationsPerInvocation(AggregatorBenchmark.BLOCK_LENGTH) public void copyMissing() { - MultivalueDedupe.dedupeToBlockUsingCopyMissing(block, BlockFactory.getNonBreakingInstance()).close(); + MultivalueDedupe.dedupeToBlockUsingCopyMissing(block, blockFactory).close(); } } diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/TopNBenchmark.java 
b/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/TopNBenchmark.java index d723ea3e1a6b3..3d5a36ea288b4 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/TopNBenchmark.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/TopNBenchmark.java @@ -10,16 +10,15 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.breaker.CircuitBreaker; +import org.elasticsearch.common.breaker.NoopCircuitBreaker; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.BooleanBlock; import org.elasticsearch.compute.data.BytesRefBlock; -import org.elasticsearch.compute.data.DoubleBlock; import org.elasticsearch.compute.data.ElementType; -import org.elasticsearch.compute.data.IntBlock; -import org.elasticsearch.compute.data.LongBlock; import org.elasticsearch.compute.data.Page; import org.elasticsearch.compute.operator.Operator; import org.elasticsearch.compute.operator.topn.TopNEncoder; @@ -51,6 +50,12 @@ @State(Scope.Thread) @Fork(1) public class TopNBenchmark { + private static final BigArrays BIG_ARRAYS = BigArrays.NON_RECYCLING_INSTANCE; // TODO real big arrays? + private static final BlockFactory blockFactory = BlockFactory.getInstance( + new NoopCircuitBreaker("noop"), + BigArrays.NON_RECYCLING_INSTANCE + ); + private static final int BLOCK_LENGTH = 8 * 1024; private static final String LONGS = "longs"; @@ -110,7 +115,7 @@ private static Operator operator(String data, int topCount) { ClusterSettings.createBuiltInClusterSettings() ); return new TopNOperator( - BlockFactory.getNonBreakingInstance(), + blockFactory, breakerService.getBreaker(CircuitBreaker.REQUEST), topCount, elementTypes, @@ -137,35 +142,35 @@ private static Page page(String data) { private static Block block(String data) { return switch (data) { case LONGS -> { - var builder = LongBlock.newBlockBuilder(BLOCK_LENGTH); + var builder = blockFactory.newLongBlockBuilder(BLOCK_LENGTH); for (int i = 0; i < BLOCK_LENGTH; i++) { builder.appendLong(i); } yield builder.build(); } case INTS -> { - var builder = IntBlock.newBlockBuilder(BLOCK_LENGTH); + var builder = blockFactory.newIntBlockBuilder(BLOCK_LENGTH); for (int i = 0; i < BLOCK_LENGTH; i++) { builder.appendInt(i); } yield builder.build(); } case DOUBLES -> { - var builder = DoubleBlock.newBlockBuilder(BLOCK_LENGTH); + var builder = blockFactory.newDoubleBlockBuilder(BLOCK_LENGTH); for (int i = 0; i < BLOCK_LENGTH; i++) { builder.appendDouble(i); } yield builder.build(); } case BOOLEANS -> { - BooleanBlock.Builder builder = BooleanBlock.newBlockBuilder(BLOCK_LENGTH); + BooleanBlock.Builder builder = blockFactory.newBooleanBlockBuilder(BLOCK_LENGTH); for (int i = 0; i < BLOCK_LENGTH; i++) { builder.appendBoolean(i % 2 == 1); } yield builder.build(); } case BYTES_REFS -> { - BytesRefBlock.Builder builder = BytesRefBlock.newBlockBuilder(BLOCK_LENGTH); + BytesRefBlock.Builder builder = blockFactory.newBytesRefBlockBuilder(BLOCK_LENGTH); for (int i = 0; i < BLOCK_LENGTH; i++) { builder.appendBytesRef(new BytesRef(Integer.toString(i))); } diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/ValuesSourceReaderBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/ValuesSourceReaderBenchmark.java index 
afe8377d3e58c..66389c9e11ded 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/ValuesSourceReaderBenchmark.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/ValuesSourceReaderBenchmark.java @@ -22,7 +22,9 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.NumericUtils; +import org.elasticsearch.common.breaker.NoopCircuitBreaker; import org.elasticsearch.common.lucene.Lucene; +import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.BytesRefBlock; import org.elasticsearch.compute.data.BytesRefVector; @@ -78,6 +80,11 @@ public class ValuesSourceReaderBenchmark { private static final int BLOCK_LENGTH = 16 * 1024; private static final int INDEX_SIZE = 10 * BLOCK_LENGTH; private static final int COMMIT_INTERVAL = 500; + private static final BigArrays BIG_ARRAYS = BigArrays.NON_RECYCLING_INSTANCE; + private static final BlockFactory blockFactory = BlockFactory.getInstance( + new NoopCircuitBreaker("noop"), + BigArrays.NON_RECYCLING_INSTANCE + ); static { // Smoke test all the expected values and force loading subclasses more like prod @@ -241,7 +248,7 @@ private static BlockLoader numericBlockLoader(String name, Where where, NumberFi @OperationsPerInvocation(INDEX_SIZE) public void benchmark() { ValuesSourceReaderOperator op = new ValuesSourceReaderOperator( - BlockFactory.getNonBreakingInstance(), + blockFactory, fields(name), List.of(new ValuesSourceReaderOperator.ShardContext(reader, () -> { throw new UnsupportedOperationException("can't load _source here"); @@ -374,7 +381,7 @@ private void setupPages() { pages = new ArrayList<>(); switch (layout) { case "in_order" -> { - IntVector.Builder docs = IntVector.newVectorBuilder(BLOCK_LENGTH); + IntVector.Builder docs = blockFactory.newIntVectorBuilder(BLOCK_LENGTH); for (LeafReaderContext ctx : reader.leaves()) { int begin = 0; while (begin < ctx.reader().maxDoc()) { @@ -385,14 +392,14 @@ private void setupPages() { pages.add( new Page( new DocVector( - IntBlock.newConstantBlockWith(0, end - begin).asVector(), - IntBlock.newConstantBlockWith(ctx.ord, end - begin).asVector(), + blockFactory.newConstantIntBlockWith(0, end - begin).asVector(), + blockFactory.newConstantIntBlockWith(ctx.ord, end - begin).asVector(), docs.build(), true ).asBlock() ) ); - docs = IntVector.newVectorBuilder(BLOCK_LENGTH); + docs = blockFactory.newIntVectorBuilder(BLOCK_LENGTH); begin = end; } } @@ -403,8 +410,8 @@ record ItrAndOrd(PrimitiveIterator.OfInt itr, int ord) {} for (LeafReaderContext ctx : reader.leaves()) { docItrs.add(new ItrAndOrd(IntStream.range(0, ctx.reader().maxDoc()).iterator(), ctx.ord)); } - IntVector.Builder docs = IntVector.newVectorBuilder(BLOCK_LENGTH); - IntVector.Builder leafs = IntVector.newVectorBuilder(BLOCK_LENGTH); + IntVector.Builder docs = blockFactory.newIntVectorBuilder(BLOCK_LENGTH); + IntVector.Builder leafs = blockFactory.newIntVectorBuilder(BLOCK_LENGTH); int size = 0; while (docItrs.isEmpty() == false) { Iterator<ItrAndOrd> itrItr = docItrs.iterator(); @@ -420,12 +427,11 @@ record ItrAndOrd(PrimitiveIterator.OfInt itr, int ord) {} if (size >= BLOCK_LENGTH) { pages.add( new Page( - new DocVector(IntBlock.newConstantBlockWith(0, size).asVector(), leafs.build(), docs.build(), null) - .asBlock() + new DocVector(blockFactory.newConstantIntVector(0, size), leafs.build(), docs.build(), null).asBlock() ) ); - docs = 
IntVector.newVectorBuilder(BLOCK_LENGTH); - leafs = IntVector.newVectorBuilder(BLOCK_LENGTH); + docs = blockFactory.newIntVectorBuilder(BLOCK_LENGTH); + leafs = blockFactory.newIntVectorBuilder(BLOCK_LENGTH); size = 0; } } @@ -434,7 +440,7 @@ record ItrAndOrd(PrimitiveIterator.OfInt itr, int ord) {} pages.add( new Page( new DocVector( - IntBlock.newConstantBlockWith(0, size).asVector(), + blockFactory.newConstantIntBlockWith(0, size).asVector(), leafs.build().asBlock().asVector(), docs.build(), null @@ -460,9 +466,9 @@ record ItrAndOrd(PrimitiveIterator.OfInt itr, int ord) {} pages.add( new Page( new DocVector( - IntBlock.newConstantBlockWith(0, 1).asVector(), - IntBlock.newConstantBlockWith(next.ord, 1).asVector(), - IntBlock.newConstantBlockWith(next.itr.nextInt(), 1).asVector(), + blockFactory.newConstantIntVector(0, 1), + blockFactory.newConstantIntVector(next.ord, 1), + blockFactory.newConstantIntVector(next.itr.nextInt(), 1), true ).asBlock() ) diff --git a/build-tools-internal/build.gradle b/build-tools-internal/build.gradle index d0c52945801d3..a3b41283764a1 100644 --- a/build-tools-internal/build.gradle +++ b/build-tools-internal/build.gradle @@ -295,6 +295,8 @@ dependencies { compileOnly buildLibs.checkstyle compileOnly buildLibs.reflections + implementation 'com.github.javaparser:javaparser-core:3.18.0' + runtimeOnly "org.elasticsearch.gradle:reaper:$version" testImplementation buildLibs.checkstyle testImplementation buildLibs.wiremock diff --git a/build-tools-internal/src/main/groovy/elasticsearch.fips.gradle b/build-tools-internal/src/main/groovy/elasticsearch.fips.gradle index f691d4bd996a7..aaae18401685a 100644 --- a/build-tools-internal/src/main/groovy/elasticsearch.fips.gradle +++ b/build-tools-internal/src/main/groovy/elasticsearch.fips.gradle @@ -79,6 +79,7 @@ if (BuildParams.inFipsJvm) { // with no x-pack. 
Tests having security explicitly enabled/disabled will override this setting setting 'xpack.security.enabled', 'false' setting 'xpack.security.fips_mode.enabled', 'true' + setting 'xpack.security.fips_mode.required_providers', '["BCFIPS", "BCJSSE"]' setting 'xpack.license.self_generated.type', 'trial' setting 'xpack.security.authc.password_hashing.algorithm', 'pbkdf2_stretch' keystorePassword 'keystore-password' diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchBuildCompletePlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchBuildCompletePlugin.java index bad3ebb11a0dd..f0604ab33ceec 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchBuildCompletePlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchBuildCompletePlugin.java @@ -134,7 +134,7 @@ public void execute(BuildFinishedFlowAction.Parameters parameters) throws FileNo } uploadFile.getParentFile().mkdirs(); createBuildArchiveTar(parameters.getFilteredFiles().get(), parameters.getProjectDir().get(), uploadFile); - if (uploadFile.exists() && System.getenv("BUILDKITE").equals("true")) { + if (uploadFile.exists() && "true".equals(System.getenv("BUILDKITE"))) { String uploadFilePath = "build/" + uploadFile.getName(); try { System.out.println("Uploading buildkite artifact: " + uploadFilePath + "..."); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchTestBasePlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchTestBasePlugin.java index f1804064b7e07..31b62c4ac700f 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchTestBasePlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchTestBasePlugin.java @@ -108,10 +108,7 @@ public void execute(Task t) { "--add-opens=java.base/java.nio.file=ALL-UNNAMED", "--add-opens=java.base/java.time=ALL-UNNAMED", "--add-opens=java.management/java.lang.management=ALL-UNNAMED", - "-XX:+HeapDumpOnOutOfMemoryError", - // REMOVE once bumped to a JDK greater than 21.0.1, https://github.com/elastic/elasticsearch/issues/103004 - "-XX:CompileCommand=exclude,org.apache.lucene.util.MSBRadixSorter::computeCommonPrefixLengthAndBuildHistogram", - "-XX:CompileCommand=exclude,org.apache.lucene.util.RadixSelector::computeCommonPrefixLengthAndBuildHistogram" + "-XX:+HeapDumpOnOutOfMemoryError" ); test.getJvmArgumentProviders().add(new SimpleCommandLineArgumentProvider("-XX:HeapDumpPath=" + heapdumpDir)); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/RestrictedBuildApiService.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/RestrictedBuildApiService.java index 8042bdd64dabb..23afcab7bec7c 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/RestrictedBuildApiService.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/RestrictedBuildApiService.java @@ -123,10 +123,6 @@ private static ListMultimap<Class<?>, String> createLegacyRestTestBasePluginUsag map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:eql:qa:ccs-rolling-upgrade"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:eql:qa:correctness"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:eql:qa:mixed-node"); - map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:esql:qa:security"); - 
map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:esql:qa:server:heap-attack"); - map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:esql:qa:server:multi-node"); - map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:esql:qa:server:single-node"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:fleet:qa:rest"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:identity-provider:qa:idp-rest-tests"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:ilm:qa:multi-cluster"); @@ -166,7 +162,6 @@ private static ListMultimap<Class<?>, String> createLegacyRestTestBasePluginUsag map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:vector-tile:qa:multi-cluster"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:watcher:qa:rest"); map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:watcher:qa:with-security"); - map.put(LegacyRestTestBasePlugin.class, ":x-pack:plugin:esql:qa:server:mixed-cluster"); return map; } diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/packer/CacheTestFixtureResourcesPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/packer/CacheTestFixtureResourcesPlugin.java index f8ab8eef1004c..c8ce9d5ca2c71 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/packer/CacheTestFixtureResourcesPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/packer/CacheTestFixtureResourcesPlugin.java @@ -12,6 +12,7 @@ import org.elasticsearch.gradle.internal.ResolveAllDependencies; import org.gradle.api.Plugin; import org.gradle.api.Project; +import org.gradle.api.artifacts.Dependency; import org.gradle.api.artifacts.dsl.DependencyHandler; import org.gradle.api.plugins.JavaPlugin; import org.gradle.api.plugins.JavaPluginExtension; @@ -26,9 +27,12 @@ public void apply(Project project) { var cacheTestFixturesConfiguration = project.getConfigurations().create(CACHE_TEST_FIXTURES); cacheTestFixturesConfiguration.defaultDependencies(deps -> { DependencyHandler dependencyHandler = project.getDependencies(); - deps.add(dependencyHandler.create("org.reflections:reflections:" + VersionProperties.getVersions().get("reflections"))); - deps.add(dependencyHandler.create("org.javassist:javassist:" + VersionProperties.getVersions().get("javassist"))); + Dependency reflections = dependencyHandler.create( + "org.reflections:reflections:" + VersionProperties.getVersions().get("reflections") + ); + deps.add(reflections); }); + project.getPlugins().withType(JavaPlugin.class, javaPlugin -> { var cacheTestFixtures = project.getTasks().register(CACHE_TEST_FIXTURES, CacheCacheableTestFixtures.class, (t) -> { var testSourceSet = project.getExtensions() diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ReleaseToolsPlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ReleaseToolsPlugin.java index f9f831439f2ca..6c978edd48c29 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ReleaseToolsPlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/ReleaseToolsPlugin.java @@ -47,6 +47,9 @@ public void apply(Project project) { final Version version = VersionProperties.getElasticsearchVersion(); + project.getTasks() + .register("updateVersions", UpdateVersionsTask.class, t -> project.getTasks().named("spotlessApply").get().mustRunAfter(t)); + final FileTree yamlFiles = projectDirectory.dir("docs/changelog") .getAsFileTree() .matching(new PatternSet().include("**/*.yml", "**/*.yaml"));
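The updateVersions task registered above is configured entirely through the @Option setters declared in the new UpdateVersionsTask class that follows, which Gradle exposes as command-line flags. A typical invocation would look something like this (the flag names come straight from the @Option declarations; the version numbers are placeholders):

    ./gradlew updateVersions --add-version=8.13.0 --set-current
    ./gradlew updateVersions --remove-version=8.11.0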
.matching(new PatternSet().include("**/*.yml", "**/*.yaml")); diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/UpdateVersionsTask.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/UpdateVersionsTask.java new file mode 100644 index 0000000000000..f8073f384b871 --- /dev/null +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/release/UpdateVersionsTask.java @@ -0,0 +1,215 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.gradle.internal.release; + +import com.github.javaparser.StaticJavaParser; +import com.github.javaparser.ast.CompilationUnit; +import com.github.javaparser.ast.NodeList; +import com.github.javaparser.ast.body.ClassOrInterfaceDeclaration; +import com.github.javaparser.ast.body.FieldDeclaration; +import com.github.javaparser.ast.body.VariableDeclarator; +import com.github.javaparser.ast.expr.NameExpr; +import com.github.javaparser.printer.lexicalpreservation.LexicalPreservingPrinter; +import com.google.common.annotations.VisibleForTesting; + +import org.elasticsearch.gradle.Version; +import org.gradle.api.DefaultTask; +import org.gradle.api.logging.Logger; +import org.gradle.api.logging.Logging; +import org.gradle.api.tasks.TaskAction; +import org.gradle.api.tasks.options.Option; +import org.gradle.initialization.layout.BuildLayout; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.StandardOpenOption; +import java.util.Map; +import java.util.NavigableMap; +import java.util.Objects; +import java.util.Optional; +import java.util.TreeMap; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import java.util.stream.Collectors; + +import javax.annotation.Nullable; +import javax.inject.Inject; + +public class UpdateVersionsTask extends DefaultTask { + private static final Logger LOGGER = Logging.getLogger(UpdateVersionsTask.class); + + static final String SERVER_MODULE_PATH = "server/src/main/java/"; + static final String VERSION_FILE_PATH = SERVER_MODULE_PATH + "org/elasticsearch/Version.java"; + + static final Pattern VERSION_FIELD = Pattern.compile("V_(\\d+)_(\\d+)_(\\d+)(?:_(\\w+))?"); + + final Path rootDir; + + @Nullable + private Version addVersion; + private boolean setCurrent; + @Nullable + private Version removeVersion; + + @Inject + public UpdateVersionsTask(BuildLayout layout) { + rootDir = layout.getRootDirectory().toPath(); + } + + @Option(option = "add-version", description = "Specifies the version to add") + public void addVersion(String version) { + this.addVersion = Version.fromString(version); + } + + @Option(option = "set-current", description = "Set the 'current' constant to the new version") + public void setCurrent(boolean setCurrent) { + this.setCurrent = setCurrent; + } + + @Option(option = "remove-version", description = "Specifies the version to remove") + public void removeVersion(String version) { + this.removeVersion = Version.fromString(version); + } + + static String toVersionField(Version version) { + return String.format("V_%d_%d_%d", version.getMajor(), version.getMinor(), version.getRevision()); + } + + static Optional 
parseVersionField(CharSequence field) { + Matcher m = VERSION_FIELD.matcher(field); + if (m.find() == false) return Optional.empty(); + + return Optional.of( + new Version(Integer.parseInt(m.group(1)), Integer.parseInt(m.group(2)), Integer.parseInt(m.group(3)), m.group(4)) + ); + } + + @TaskAction + public void executeTask() throws IOException { + if (addVersion == null && removeVersion == null) { + throw new IllegalArgumentException("No versions to add or remove specified"); + } + if (setCurrent && addVersion == null) { + throw new IllegalArgumentException("No new version added to set as the current version"); + } + if (Objects.equals(addVersion, removeVersion)) { + throw new IllegalArgumentException("Same version specified to add and remove"); + } + + Path versionJava = rootDir.resolve(VERSION_FILE_PATH); + CompilationUnit file = LexicalPreservingPrinter.setup(StaticJavaParser.parse(versionJava)); + + Optional<CompilationUnit> modifiedFile = Optional.empty(); + if (addVersion != null) { + LOGGER.lifecycle("Adding new version [{}] to [{}]", addVersion, versionJava); + var added = addVersionConstant(modifiedFile.orElse(file), addVersion, setCurrent); + if (added.isPresent()) { + modifiedFile = added; + } + } + if (removeVersion != null) { + LOGGER.lifecycle("Removing version [{}] from [{}]", removeVersion, versionJava); + var removed = removeVersionConstant(modifiedFile.orElse(file), removeVersion); + if (removed.isPresent()) { + modifiedFile = removed; + } + } + + if (modifiedFile.isPresent()) { + writeOutNewContents(versionJava, modifiedFile.get()); + } + } + + @VisibleForTesting + static Optional<CompilationUnit> addVersionConstant(CompilationUnit versionJava, Version version, boolean updateCurrent) { + String newFieldName = toVersionField(version); + + ClassOrInterfaceDeclaration versionClass = versionJava.getClassByName("Version").get(); + if (versionClass.getFieldByName(newFieldName).isPresent()) { + LOGGER.lifecycle("New version constant [{}] already present, skipping", newFieldName); + return Optional.empty(); + } + + NavigableMap<Version, FieldDeclaration> versions = versionClass.getFields() + .stream() + .map(f -> Map.entry(f, parseVersionField(f.getVariable(0).getNameAsString()))) + .filter(e -> e.getValue().isPresent()) + .collect(Collectors.toMap(e -> e.getValue().get(), Map.Entry::getKey, (v1, v2) -> { + throw new IllegalArgumentException("Duplicate version constants " + v1); + }, TreeMap::new)); + + // find the version this should be inserted after + var previousVersion = versions.lowerEntry(version); + if (previousVersion == null) { + throw new IllegalStateException(String.format("Could not find previous version to [%s]", version)); + } + FieldDeclaration newVersion = createNewVersionConstant( + previousVersion.getValue(), + newFieldName, + String.format("%d_%02d_%02d_99", version.getMajor(), version.getMinor(), version.getRevision()) + ); + versionClass.getMembers().addAfter(newVersion, previousVersion.getValue()); + + if (updateCurrent) { + versionClass.getFieldByName("CURRENT") + .orElseThrow(() -> new IllegalArgumentException("Could not find CURRENT constant")) + .getVariable(0) + .setInitializer(new NameExpr(newFieldName)); + } + + return Optional.of(versionJava); + } + + private static FieldDeclaration createNewVersionConstant(FieldDeclaration lastVersion, String newName, String newExpr) { + return new FieldDeclaration( + new NodeList<>(lastVersion.getModifiers()), + new VariableDeclarator( + lastVersion.getCommonType(), + newName, + StaticJavaParser.parseExpression(String.format("new Version(%s)", newExpr)) + ) + ); + } + +
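// Editor's note: an illustrative usage sketch, not part of this change. Given the "updateVersions" registration in ReleaseToolsPlugin and the @Option declarations above, the task would be driven from the Gradle command line roughly as follows (the version numbers here are placeholders): // ./gradlew updateVersions --add-version=8.11.2 --set-current // ./gradlew updateVersions --remove-version=8.9.0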
@VisibleForTesting + static Optional removeVersionConstant(CompilationUnit versionJava, Version version) { + String removeFieldName = toVersionField(version); + + ClassOrInterfaceDeclaration versionClass = versionJava.getClassByName("Version").get(); + var declaration = versionClass.getFieldByName(removeFieldName); + if (declaration.isEmpty()) { + LOGGER.lifecycle("Version constant [{}] not found, skipping", removeFieldName); + return Optional.empty(); + } + + // check if this is referenced by CURRENT + String currentReference = versionClass.getFieldByName("CURRENT") + .orElseThrow(() -> new IllegalArgumentException("Could not find CURRENT constant")) + .getVariable(0) + .getInitializer() + .get() + .asNameExpr() + .getNameAsString(); + if (currentReference.equals(removeFieldName)) { + throw new IllegalArgumentException(String.format("Cannot remove version [%s], it is referenced by CURRENT", version)); + } + + declaration.get().remove(); + + return Optional.of(versionJava); + } + + static void writeOutNewContents(Path file, CompilationUnit unit) throws IOException { + if (unit.containsData(LexicalPreservingPrinter.NODE_TEXT_DATA) == false) { + throw new IllegalArgumentException("CompilationUnit has no lexical information for output"); + } + Files.writeString(file, LexicalPreservingPrinter.print(unit), StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING); + } +} diff --git a/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/release/UpdateVersionsTaskTests.java b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/release/UpdateVersionsTaskTests.java new file mode 100644 index 0000000000000..97441990d47c2 --- /dev/null +++ b/build-tools-internal/src/test/java/org/elasticsearch/gradle/internal/release/UpdateVersionsTaskTests.java @@ -0,0 +1,244 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.gradle.internal.release; + +import com.github.javaparser.StaticJavaParser; +import com.github.javaparser.ast.CompilationUnit; +import com.github.javaparser.ast.Node; +import com.github.javaparser.ast.body.FieldDeclaration; +import com.github.javaparser.printer.lexicalpreservation.LexicalPreservingPrinter; + +import org.elasticsearch.gradle.Version; +import org.junit.Test; + +import java.io.StringWriter; +import java.nio.file.Path; +import java.util.List; +import java.util.Optional; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.hasToString; +import static org.hamcrest.Matchers.is; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertThrows; + +public class UpdateVersionsTaskTests { + + @Test + public void addVersion_versionExists() { + final String versionJava = """ + public class Version { + public static final Version V_8_10_0 = new Version(8_10_00_99); + public static final Version V_8_10_1 = new Version(8_10_01_99); + public static final Version V_8_11_0 = new Version(8_11_00_99); + public static final Version CURRENT = V_8_11_0; + }"""; + + CompilationUnit unit = StaticJavaParser.parse(versionJava); + + var newUnit = UpdateVersionsTask.addVersionConstant(unit, Version.fromString("8.10.1"), false); + assertThat(newUnit.isPresent(), is(false)); + } + + @Test + public void addVersion_oldVersion() { + final String versionJava = """ + public class Version { + public static final Version V_8_10_0 = new Version(8_10_00_99); + public static final Version V_8_10_1 = new Version(8_10_01_99); + public static final Version V_8_11_0 = new Version(8_11_00_99); + public static final Version CURRENT = V_8_11_0; + }"""; + final String updatedVersionJava = """ + public class Version { + + public static final Version V_8_10_0 = new Version(8_10_00_99); + + public static final Version V_8_10_1 = new Version(8_10_01_99); + + public static final Version V_8_10_2 = new Version(8_10_02_99); + + public static final Version V_8_11_0 = new Version(8_11_00_99); + + public static final Version CURRENT = V_8_11_0; + } + """; + + CompilationUnit unit = StaticJavaParser.parse(versionJava); + + UpdateVersionsTask.addVersionConstant(unit, Version.fromString("8.10.2"), false); + + assertThat(unit, hasToString(updatedVersionJava)); + } + + @Test + public void addVersion_newVersion_current() { + final String versionJava = """ + public class Version { + public static final Version V_8_10_0 = new Version(8_10_00_99); + public static final Version V_8_10_1 = new Version(8_10_01_99); + public static final Version V_8_11_0 = new Version(8_11_00_99); + public static final Version CURRENT = V_8_11_0; + }"""; + final String updatedVersionJava = """ + public class Version { + + public static final Version V_8_10_0 = new Version(8_10_00_99); + + public static final Version V_8_10_1 = new Version(8_10_01_99); + + public static final Version V_8_11_0 = new Version(8_11_00_99); + + public static final Version V_8_11_1 = new Version(8_11_01_99); + + public static final Version CURRENT = V_8_11_1; + } + """; + + CompilationUnit unit = StaticJavaParser.parse(versionJava); + + UpdateVersionsTask.addVersionConstant(unit, Version.fromString("8.11.1"), true); + + assertThat(unit, hasToString(updatedVersionJava)); + } + + @Test + public void removeVersion_versionDoesntExist() { + final String versionJava = """ + public class Version { + public 
static final Version V_8_10_0 = new Version(8_10_00_99); + public static final Version V_8_10_1 = new Version(8_10_01_99); + public static final Version V_8_11_0 = new Version(8_11_00_99); + public static final Version CURRENT = V_8_11_0; + }"""; + + CompilationUnit unit = StaticJavaParser.parse(versionJava); + + var newUnit = UpdateVersionsTask.removeVersionConstant(unit, Version.fromString("8.10.2")); + assertThat(newUnit.isPresent(), is(false)); + } + + @Test + public void removeVersion_versionIsCurrent() { + final String versionJava = """ + public class Version { + public static final Version V_8_10_0 = new Version(8_10_00_99); + public static final Version V_8_10_1 = new Version(8_10_01_99); + public static final Version V_8_11_0 = new Version(8_11_00_99); + public static final Version CURRENT = V_8_11_0; + }"""; + + CompilationUnit unit = StaticJavaParser.parse(versionJava); + + var ex = assertThrows( + IllegalArgumentException.class, + () -> UpdateVersionsTask.removeVersionConstant(unit, Version.fromString("8.11.0")) + ); + assertThat(ex.getMessage(), equalTo("Cannot remove version [8.11.0], it is referenced by CURRENT")); + } + + @Test + public void removeVersion() { + final String versionJava = """ + public class Version { + public static final Version V_8_10_0 = new Version(8_10_00_99); + public static final Version V_8_10_1 = new Version(8_10_01_99); + public static final Version V_8_11_0 = new Version(8_11_00_99); + public static final Version CURRENT = V_8_11_0; + }"""; + final String updatedVersionJava = """ + public class Version { + + public static final Version V_8_10_0 = new Version(8_10_00_99); + + public static final Version V_8_11_0 = new Version(8_11_00_99); + + public static final Version CURRENT = V_8_11_0; + } + """; + + CompilationUnit unit = StaticJavaParser.parse(versionJava); + + UpdateVersionsTask.removeVersionConstant(unit, Version.fromString("8.10.1")); + + assertThat(unit, hasToString(updatedVersionJava)); + } + + @Test + public void updateVersionFile_addsCorrectly() throws Exception { + Version newVersion = new Version(50, 10, 20); + String versionField = UpdateVersionsTask.toVersionField(newVersion); + + Path versionFile = Path.of("..", UpdateVersionsTask.VERSION_FILE_PATH); + CompilationUnit unit = LexicalPreservingPrinter.setup(StaticJavaParser.parse(versionFile)); + assertFalse("Test version already exists in the file", findFirstField(unit, versionField).isPresent()); + + List existingFields = unit.findAll(FieldDeclaration.class); + + var result = UpdateVersionsTask.addVersionConstant(unit, newVersion, true); + assertThat(result.isPresent(), is(true)); + + // write out & parse back in again + StringWriter writer = new StringWriter(); + LexicalPreservingPrinter.print(unit, writer); + unit = StaticJavaParser.parse(writer.toString()); + + // a field has been added + assertThat(unit.findAll(FieldDeclaration.class), hasSize(existingFields.size() + 1)); + // the field has the right name + var field = findFirstField(unit, versionField); + assertThat(field.isPresent(), is(true)); + // the field has the right constant + assertThat( + field.get().getVariable(0).getInitializer().get(), + hasToString( + String.format("new Version(%d_%02d_%02d_99)", newVersion.getMajor(), newVersion.getMinor(), newVersion.getRevision()) + ) + ); + // and CURRENT has been updated + var current = findFirstField(unit, "CURRENT"); + assertThat(current.get().getVariable(0).getInitializer().get(), hasToString(versionField)); + } + + @Test + public void 
updateVersionFile_removesCorrectly() throws Exception { + Path versionFile = Path.of("..", UpdateVersionsTask.VERSION_FILE_PATH); + CompilationUnit unit = LexicalPreservingPrinter.setup(StaticJavaParser.parse(versionFile)); + + List existingFields = unit.findAll(FieldDeclaration.class); + + var staticVersionFields = unit.findAll( + FieldDeclaration.class, + f -> f.isStatic() && f.getVariable(0).getTypeAsString().equals("Version") + ); + // remove the last-but-two static version field (skip CURRENT and the latest version) + String constant = staticVersionFields.get(staticVersionFields.size() - 3).getVariable(0).getNameAsString(); + + Version versionToRemove = UpdateVersionsTask.parseVersionField(constant).orElseThrow(AssertionError::new); + var result = UpdateVersionsTask.removeVersionConstant(unit, versionToRemove); + assertThat(result.isPresent(), is(true)); + + // write out & parse back in again + StringWriter writer = new StringWriter(); + LexicalPreservingPrinter.print(unit, writer); + unit = StaticJavaParser.parse(writer.toString()); + + // a field has been removed + assertThat(unit.findAll(FieldDeclaration.class), hasSize(existingFields.size() - 1)); + // the removed field does not exist + var field = findFirstField(unit, constant); + assertThat(field.isPresent(), is(false)); + } + + private static Optional findFirstField(Node node, String name) { + return node.findFirst(FieldDeclaration.class, f -> f.getVariable(0).getName().getIdentifier().equals(name)); + } +} diff --git a/build-tools-internal/version.properties b/build-tools-internal/version.properties index c34bdc95046b3..54bc80e0c08c2 100644 --- a/build-tools-internal/version.properties +++ b/build-tools-internal/version.properties @@ -1,5 +1,5 @@ elasticsearch = 8.13.0 -lucene = 9.9.0-snapshot-bb4fec631e6 +lucene = 9.9.1 bundled_jdk_vendor = openjdk bundled_jdk = 21.0.1+12@415e3f918a1f4062a0074a2794853d0d @@ -48,8 +48,7 @@ ductTape = 1.0.8 commonsCompress = 1.24.0 # packer caching build logic -reflections = 0.9.12 -javassist = 3.28.0-GA +reflections = 0.10.2 # benchmark dependencies jmh = 1.26 diff --git a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/RestNoopSearchAction.java b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/RestNoopSearchAction.java index b39007c3a3691..0e91a063596e3 100644 --- a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/RestNoopSearchAction.java +++ b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/RestNoopSearchAction.java @@ -12,7 +12,7 @@ import org.elasticsearch.plugin.noop.NoopPlugin; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.action.RestChunkedToXContentListener; +import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener; import java.util.List; @@ -39,6 +39,10 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) { SearchRequest searchRequest = new SearchRequest(); - return channel -> client.execute(NoopPlugin.NOOP_SEARCH_ACTION, searchRequest, new RestChunkedToXContentListener<>(channel)); + return channel -> client.execute( + NoopPlugin.NOOP_SEARCH_ACTION, + searchRequest, + new RestRefCountedChunkedToXContentListener<>(channel) + ); } } diff --git 
a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/TransportNoopSearchAction.java b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/TransportNoopSearchAction.java index baefb15e6373a..193a4ca818035 100644 --- a/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/TransportNoopSearchAction.java +++ b/client/client-benchmark-noop-api-plugin/src/main/java/org/elasticsearch/plugin/noop/action/search/TransportNoopSearchAction.java @@ -21,7 +21,6 @@ import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.aggregations.InternalAggregations; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.profile.SearchProfileResults; import org.elasticsearch.search.suggest.Suggest; import org.elasticsearch.tasks.Task; @@ -45,15 +44,13 @@ public TransportNoopSearchAction(TransportService transportService, ActionFilter protected void doExecute(Task task, SearchRequest request, ActionListener listener) { listener.onResponse( new SearchResponse( - new InternalSearchResponse( - new SearchHits(new SearchHit[0], new TotalHits(0L, TotalHits.Relation.EQUAL_TO), 0.0f), - InternalAggregations.EMPTY, - new Suggest(Collections.emptyList()), - new SearchProfileResults(Collections.emptyMap()), - false, - false, - 1 - ), + new SearchHits(new SearchHit[0], new TotalHits(0L, TotalHits.Relation.EQUAL_TO), 0.0f), + InternalAggregations.EMPTY, + new Suggest(Collections.emptyList()), + false, + false, + new SearchProfileResults(Collections.emptyMap()), + 1, "", 1, 1, diff --git a/distribution/src/config/jvm.options b/distribution/src/config/jvm.options index 9e26582d58439..c5e905f461f45 100644 --- a/distribution/src/config/jvm.options +++ b/distribution/src/config/jvm.options @@ -58,10 +58,6 @@ # result in less optimal vector performance 20-:--add-modules=jdk.incubator.vector -# REMOVE once bumped to a JDK greater than 21.0.1, https://github.com/elastic/elasticsearch/issues/103004 -19-21:-XX:CompileCommand=exclude,org.apache.lucene.util.MSBRadixSorter::computeCommonPrefixLengthAndBuildHistogram -19-21:-XX:CompileCommand=exclude,org.apache.lucene.util.RadixSelector::computeCommonPrefixLengthAndBuildHistogram - ## heap dumps # generate a heap dump when an allocation from the Java heap fails; heap dumps diff --git a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/PluginsConfig.java b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/PluginsConfig.java index 8edd5f701706c..168e5ba3806f3 100644 --- a/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/PluginsConfig.java +++ b/distribution/tools/plugin-cli/src/main/java/org/elasticsearch/plugins/cli/PluginsConfig.java @@ -160,12 +160,11 @@ static PluginsConfig parseConfig(Path configPath, XContent xContent) throws IOEx parser.declareStringOrNull(PluginsConfig::setProxy, new ParseField("proxy")); parser.declareObjectArrayOrNull(PluginsConfig::setPlugins, descriptorParser, new ParseField("plugins")); - final XContentParser yamlXContentParser = xContent.createParser( - XContentParserConfiguration.EMPTY, - Files.newInputStream(configPath) - ); - - return parser.parse(yamlXContentParser, null); + try ( + XContentParser yamlXContentParser = xContent.createParser(XContentParserConfiguration.EMPTY, Files.newInputStream(configPath)) + ) { + return 
parser.parse(yamlXContentParser, null); + } } /** diff --git a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/MachineDependentHeap.java b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/MachineDependentHeap.java index a30f3115be5c9..87c4883ca3073 100644 --- a/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/MachineDependentHeap.java +++ b/distribution/tools/server-cli/src/main/java/org/elasticsearch/server/cli/MachineDependentHeap.java @@ -86,8 +86,7 @@ static class NodeRoleParser { @SuppressWarnings("unchecked") public static MachineNodeRole parse(InputStream config) { final Settings settings; - try { - var parser = YamlXContent.yamlXContent.createParser(XContentParserConfiguration.EMPTY, config); + try (var parser = YamlXContent.yamlXContent.createParser(XContentParserConfiguration.EMPTY, config)) { if (parser.currentToken() == null && parser.nextToken() == null) { settings = null; } else { diff --git a/docs/Versions.asciidoc b/docs/Versions.asciidoc index 3f44db9928434..420ee36359745 100644 --- a/docs/Versions.asciidoc +++ b/docs/Versions.asciidoc @@ -1,8 +1,8 @@ include::{docs-root}/shared/versions/stack/{source_branch}.asciidoc[] -:lucene_version: 9.9.0 -:lucene_version_path: 9_9_0 +:lucene_version: 9.9.1 +:lucene_version_path: 9_9_1 :jdk: 11.0.2 :jdk_major: 11 :build_type: tar diff --git a/docs/changelog/101640.yaml b/docs/changelog/101640.yaml new file mode 100644 index 0000000000000..6f61a3a3ffd84 --- /dev/null +++ b/docs/changelog/101640.yaml @@ -0,0 +1,5 @@ +pr: 101640 +summary: Support cross clusters query in ESQL +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/101717.yaml b/docs/changelog/101717.yaml new file mode 100644 index 0000000000000..7e97ef1049f88 --- /dev/null +++ b/docs/changelog/101717.yaml @@ -0,0 +1,5 @@ +pr: 101717 +summary: Pause shard snapshots on graceful shutdown +area: Snapshot/Restore +type: enhancement +issues: [] diff --git a/docs/changelog/102557.yaml b/docs/changelog/102557.yaml new file mode 100644 index 0000000000000..dfca1763064d4 --- /dev/null +++ b/docs/changelog/102557.yaml @@ -0,0 +1,5 @@ +pr: 102557 +summary: Metrics for search latencies +area: Search +type: enhancement +issues: [] diff --git a/docs/changelog/102584.yaml b/docs/changelog/102584.yaml new file mode 100644 index 0000000000000..44ff5dd9f7461 --- /dev/null +++ b/docs/changelog/102584.yaml @@ -0,0 +1,5 @@ +pr: 102584 +summary: Expose some ML metrics via APM +area: Machine Learning +type: enhancement +issues: [] diff --git a/docs/changelog/102798.yaml b/docs/changelog/102798.yaml new file mode 100644 index 0000000000000..986ad99f96a19 --- /dev/null +++ b/docs/changelog/102798.yaml @@ -0,0 +1,5 @@ +pr: 102798 +summary: Hot-reloadable remote cluster credentials +area: Security +type: enhancement +issues: [] diff --git a/docs/changelog/102824.yaml b/docs/changelog/102824.yaml new file mode 100644 index 0000000000000..21b39a4c3999d --- /dev/null +++ b/docs/changelog/102824.yaml @@ -0,0 +1,5 @@ +pr: 102824 +summary: Change detection aggregation improvements +area: Machine Learning +type: enhancement +issues: [] diff --git a/docs/changelog/103032.yaml b/docs/changelog/103032.yaml new file mode 100644 index 0000000000000..81d84fca0bdb0 --- /dev/null +++ b/docs/changelog/103032.yaml @@ -0,0 +1,5 @@ +pr: 103032 +summary: "x-pack/plugin/apm-data: Map some APM fields as flattened and fix error.grouping_name script" +area: Data streams +type: enhancement +issues: [] diff --git 
a/docs/changelog/103112.yaml b/docs/changelog/103112.yaml deleted file mode 100644 index dcb4cf604c179..0000000000000 --- a/docs/changelog/103112.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 103112 -summary: Add JIT compiler excludes for `computeCommonPrefixLengthAndBuildHistogram` -area: Search -type: bug -issues: [] diff --git a/docs/changelog/103150.yaml b/docs/changelog/103150.yaml new file mode 100644 index 0000000000000..3f42c882d89fb --- /dev/null +++ b/docs/changelog/103150.yaml @@ -0,0 +1,6 @@ +pr: 103150 +summary: "ES|QL: Fix NPE on single value detection" +area: ES|QL +type: bug +issues: + - 103141 diff --git a/docs/changelog/103325.yaml b/docs/changelog/103325.yaml new file mode 100644 index 0000000000000..7de6c41986490 --- /dev/null +++ b/docs/changelog/103325.yaml @@ -0,0 +1,6 @@ +pr: 103325 +summary: Added Duplicate Word Check Feature to Analysis Nori +area: Search +type: feature +issues: + - 103321 diff --git a/docs/changelog/103340.yaml b/docs/changelog/103340.yaml new file mode 100644 index 0000000000000..21280dbfc857d --- /dev/null +++ b/docs/changelog/103340.yaml @@ -0,0 +1,5 @@ +pr: 103340 +summary: Avoid humongous blocks +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/103387.yaml b/docs/changelog/103387.yaml new file mode 100644 index 0000000000000..77239fb9a3778 --- /dev/null +++ b/docs/changelog/103387.yaml @@ -0,0 +1,5 @@ +pr: 103387 +summary: Upgrade to Lucene 9.9.1 +area: Search +type: upgrade +issues: [] diff --git a/docs/changelog/103398.yaml b/docs/changelog/103398.yaml new file mode 100644 index 0000000000000..69452616ddc99 --- /dev/null +++ b/docs/changelog/103398.yaml @@ -0,0 +1,5 @@ +pr: 103398 +summary: ES|QL Async Query API +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/103434.yaml b/docs/changelog/103434.yaml new file mode 100644 index 0000000000000..56af604fe08f7 --- /dev/null +++ b/docs/changelog/103434.yaml @@ -0,0 +1,11 @@ +pr: 103434 +summary: Lower the `look_ahead_time` index setting's max value from 7 days to 2 hours. +area: TSDB +type: breaking +issues: [] +breaking: + title: Lower the `look_ahead_time` index setting's max value + area: Index setting + details: "Lower the `look_ahead_time` index setting's max value from 7 days to 2 hours." 
+ impact: "Any value between 2 hours and 7 days will be treated as a look ahead time of 2 hours." + notable: false diff --git a/docs/changelog/103461.yaml b/docs/changelog/103461.yaml new file mode 100644 index 0000000000000..3a1bf30aa90c9 --- /dev/null +++ b/docs/changelog/103461.yaml @@ -0,0 +1,5 @@ +pr: 103461 +summary: Add support for Well Known Binary (WKB) in the fields API for spatial fields +area: Geo +type: enhancement +issues: [] diff --git a/docs/changelog/103508.yaml b/docs/changelog/103508.yaml new file mode 100644 index 0000000000000..9c6f79ef75657 --- /dev/null +++ b/docs/changelog/103508.yaml @@ -0,0 +1,5 @@ +pr: 103508 +summary: "[Connectors API] Fix `ClassCastException` when creating a new sync job" +area: Application +type: bug +issues: [] diff --git a/docs/changelog/103520.yaml b/docs/changelog/103520.yaml new file mode 100644 index 0000000000000..0ef7124eb1ed2 --- /dev/null +++ b/docs/changelog/103520.yaml @@ -0,0 +1,5 @@ +pr: 103520 +summary: Request indexing memory pressure in APM node metrics publisher +area: Distributed +type: bug +issues: [] diff --git a/docs/changelog/103530.yaml b/docs/changelog/103530.yaml new file mode 100644 index 0000000000000..6feb04467b03e --- /dev/null +++ b/docs/changelog/103530.yaml @@ -0,0 +1,5 @@ +pr: 103530 +summary: Exclude quantiles when fetching model snapshots where possible +area: Machine Learning +type: bug +issues: [] diff --git a/docs/changelog/103538.yaml b/docs/changelog/103538.yaml new file mode 100644 index 0000000000000..5aaed771d5ee4 --- /dev/null +++ b/docs/changelog/103538.yaml @@ -0,0 +1,6 @@ +pr: 103538 +summary: "ESQL: Improve pushdown of certain filters" +area: ES|QL +type: bug +issues: + - 103536 diff --git a/docs/changelog/103546.yaml b/docs/changelog/103546.yaml new file mode 100644 index 0000000000000..08584e8555bd4 --- /dev/null +++ b/docs/changelog/103546.yaml @@ -0,0 +1,5 @@ +pr: 103546 +summary: Handle timeout on standalone rewrite calls +area: Search +type: bug +issues: [] diff --git a/docs/changelog/103555.yaml b/docs/changelog/103555.yaml new file mode 100644 index 0000000000000..2b0dc2692e252 --- /dev/null +++ b/docs/changelog/103555.yaml @@ -0,0 +1,6 @@ +pr: 103555 +summary: "[Security Solution] Allow write permission for `kibana_system` role on endpoint\ + \ response index" +area: Authorization +type: enhancement +issues: [] diff --git a/docs/changelog/103574.yaml b/docs/changelog/103574.yaml new file mode 100644 index 0000000000000..ed6ad237f49a2 --- /dev/null +++ b/docs/changelog/103574.yaml @@ -0,0 +1,5 @@ +pr: 103574 +summary: Samples should check if the aggregations result is empty or null +area: EQL +type: bug +issues: [] diff --git a/docs/changelog/103580.yaml b/docs/changelog/103580.yaml new file mode 100644 index 0000000000000..6fd0328017d1f --- /dev/null +++ b/docs/changelog/103580.yaml @@ -0,0 +1,6 @@ +pr: 103580 +summary: Copy counter field properties to downsampled index +area: Downsampling +type: bug +issues: + - 103569 diff --git a/docs/changelog/103591.yaml b/docs/changelog/103591.yaml new file mode 100644 index 0000000000000..41b6e362c5713 --- /dev/null +++ b/docs/changelog/103591.yaml @@ -0,0 +1,6 @@ +pr: 103591 +summary: Wait for the model results on graceful shutdown +area: Machine Learning +type: bug +issues: + - 103414 diff --git a/docs/changelog/103592.yaml b/docs/changelog/103592.yaml new file mode 100644 index 0000000000000..21e06f1f5a10d --- /dev/null +++ b/docs/changelog/103592.yaml @@ -0,0 +1,5 @@ +pr: 103592 +summary: Remove deprecated Block APIs +area: ES|QL +type:
enhancement +issues: [] diff --git a/docs/changelog/103601.yaml b/docs/changelog/103601.yaml new file mode 100644 index 0000000000000..bf7aaaf835e00 --- /dev/null +++ b/docs/changelog/103601.yaml @@ -0,0 +1,7 @@ +pr: 103601 +summary: Introduce Elasticsearch `PostingFormat` based on Lucene 90 posting format + using PFOR +area: Search +type: bug +issues: + - 103002 diff --git a/docs/changelog/103610.yaml b/docs/changelog/103610.yaml new file mode 100644 index 0000000000000..1ed38cc2822bd --- /dev/null +++ b/docs/changelog/103610.yaml @@ -0,0 +1,6 @@ +pr: 103610 +summary: "ESQL: allow `null` in date math" +area: ES|QL +type: bug +issues: + - 103085 diff --git a/docs/changelog/103611.yaml b/docs/changelog/103611.yaml new file mode 100644 index 0000000000000..51c77cd286d66 --- /dev/null +++ b/docs/changelog/103611.yaml @@ -0,0 +1,6 @@ +pr: 103611 +summary: Fix NPE on missing event queries +area: EQL +type: bug +issues: + - 103608 diff --git a/docs/changelog/103615.yaml b/docs/changelog/103615.yaml new file mode 100644 index 0000000000000..69498c749687f --- /dev/null +++ b/docs/changelog/103615.yaml @@ -0,0 +1,5 @@ +pr: 103615 +summary: Fix downsample api by returning a failure in case one or more downsample persistent tasks failed +area: Downsampling +type: bug +issues: [] diff --git a/docs/changelog/103628.yaml b/docs/changelog/103628.yaml new file mode 100644 index 0000000000000..42259c7bcde46 --- /dev/null +++ b/docs/changelog/103628.yaml @@ -0,0 +1,5 @@ +pr: 103628 +summary: Add ES|QL async delete API +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/103633.yaml b/docs/changelog/103633.yaml new file mode 100644 index 0000000000000..9e36451caafd8 --- /dev/null +++ b/docs/changelog/103633.yaml @@ -0,0 +1,5 @@ +pr: 103633 +summary: Update s3 latency metric to use micros +area: Search +type: enhancement +issues: [] diff --git a/docs/changelog/103643.yaml b/docs/changelog/103643.yaml new file mode 100644 index 0000000000000..966fb57acf566 --- /dev/null +++ b/docs/changelog/103643.yaml @@ -0,0 +1,5 @@ +pr: 103643 +summary: "[Profiling] Use shard request cache consistently" +area: Application +type: enhancement +issues: [] diff --git a/docs/changelog/103646.yaml b/docs/changelog/103646.yaml new file mode 100644 index 0000000000000..b7a6fae025771 --- /dev/null +++ b/docs/changelog/103646.yaml @@ -0,0 +1,5 @@ +pr: 103646 +summary: Add index mapping parameter for `counted_keyword` +area: Aggregations +type: enhancement +issues: [] diff --git a/docs/changelog/103669.yaml b/docs/changelog/103669.yaml new file mode 100644 index 0000000000000..57361b9d842e4 --- /dev/null +++ b/docs/changelog/103669.yaml @@ -0,0 +1,5 @@ +pr: 103669 +summary: Validate inference model ids +area: Machine Learning +type: enhancement +issues: [] diff --git a/docs/changelog/103670.yaml b/docs/changelog/103670.yaml new file mode 100644 index 0000000000000..ad3f0519b5d19 --- /dev/null +++ b/docs/changelog/103670.yaml @@ -0,0 +1,5 @@ +pr: 103670 +summary: "ESQL: Improve local folding of aggregates" +area: ES|QL +type: bug +issues: [] diff --git a/docs/changelog/103673.yaml b/docs/changelog/103673.yaml new file mode 100644 index 0000000000000..f786b57eba411 --- /dev/null +++ b/docs/changelog/103673.yaml @@ -0,0 +1,6 @@ +pr: 103673 +summary: "ESQL: Infer not null for aggregated fields" +area: ES|QL +type: enhancement +issues: + - 102787 diff --git a/docs/changelog/103690.yaml b/docs/changelog/103690.yaml new file mode 100644 index 0000000000000..fa9076789c1cd --- /dev/null +++ b/docs/changelog/103690.yaml @@
-0,0 +1,5 @@ +pr: 103690 +summary: Restore inter-segment search concurrency when synthetic source is enabled +area: Search +type: bug +issues: [] diff --git a/docs/changelog/103710.yaml b/docs/changelog/103710.yaml new file mode 100644 index 0000000000000..539b9f553ccc2 --- /dev/null +++ b/docs/changelog/103710.yaml @@ -0,0 +1,5 @@ +pr: 103710 +summary: List hidden shard stores by default +area: Store +type: enhancement +issues: [] diff --git a/docs/changelog/103720.yaml b/docs/changelog/103720.yaml new file mode 100644 index 0000000000000..e0ee879988fa7 --- /dev/null +++ b/docs/changelog/103720.yaml @@ -0,0 +1,6 @@ +pr: 103720 +summary: Add "step":"ERROR" to ILM explain response for missing policy +area: ILM+SLM +type: enhancement +issues: + - 99030 diff --git a/docs/changelog/103727.yaml b/docs/changelog/103727.yaml new file mode 100644 index 0000000000000..f943ee7906d58 --- /dev/null +++ b/docs/changelog/103727.yaml @@ -0,0 +1,5 @@ +pr: 103727 +summary: "ESQL: Track the rest of `DocVector`" +area: ES|QL +type: enhancement +issues: [] diff --git a/docs/changelog/103758.yaml b/docs/changelog/103758.yaml new file mode 100644 index 0000000000000..e77f228f134a0 --- /dev/null +++ b/docs/changelog/103758.yaml @@ -0,0 +1,5 @@ +pr: 103758 +summary: Fix the transport version of `PlanStreamOutput` +area: ES|QL +type: bug +issues: [] diff --git a/docs/changelog/103783.yaml b/docs/changelog/103783.yaml new file mode 100644 index 0000000000000..47c32dd639310 --- /dev/null +++ b/docs/changelog/103783.yaml @@ -0,0 +1,5 @@ +pr: 103783 +summary: "[Profiling] Mark all templates as managed" +area: Application +type: enhancement +issues: [] diff --git a/docs/changelog/103807.yaml b/docs/changelog/103807.yaml new file mode 100644 index 0000000000000..3849edcc00ced --- /dev/null +++ b/docs/changelog/103807.yaml @@ -0,0 +1,6 @@ +pr: 103807 +summary: "ESQL: Add single value checks on LIKE/RLIKE pushdown" +area: ES|QL +type: bug +issues: + - 103806 diff --git a/docs/changelog/103846.yaml b/docs/changelog/103846.yaml new file mode 100644 index 0000000000000..0d34efabc0278 --- /dev/null +++ b/docs/changelog/103846.yaml @@ -0,0 +1,5 @@ +pr: 103846 +summary: Support sampling in `counted_terms` aggregation +area: Aggregations +type: enhancement +issues: [] diff --git a/docs/reference/analysis/tokenfilters/synonym-graph-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/synonym-graph-tokenfilter.asciidoc index ce3d0a367dc4e..3efb8f6de9b3e 100644 --- a/docs/reference/analysis/tokenfilters/synonym-graph-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/synonym-graph-tokenfilter.asciidoc @@ -45,6 +45,13 @@ Use `synonyms_set` configuration option to provide a synonym set created via Syn } ---- +[WARNING] +====== +Synonyms sets must exist before they can be added to indices. +If an index is created referencing a nonexistent synonyms set, the index will remain in a partially created and inoperable state. +The only way to recover from this scenario is to ensure the synonyms set exists, then either delete and re-create the index, or close and re-open the index.
+====== + Use `synonyms_path` to provide a synonym file: [source,JSON] diff --git a/docs/reference/analysis/tokenfilters/synonym-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/synonym-tokenfilter.asciidoc index ce055d38092ff..046cd297b5092 100644 --- a/docs/reference/analysis/tokenfilters/synonym-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/synonym-tokenfilter.asciidoc @@ -33,6 +33,13 @@ Use `synonyms_set` configuration option to provide a synonym set created via Syn } ---- +[WARNING] +====== +Synonyms sets must exist before they can be added to indices. +If an index is created referencing a nonexistent synonyms set, the index will remain in a partially created and inoperable state. +The only way to recover from this scenario is to ensure the synonyms set exists, then either delete and re-create the index, or close and re-open the index. +====== + Use `synonyms_path` to provide a synonym file: [source,JSON] diff --git a/docs/reference/connector/apis/check-in-connector-api.asciidoc b/docs/reference/connector/apis/check-in-connector-api.asciidoc new file mode 100644 index 0000000000000..c0c021f1304dc --- /dev/null +++ b/docs/reference/connector/apis/check-in-connector-api.asciidoc @@ -0,0 +1,76 @@ +[[check-in-connector-api]] +=== Check in connector API + +preview::[] + +++++ +Check in a connector +++++ + +Updates the `last_seen` field of a connector with the current timestamp. + +[[check-in-connector-api-request]] +==== {api-request-title} + +`PUT _connector/<connector_id>/_check_in` + +[[check-in-connector-api-prereq]] +==== {api-prereq-title} + +* To sync data using connectors, it's essential to have the Elastic connectors service running. +* The `connector_id` parameter should reference an existing connector. + +[[check-in-connector-api-path-params]] +==== {api-path-parms-title} + +`<connector_id>`:: +(Required, string) + + +[[check-in-connector-api-response-codes]] +==== {api-response-codes-title} + +`200`:: +Connector `last_seen` field was successfully updated with the current timestamp. + +`400`:: +The `connector_id` was not provided. + +`404` (Missing resources):: +No connector matching `connector_id` could be found.
+ +[[check-in-connector-api-example]] +==== {api-examples-title} + +The following example updates the `last_seen` property with the current timestamp for the connector with ID `my-connector`: + +//// +[source, console] +-------------------------------------------------- +PUT _connector/my-connector +{ + "index_name": "search-google-drive", + "name": "My Connector", + "service_type": "google_drive" +} +-------------------------------------------------- +// TESTSETUP + +[source,console] +-------------------------------------------------- +DELETE _connector/my-connector +-------------------------------------------------- +// TEARDOWN +//// + +[source,console] +---- +PUT _connector/my-connector/_check_in +---- + +[source,console-result] +---- +{ + "result": "updated" +} +---- diff --git a/docs/reference/connector/apis/connector-apis.asciidoc b/docs/reference/connector/apis/connector-apis.asciidoc index e31eb7899f5d9..e127dc07446b5 100644 --- a/docs/reference/connector/apis/connector-apis.asciidoc +++ b/docs/reference/connector/apis/connector-apis.asciidoc @@ -26,6 +26,14 @@ Use the following APIs to manage connectors: * <> * <> * <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> +* <> [discrete] @@ -44,9 +52,11 @@ Use the following APIs to manage sync jobs: * <> * <> * <> +* <> include::cancel-connector-sync-job-api.asciidoc[] +include::check-in-connector-api.asciidoc[] include::check-in-connector-sync-job-api.asciidoc[] include::create-connector-api.asciidoc[] include::create-connector-sync-job-api.asciidoc[] @@ -57,3 +67,11 @@ include::get-connector-sync-job-api.asciidoc[] include::list-connectors-api.asciidoc[] include::list-connector-sync-jobs-api.asciidoc[] include::set-connector-sync-job-error-api.asciidoc[] +include::set-connector-sync-job-stats-api.asciidoc[] +include::update-connector-configuration-api.asciidoc[] +include::update-connector-error-api.asciidoc[] +include::update-connector-filtering-api.asciidoc[] +include::update-connector-last-sync-api.asciidoc[] +include::update-connector-name-description-api.asciidoc[] +include::update-connector-pipeline-api.asciidoc[] +include::update-connector-scheduling-api.asciidoc[] diff --git a/docs/reference/connector/apis/delete-connector-api.asciidoc b/docs/reference/connector/apis/delete-connector-api.asciidoc index 2bda7da72cb72..6d3a120df785a 100644 --- a/docs/reference/connector/apis/delete-connector-api.asciidoc +++ b/docs/reference/connector/apis/delete-connector-api.asciidoc @@ -19,6 +19,7 @@ This is a destructive action that is not recoverable. ==== {api-prereq-title} * To sync data using connectors, it's essential to have the Elastic connectors service running. +* The `connector_id` parameter should reference an existing connector. [[delete-connector-api-path-params]] ==== {api-path-parms-title} diff --git a/docs/reference/connector/apis/set-connector-sync-job-stats-api.asciidoc b/docs/reference/connector/apis/set-connector-sync-job-stats-api.asciidoc new file mode 100644 index 0000000000000..0513155312bb4 --- /dev/null +++ b/docs/reference/connector/apis/set-connector-sync-job-stats-api.asciidoc @@ -0,0 +1,77 @@ +[[set-connector-sync-job-stats-api]] +=== Set connector sync job stats API +++++ +Set connector sync job stats +++++ + +Sets connector sync job stats. + +[[set-connector-sync-job-stats-api-request]] +==== {api-request-title} +`PUT _connector/_sync_job/<connector_sync_job_id>/_stats` + +[[set-connector-sync-job-stats-api-prereqs]] +==== {api-prereq-title} + +* To sync data using connectors, it's essential to have the Elastic connectors service running.
+* The `connector_sync_job_id` parameter should reference an existing connector sync job. + +[[set-connector-sync-job-stats-api-desc]] +==== {api-description-title} + +Sets the stats for a connector sync job. +Stats include: `deleted_document_count`, `indexed_document_count`, `indexed_document_volume` and `total_document_count`. +`last_seen` can also be updated using this API. +This API is mainly used by the connector service for updating sync job information. + +[[set-connector-sync-job-stats-api-path-params]] +==== {api-path-parms-title} + +`<connector_sync_job_id>`:: +(Required, string) + +[role="child_attributes"] +[[set-connector-sync-job-stats-api-request-body]] +==== {api-request-body-title} + +`deleted_document_count`:: +(Required, int) The number of documents the sync job deleted. + +`indexed_document_count`:: +(Required, int) The number of documents the sync job indexed. + +`indexed_document_volume`:: +(Required, int) The total size of the data (in MiB) the sync job indexed. + +`total_document_count`:: +(Optional, int) The total number of documents in the target index after the sync job finished. + +`last_seen`:: +(Optional, instant) The timestamp to set the connector sync job's `last_seen` property. + +[[set-connector-sync-job-stats-api-response-codes]] +==== {api-response-codes-title} + +`200`:: +Indicates that the connector sync job stats were successfully updated. + +`404`:: +No connector sync job matching `connector_sync_job_id` could be found. + +[[set-connector-sync-job-stats-api-example]] +==== {api-examples-title} + +The following example sets all mandatory and optional stats for the connector sync job `my-connector-sync-job`: + +[source,console] +---- +PUT _connector/_sync_job/my-connector-sync-job/_stats +{ + "deleted_document_count": 10, + "indexed_document_count": 20, + "indexed_document_volume": 1000, + "total_document_count": 2000, + "last_seen": "2023-01-02T10:00:00Z" +} +---- +// TEST[skip:there's no way to clean up after creating a connector sync job, as we don't know the id ahead of time. Therefore, skip this test.] diff --git a/docs/reference/connector/apis/update-connector-configuration-api.asciidoc b/docs/reference/connector/apis/update-connector-configuration-api.asciidoc new file mode 100644 index 0000000000000..6d6591a6f00bc --- /dev/null +++ b/docs/reference/connector/apis/update-connector-configuration-api.asciidoc @@ -0,0 +1,154 @@ +[[update-connector-configuration-api]] +=== Update connector configuration API + +preview::[] + +++++ +Update connector configuration +++++ + +Updates the `configuration` of a connector. + + +[[update-connector-configuration-api-request]] +==== {api-request-title} + +`PUT _connector/<connector_id>/_configuration` + +[[update-connector-configuration-api-prereq]] +==== {api-prereq-title} + +* To sync data using connectors, it's essential to have the Elastic connectors service running. +* The `connector_id` parameter should reference an existing connector. +* The configuration fields definition must be compatible with the specific connector type being used. + +[[update-connector-configuration-api-path-params]] +==== {api-path-parms-title} + +`<connector_id>`:: +(Required, string) + +[role="child_attributes"] +[[update-connector-configuration-api-request-body]] +==== {api-request-body-title} + +`configuration`:: +(Required, object) The configuration for the connector. The configuration field is a map where each key represents a specific configuration field name, and the value is a `ConnectorConfiguration` object.
+ +Each `ConnectorConfiguration` object contains the following attributes: + +* `category` (Optional, string) The category of the configuration field. This helps in grouping related configurations together in the user interface. + +* `default_value` (Required, string | number | bool) The default value for the configuration. This value is used if the value field is empty, applicable only for non-required fields. + +* `depends_on` (Required, array of `ConfigurationDependency`) An array of dependencies on other configurations. A field will not be enabled unless these dependencies are met. Each dependency specifies a field key and the required value for the dependency to be considered fulfilled. + +* `display` (Required, string) The display type for the UI element that represents this configuration. This defines how the field should be rendered in the user interface. Supported types are: `text`, `textbox`, `textarea`, `numeric`, `toggle` and `dropdown`. + +* `label` (Required, string) The display label for the configuration field. This label is shown in the user interface, adjacent to the field. + +* `options` (Required, array of `ConfigurationSelectOption`) An array of options for list-type fields. These options are used for inputs in the user interface, each having a label for display and a value. + +* `order` (Required, number) The order in which this configuration appears in the user interface. This helps in organizing fields logically. + +* `placeholder` (Required, string) Placeholder text for the configuration field. This text is displayed inside the field before a value is entered. + +* `required` (Required, boolean) Indicates whether the configuration is mandatory. If true, a value must be provided for the field. + +* `sensitive` (Required, boolean) Indicates whether the configuration contains sensitive information. Sensitive fields may be obfuscated in the user interface. + +* `tooltip` (Optional, string) Tooltip text providing additional information about the configuration. This text appears when the user hovers over the info icon next to the configuration field. + +* `type` (Required, string) The type of the configuration field, such as `str`, `int`, `bool`, `list`. This defines the data type and format of the field's value. + +* `ui_restrictions` (Required, array of strings) A list of UI restrictions. These restrictions define where in the user interface this field should be available or restricted. + +* `validations` (Required, array of `ConfigurationValidation`) An array of rules for validating the field's value. Each validation specifies a type and a constraint that the field's value must meet. + +* `value` (Required, string | number | bool) The current value of the configuration. This is the actual value set for the field and is used by the connector during its operations. + +`ConfigurationDependency` represents a dependency that a configuration field has on another field's value. It contains the following attributes: + +* `field` (Required, string) The name of the field in the configuration that this dependency relates to. + +* `value` (Required, string | number | bool) The required value of the specified field for this dependency to be met. + +`ConfigurationSelectOption` defines an option within a selectable configuration field. It contains the following attributes: + +* `label` (Required, string) The display label for the option. + +* `value` (Required, string) The actual value associated with the option. 
+ +`ConfigurationValidation` specifies validation rules for configuration fields. Each ConfigurationValidation instance enforces a specific type of validation based on its type and constraint. It contains the following attributes: + +* `constraint` (Required, string | number) The validation constraint. The nature of this constraint depends on the validation type. It could be a numeric value, a list, or a regular expression pattern. + +* `type` (Required, ConfigurationValidationType) The type of validation to be performed. Possible values include: `less_than`, `greater_than`, `list_type`, `included_in`, `regex` and `unset`. + + +[[update-connector-configuration-api-response-codes]] +==== {api-response-codes-title} + +`200`:: +Connector configuration was successfully updated. + +`400`:: +The `connector_id` was not provided or the request payload was malformed. + +`404` (Missing resources):: +No connector matching `connector_id` could be found. + +[[update-connector-configuration-api-example]] +==== {api-examples-title} + +The following example updates the `configuration` for the connector with ID `my-connector`: + +//// +[source, console] +-------------------------------------------------- +PUT _connector/my-connector +{ + "index_name": "search-google-drive", + "name": "My Connector", + "service_type": "google_drive" +} +-------------------------------------------------- +// TESTSETUP + +[source,console] +-------------------------------------------------- +DELETE _connector/my-connector +-------------------------------------------------- +// TEARDOWN +//// + +[source,console] +---- +PUT _connector/my-connector/_configuration +{ + "configuration": { + "service_account_credentials": { + "default_value": null, + "depends_on": [], + "display": "textarea", + "label": "Google Drive service account JSON", + "options": [], + "order": 1, + "required": true, + "sensitive": true, + "tooltip": "This connector authenticates as a service account to synchronize content from Google Drive.", + "type": "str", + "ui_restrictions": [], + "validations": [], + "value": "...service account JSON..." + } + } +} +---- + +[source,console-result] +---- +{ + "result": "updated" +} +---- diff --git a/docs/reference/connector/apis/update-connector-error-api.asciidoc b/docs/reference/connector/apis/update-connector-error-api.asciidoc new file mode 100644 index 0000000000000..19bc15f0dc60a --- /dev/null +++ b/docs/reference/connector/apis/update-connector-error-api.asciidoc @@ -0,0 +1,86 @@ +[[update-connector-error-api]] +=== Update connector error API + +preview::[] + +++++ +Update connector error +++++ + +Updates the `error` field of a connector. + +[[update-connector-error-api-request]] +==== {api-request-title} + +`PUT _connector/<connector_id>/_error` + +[[update-connector-error-api-prereq]] +==== {api-prereq-title} + +* To sync data using connectors, it's essential to have the Elastic connectors service running. +* The `connector_id` parameter should reference an existing connector. + +[[update-connector-error-api-path-params]] +==== {api-path-parms-title} + +`<connector_id>`:: +(Required, string) + +[role="child_attributes"] +[[update-connector-error-api-request-body]] +==== {api-request-body-title} + +`error`:: +(Required, string) A message related to the last error encountered by the connector. + + +[[update-connector-error-api-response-codes]] +==== {api-response-codes-title} + +`200`:: +Connector `error` field was successfully updated. + +`400`:: +The `connector_id` was not provided or the request payload was malformed.
+ +`404` (Missing resources):: +No connector matching `connector_id` could be found. + +[[update-connector-error-api-example]] +==== {api-examples-title} + +The following example updates the `error` field for the connector with ID `my-connector`: + +//// +[source, console] +-------------------------------------------------- +PUT _connector/my-connector +{ + "index_name": "search-google-drive", + "name": "My Connector", + "service_type": "google_drive" +} +-------------------------------------------------- +// TESTSETUP + +[source,console] +-------------------------------------------------- +DELETE _connector/my-connector +-------------------------------------------------- +// TEARDOWN +//// + +[source,console] +---- +PUT _connector/my-connector/_error +{ + "error": "Houston, we have a problem!" +} +---- + +[source,console-result] +---- +{ + "result": "updated" +} +---- diff --git a/docs/reference/connector/apis/update-connector-filtering-api.asciidoc b/docs/reference/connector/apis/update-connector-filtering-api.asciidoc new file mode 100644 index 0000000000000..d4c7bb16a3304 --- /dev/null +++ b/docs/reference/connector/apis/update-connector-filtering-api.asciidoc @@ -0,0 +1,186 @@ +[[update-connector-filtering-api]] +=== Update connector filtering API + +preview::[] + +++++ +Update connector filtering +++++ + +Updates the `filtering` configuration of a connector. Learn more about filtering in the {enterprise-search-ref}/sync-rules.html[sync rules] documentation. + +[[update-connector-filtering-api-request]] +==== {api-request-title} + +`PUT _connector/<connector_id>/_filtering` + +[[update-connector-filtering-api-prereq]] +==== {api-prereq-title} + +* To sync data using connectors, it's essential to have the Elastic connectors service running. +* The `connector_id` parameter should reference an existing connector. + +[[update-connector-filtering-api-path-params]] +==== {api-path-parms-title} + +`<connector_id>`:: +(Required, string) + +[role="child_attributes"] +[[update-connector-filtering-api-request-body]] +==== {api-request-body-title} + +`filtering`:: +(Required, array) The filtering configuration for the connector. This configuration determines the set of rules applied for filtering data during syncs. + +Each entry in the `filtering` array represents a set of filtering rules for a specific data domain and includes the following attributes: + +- `domain` (Required, string) + +Specifies the data domain to which these filtering rules apply. + +- `active` (Required, object) + +Contains the set of rules that are actively used for sync jobs. The `active` object includes: + + * `rules` (Required, array of objects) + + An array of individual filtering rule objects, each with the following sub-attributes: + ** `id` (Required, string) + + A unique identifier for the rule. + ** `policy` (Required, string) + + Specifies the policy, such as "include" or "exclude". + ** `field` (Required, string) + + The field in the document to which this rule applies. + ** `rule` (Required, string) + + The type of rule, such as "regex", "starts_with", "ends_with", "contains", "equals", "<", ">", etc. + ** `value` (Required, string) + + The value to be used in conjunction with the rule for matching the contents of the document's field. + ** `order` (Required, number) + + The order in which the rules are applied. The first rule to match has its policy applied. + ** `created_at` (Optional, datetime) + + The timestamp when the rule was added. + ** `updated_at` (Optional, datetime) + + The timestamp when the rule was last edited.
+ + * `advanced_snippet` (Optional, object) + + Used for {enterprise-search-ref}/sync-rules.html#sync-rules-advanced[advanced filtering] at query time, with the following sub-attributes: + ** `value` (Required, object) + + A JSON object passed directly to the connector for advanced filtering. + ** `created_at` (Optional, datetime) + + The timestamp when this JSON object was created. + ** `updated_at` (Optional, datetime) + + The timestamp when this JSON object was last edited. + + * `validation` (Optional, object) + + Provides validation status for the rules, including: + ** `state` (Required, string) + + Indicates the validation state: "edited", "valid", or "invalid". + ** `errors` (Optional, object) + + Contains details about any validation errors, with sub-attributes: + *** `ids` (Required, string) + + The ID(s) of any rules deemed invalid. + *** `messages` (Required, string) + + Messages explaining what is invalid about the rules. + +- `draft` (Optional, object) + +An object identical in structure to the `active` object, but used for drafting and editing filtering rules before they become active. + + +[[update-connector-filtering-api-response-codes]] +==== {api-response-codes-title} + +`200`:: +Connector `filtering` field was successfully updated. + +`400`:: +The `connector_id` was not provided or the request payload was malformed. + +`404` (Missing resources):: +No connector matching `connector_id` could be found. + +[[update-connector-filtering-api-example]] +==== {api-examples-title} + +The following example updates the `filtering` property for the connector with ID `my-connector`: + +//// +[source, console] +-------------------------------------------------- +PUT _connector/my-connector +{ + "index_name": "search-google-drive", + "name": "My Connector", + "service_type": "google_drive" +} +-------------------------------------------------- +// TESTSETUP + +[source,console] +-------------------------------------------------- +DELETE _connector/my-connector +-------------------------------------------------- +// TEARDOWN +//// + +[source,console] +---- +PUT _connector/my-connector/_filtering +{ + "filtering": [ + { + "active": { + "advanced_snippet": { + "created_at": "2023-11-09T15:13:08.231Z", + "updated_at": "2023-11-09T15:13:08.231Z", + "value": {} + }, + "rules": [ + { + "created_at": "2023-11-09T15:13:08.231Z", + "field": "_", + "id": "DEFAULT", + "order": 0, + "policy": "include", + "rule": "regex", + "updated_at": "2023-11-09T15:13:08.231Z", + "value": ".*" + } + ], + "validation": { + "errors": [], + "state": "valid" + } + }, + "domain": "DEFAULT", + "draft": { + "advanced_snippet": { + "created_at": "2023-11-09T15:13:08.231Z", + "updated_at": "2023-11-09T15:13:08.231Z", + "value": {} + }, + "rules": [ + { + "created_at": "2023-11-09T15:13:08.231Z", + "field": "_", + "id": "DEFAULT", + "order": 0, + "policy": "include", + "rule": "regex", + "updated_at": "2023-11-09T15:13:08.231Z", + "value": ".*" + } + ], + "validation": { + "errors": [], + "state": "valid" + } + } + } + ] +} +---- + +[source,console-result] +---- +{ + "result": "updated" +} +---- diff --git a/docs/reference/connector/apis/update-connector-last-sync-api.asciidoc b/docs/reference/connector/apis/update-connector-last-sync-api.asciidoc new file mode 100644 index 0000000000000..e9fffd22b21cd --- /dev/null +++ b/docs/reference/connector/apis/update-connector-last-sync-api.asciidoc @@ -0,0 +1,135 @@ +[[update-connector-last-sync-api]] +=== Update connector last sync stats API + +preview::[] + +++++ +Update 
connector last sync stats +++++ + +Updates the fields related to the last sync of a connector. + +This action is used for analytics and monitoring. + +[[update-connector-last-sync-api-request]] +==== {api-request-title} + +`PUT _connector/<connector_id>/_last_sync` + +[[update-connector-last-sync-api-prereq]] +==== {api-prereq-title} + +* To sync data using connectors, it's essential to have the Elastic connectors service running. +* The `connector_id` parameter should reference an existing connector. + +[[update-connector-last-sync-api-path-params]] +==== {api-path-parms-title} + +`<connector_id>`:: +(Required, string) + +[role="child_attributes"] +[[update-connector-last-sync-api-request-body]] +==== {api-request-body-title} + +`last_access_control_sync_error`:: +(Optional, string) The last error message related to access control sync, if any. + +`last_access_control_sync_scheduled_at`:: +(Optional, datetime) The datetime indicating when the last access control sync was scheduled. + +`last_access_control_sync_status`:: +(Optional, ConnectorSyncStatus) The status of the last access control sync. + +`last_deleted_document_count`:: +(Optional, long) The number of documents deleted in the last sync process. + +`last_incremental_sync_scheduled_at`:: +(Optional, datetime) The datetime when the last incremental sync was scheduled. + +`last_indexed_document_count`:: +(Optional, long) The number of documents indexed in the last sync. + +`last_sync_error`:: +(Optional, string) The last error message encountered during a sync process, if any. + +`last_sync_scheduled_at`:: +(Optional, datetime) The datetime when the last sync was scheduled. + +`last_sync_status`:: +(Optional, ConnectorSyncStatus) The status of the last sync. + +`last_synced`:: +(Optional, datetime) The datetime of the last successful synchronization. + + +The value of `ConnectorSyncStatus` is one of the following lowercase strings representing different sync states: + +* `canceling`: The sync process is in the process of being canceled. +* `canceled`: The sync process has been canceled. +* `completed`: The sync process completed successfully. +* `error`: An error occurred during the sync process. +* `in_progress`: The sync process is currently underway. +* `pending`: The sync is pending and has not yet started. +* `suspended`: The sync process has been temporarily suspended. + + +[[update-connector-last-sync-api-response-codes]] +==== {api-response-codes-title} + +`200`:: +Connector last sync stats were successfully updated. + +`400`:: +The `connector_id` was not provided or the request payload was malformed. + +`404` (Missing resources):: +No connector matching `connector_id` could be found.
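+
+Because every field in the request body is optional, a request can update any
+subset of the last sync stats. For instance, the following sketch (illustrative
+only; the full, tested example follows below) records just the outcome of the
+most recent sync:
+
+[source,console]
+----
+PUT _connector/my-connector/_last_sync
+{
+    "last_sync_status": "completed",
+    "last_synced": "2023-11-09T15:13:08.231Z"
+}
+----
+// TEST[skip: illustrative sketch]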
+ +[[update-connector-last-sync-api-example]] +==== {api-examples-title} + +The following example updates the last sync stats for the connector with ID `my-connector`: + +//// +[source, console] +-------------------------------------------------- +PUT _connector/my-connector +{ + "index_name": "search-google-drive", + "name": "My Connector", + "service_type": "google_drive" +} +-------------------------------------------------- +// TESTSETUP + +[source,console] +-------------------------------------------------- +DELETE _connector/my-connector +-------------------------------------------------- +// TEARDOWN +//// + +[source,console] +---- +PUT _connector/my-connector/_last_sync +{ + "last_access_control_sync_error": "Houston, we have a problem!", + "last_access_control_sync_scheduled_at": "2023-11-09T15:13:08.231Z", + "last_access_control_sync_status": "pending", + "last_deleted_document_count": 42, + "last_incremental_sync_scheduled_at": "2023-11-09T15:13:08.231Z", + "last_indexed_document_count": 42, + "last_sync_error": "Houston, we have a problem!", + "last_sync_scheduled_at": "2024-11-09T15:13:08.231Z", + "last_sync_status": "completed", + "last_synced": "2024-11-09T15:13:08.231Z" +} +---- + +[source,console-result] +---- +{ + "result": "updated" +} +---- diff --git a/docs/reference/connector/apis/update-connector-name-description-api.asciidoc b/docs/reference/connector/apis/update-connector-name-description-api.asciidoc new file mode 100644 index 0000000000000..d45fb545e168b --- /dev/null +++ b/docs/reference/connector/apis/update-connector-name-description-api.asciidoc @@ -0,0 +1,90 @@ +[[update-connector-name-description-api]] +=== Update connector name and description API + +preview::[] + +++++ +Update connector name and description +++++ + +Updates the `name` and `description` fields of a connector. + +[[update-connector-name-description-api-request]] +==== {api-request-title} + +`PUT _connector/<connector_id>/_name` + +[[update-connector-name-description-api-prereq]] +==== {api-prereq-title} + +* To sync data using connectors, it's essential to have the Elastic connectors service running. +* The `connector_id` parameter should reference an existing connector. + +[[update-connector-name-description-api-path-params]] +==== {api-path-parms-title} + +`<connector_id>`:: +(Required, string) + +[role="child_attributes"] +[[update-connector-name-description-api-request-body]] +==== {api-request-body-title} + +`name`:: +(Required, string) Name of the connector. + +`description`:: +(Optional, string) Description of the connector. + + +[[update-connector-name-description-api-response-codes]] +==== {api-response-codes-title} + +`200`:: +Connector `name` and `description` fields were successfully updated. + +`400`:: +The `connector_id` was not provided or the request payload was malformed. + +`404` (Missing resources):: +No connector matching `connector_id` could be found.
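+
+Because `description` is optional, a name-only update is also a valid request.
+An illustrative sketch (the full, tested example follows below):
+
+[source,console]
+----
+PUT _connector/my-connector/_name
+{
+    "name": "Custom connector"
+}
+----
+// TEST[skip: illustrative sketch]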
+ +[[update-connector-name-description-api-example]] +==== {api-examples-title} + +The following example updates the `name` and `description` fields for the connector with ID `my-connector`: + +//// +[source, console] +-------------------------------------------------- +PUT _connector/my-connector +{ + "index_name": "search-google-drive", + "name": "My Connector", + "service_type": "google_drive" +} +-------------------------------------------------- +// TESTSETUP + +[source,console] +-------------------------------------------------- +DELETE _connector/my-connector +-------------------------------------------------- +// TEARDOWN +//// + +[source,console] +---- +PUT _connector/my-connector/_name +{ + "name": "Custom connector", + "description": "This is my customized connector" +} +---- + +[source,console-result] +---- +{ + "result": "updated" +} +---- diff --git a/docs/reference/connector/apis/update-connector-pipeline-api.asciidoc b/docs/reference/connector/apis/update-connector-pipeline-api.asciidoc new file mode 100644 index 0000000000000..6938506703da8 --- /dev/null +++ b/docs/reference/connector/apis/update-connector-pipeline-api.asciidoc @@ -0,0 +1,103 @@ +[[update-connector-pipeline-api]] +=== Update connector pipeline API + +preview::[] + +++++ +Update connector pipeline +++++ + +Updates the `pipeline` configuration of a connector. + +When you create a new connector, the configuration of an <> is populated with default settings. + +[[update-connector-pipeline-api-request]] +==== {api-request-title} + +`PUT _connector/<connector_id>/_pipeline` + +[[update-connector-pipeline-api-prereq]] +==== {api-prereq-title} + +* To sync data using connectors, it's essential to have the Elastic connectors service running. +* The `connector_id` parameter should reference an existing connector. + +[[update-connector-pipeline-api-path-params]] +==== {api-path-parms-title} + +`<connector_id>`:: +(Required, string) + +[role="child_attributes"] +[[update-connector-pipeline-api-request-body]] +==== {api-request-body-title} + +`pipeline`:: +(Required, object) The pipeline configuration of the connector. The pipeline determines how data is processed during ingestion into Elasticsearch. + +Pipeline configuration must include the following attributes: + +- `extract_binary_content` (Required, boolean) A flag indicating whether to extract binary content during ingestion. + +- `name` (Required, string) The name of the ingest pipeline. + +- `reduce_whitespace` (Required, boolean) A flag indicating whether to reduce extra whitespace in the ingested content. + +- `run_ml_inference` (Required, boolean) A flag indicating whether to run machine learning inference on the ingested content. + + +[[update-connector-pipeline-api-response-codes]] +==== {api-response-codes-title} + +`200`:: +Connector `pipeline` field was successfully updated. + +`400`:: +The `connector_id` was not provided or the request payload was malformed. + +`404` (Missing resources):: +No connector matching `connector_id` could be found.
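+
+All four pipeline attributes are required, so a request always restates the
+complete pipeline configuration. For instance, the following sketch points a
+connector at a generic pipeline (illustrative only; `ent-search-generic-ingestion`
+is assumed here to be the default pipeline name, and the full, tested example
+follows below):
+
+[source,console]
+----
+PUT _connector/my-connector/_pipeline
+{
+    "pipeline": {
+        "extract_binary_content": true,
+        "name": "ent-search-generic-ingestion",
+        "reduce_whitespace": true,
+        "run_ml_inference": false
+    }
+}
+----
+// TEST[skip: illustrative sketch]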
+ +[[update-connector-pipeline-api-example]] +==== {api-examples-title} + +The following example updates the `pipeline` property for the connector with ID `my-connector`: + +//// +[source, console] +-------------------------------------------------- +PUT _connector/my-connector +{ + "index_name": "search-google-drive", + "name": "My Connector", + "service_type": "google_drive" +} +-------------------------------------------------- +// TESTSETUP + +[source,console] +-------------------------------------------------- +DELETE _connector/my-connector +-------------------------------------------------- +// TEARDOWN +//// + +[source,console] +---- +PUT _connector/my-connector/_pipeline +{ + "pipeline": { + "extract_binary_content": true, + "name": "my-connector-pipeline", + "reduce_whitespace": true, + "run_ml_inference": true + } +} +---- + +[source,console-result] +---- +{ + "result": "updated" +} +---- diff --git a/docs/reference/connector/apis/update-connector-scheduling-api.asciidoc b/docs/reference/connector/apis/update-connector-scheduling-api.asciidoc new file mode 100644 index 0000000000000..c47e6d4c0367b --- /dev/null +++ b/docs/reference/connector/apis/update-connector-scheduling-api.asciidoc @@ -0,0 +1,113 @@ +[[update-connector-scheduling-api]] +=== Update connector scheduling API + +preview::[] + +++++ +Update connector scheduling +++++ + +Updates the `scheduling` configuration of a connector. + +[[update-connector-scheduling-api-request]] +==== {api-request-title} + +`PUT _connector/<connector_id>/_scheduling` + +[[update-connector-scheduling-api-prereq]] +==== {api-prereq-title} + +* To sync data using connectors, it's essential to have the Elastic connectors service running. +* The `connector_id` parameter should reference an existing connector. + +[[update-connector-scheduling-api-path-params]] +==== {api-path-parms-title} + +`<connector_id>`:: +(Required, string) + +[role="child_attributes"] +[[update-connector-scheduling-api-request-body]] +==== {api-request-body-title} + +`scheduling`:: +(Required, object) The scheduling configuration for the connector. This configuration determines the frequency of synchronization operations for the connector. + +The scheduling configuration includes the following attributes, each represented as a `ScheduleConfig` object: + +- `access_control` (Required, `ScheduleConfig` object) Defines the schedule for synchronizing access control settings of the connector. + +- `full` (Required, `ScheduleConfig` object) Defines the schedule for full content syncs. + +- `incremental` (Required, `ScheduleConfig` object) Defines the schedule for incremental content syncs. + +Each `ScheduleConfig` object includes the following sub-attributes: + + - `enabled` (Required, boolean) A flag that enables or disables the scheduling. + + - `interval` (Required, string) A CRON expression representing the sync schedule. This expression defines the frequency at which the sync operations should occur. It must be provided in a valid CRON format. + + +[[update-connector-scheduling-api-response-codes]] +==== {api-response-codes-title} + +`200`:: +Connector `scheduling` field was successfully updated. + +`400`:: +The `connector_id` was not provided or the request payload was malformed. + +`404` (Missing resources):: +No connector matching `connector_id` could be found.
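+
+Because all three `ScheduleConfig` objects are required, you can pause syncing
+by disabling each schedule while keeping the interval strings in place. An
+illustrative sketch (the full, tested example follows below):
+
+[source,console]
+----
+PUT _connector/my-connector/_scheduling
+{
+    "scheduling": {
+        "access_control": {
+            "enabled": false,
+            "interval": "0 0 0 * * ?"
+        },
+        "full": {
+            "enabled": false,
+            "interval": "0 0 0 * * ?"
+        },
+        "incremental": {
+            "enabled": false,
+            "interval": "0 0 0 * * ?"
+        }
+    }
+}
+----
+// TEST[skip: illustrative sketch]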
+ +[[update-connector-scheduling-api-example]] +==== {api-examples-title} + +The following example updates the `scheduling` property for the connector with ID `my-connector`: + +//// +[source, console] +-------------------------------------------------- +PUT _connector/my-connector +{ + "index_name": "search-google-drive", + "name": "My Connector", + "service_type": "google_drive" +} +-------------------------------------------------- +// TESTSETUP + +[source,console] +-------------------------------------------------- +DELETE _connector/my-connector +-------------------------------------------------- +// TEARDOWN +//// + +[source,console] +---- +PUT _connector/my-connector/_scheduling +{ + "scheduling": { + "access_control": { + "enabled": true, + "interval": "0 10 0 * * ?" + }, + "full": { + "enabled": true, + "interval": "0 20 0 * * ?" + }, + "incremental": { + "enabled": false, + "interval": "0 30 0 * * ?" + } + } +} +---- + +[source,console-result] +---- +{ + "result": "updated" +} +---- diff --git a/docs/reference/data-streams/set-up-tsds.asciidoc b/docs/reference/data-streams/set-up-tsds.asciidoc index c175da2e991e9..ed6b79653e61f 100644 --- a/docs/reference/data-streams/set-up-tsds.asciidoc +++ b/docs/reference/data-streams/set-up-tsds.asciidoc @@ -176,9 +176,7 @@ PUT _component_template/my-weather-sensor-mappings Optionally, the index settings component template for a TSDS can include: * Your lifecycle policy in the `index.lifecycle.name` index setting. -* The <> index setting. -* The <> index setting. -* Other index settings, such as <>, for your TSDS's +* Other index settings, such as <>, for your TSDS's backing indices. IMPORTANT: Don't specify the `index.routing_path` index setting in a component @@ -191,8 +189,7 @@ PUT _component_template/my-weather-sensor-settings { "template": { "settings": { - "index.lifecycle.name": "my-lifecycle-policy", - "index.look_ahead_time": "3h" + "index.lifecycle.name": "my-lifecycle-policy" } }, "_meta": { diff --git a/docs/reference/data-streams/tsds-index-settings.asciidoc b/docs/reference/data-streams/tsds-index-settings.asciidoc index c0cae9e365114..98976231661ec 100644 --- a/docs/reference/data-streams/tsds-index-settings.asciidoc +++ b/docs/reference/data-streams/tsds-index-settings.asciidoc @@ -28,13 +28,13 @@ value (exclusive) accepted by the index. Only indices with an `index.mode` of `index.look_ahead_time`:: (<<_static_index_settings,Static>>, <>) Interval used to calculate the `index.time_series.end_time` for a TSDS's write -index. Defaults to `2h` (2 hours). Accepts `1m` (one minute) to `7d` (seven -days). Only indices with an `index.mode` of `time_series` support this setting. +index. Defaults to `2h` (2 hours). Accepts `1m` (one minute) to `2h` (two +hours). Only indices with an `index.mode` of `time_series` support this setting. For more information, refer to <>. Additionally, this setting cannot be less than the `time_series.poll_interval` cluster setting. NOTE: Increasing the `look_ahead_time` will also increase the amount of time {ilm-cap} -waits before being able to proceed with executing the actions that expect the +waits before being able to proceed with executing the actions that expect the index to not receive any writes anymore. For more information, refer to <>.
[[index-look-back-time]] diff --git a/docs/reference/esql/esql-async-query-api.asciidoc b/docs/reference/esql/esql-async-query-api.asciidoc new file mode 100644 index 0000000000000..da9c6e3cf3136 --- /dev/null +++ b/docs/reference/esql/esql-async-query-api.asciidoc @@ -0,0 +1,92 @@ +[[esql-async-query-api]] +== {esql} async query API +++++ +{esql} async query API +++++ + +Runs an async {esql} search. + +The async query API lets you asynchronously execute a search request, +monitor its progress, and retrieve results as they become available. + +Executing an <> is commonly quite fast; +however, searches across large data sets or frozen data can take some time. +To avoid long waits, run an async {esql} search. + +A search initiated by this API may or may not return results in its initial +response. The `wait_for_completion_timeout` property determines how long to +wait for the search results. The default value is 1 second. If the results are +not available by this time, a search ID is returned, which can later be +used to retrieve the results. + +Initiates an async search for an <> +query. The API accepts the same parameters and request body as the +<>. + +[source,console] +---- +POST /_query/async +{ + "query": """ + FROM library + | EVAL year = DATE_TRUNC(1 YEARS, release_date) + | STATS MAX(page_count) BY year + | SORT year + | LIMIT 5 + """, + "wait_for_completion_timeout": "2s" +} +---- +// TEST[setup:library] + +If the results are not available within the timeout period, 2 seconds in +this case, the search returns no results. Instead, it returns a response that +includes: + + * A search ID + * An `is_running` value of `true`, indicating the search is ongoing + +The query continues to run in the background without blocking other +requests. + +[source,console-result] +---- +{ + "id": "FmNJRUZ1YWZCU3dHY1BIOUhaenVSRkEaaXFlZ3h4c1RTWFNocDdnY2FSaERnUTozNDE=", + "is_running": true +} +---- +// TEST[skip: no access to search ID - may return response values] + +To check the progress of an async search, use the <> with the search ID. Specify how long you'd like to wait for +complete results in the `wait_for_completion_timeout` parameter. + +[source,console] +---- +GET /_query/async/FmNJRUZ1YWZCU3dHY1BIOUhaenVSRkEaaXFlZ3h4c1RTWFNocDdnY2FSaERnUTozNDE=?wait_for_completion_timeout=30s +---- +// TEST[skip: no access to search ID - may return response values] + +If the response's `is_running` value is `false`, the async search has +finished, and the results are returned. + +[source,console-result] +---- +{ + "id": "FmNJRUZ1YWZCU3dHY1BIOUhaenVSRkEaaXFlZ3h4c1RTWFNocDdnY2FSaERnUTozNDE=", + "is_running": false, + "columns": ... +} +---- +// TEST[skip: no access to search ID - may return response values] + +Use the <> to +delete an async search before the `keep_alive` period ends. If the query +is still running, {es} cancels it. + +[source,console] +---- +DELETE /_query/async/delete/FmdMX2pIang3UWhLRU5QS0lqdlppYncaMUpYQ05oSkpTc3kwZ21EdC1tbFJXQToxOTI= +---- +// TEST[skip: no access to search ID] diff --git a/docs/reference/esql/esql-get-started.asciidoc b/docs/reference/esql/esql-get-started.asciidoc index 6e467e1e7312d..454edc3af2be2 100644 --- a/docs/reference/esql/esql-get-started.asciidoc +++ b/docs/reference/esql/esql-get-started.asciidoc @@ -5,6 +5,8 @@ Getting started ++++ +preview::["Do not use {esql} on production environments. This functionality is in technical preview and may be changed or removed in a future release.
Elastic will work to fix any issues, but features in technical preview are not subject to the support SLA of official GA features."] + This guide shows how you can use {esql} to query and aggregate your data. [discrete] diff --git a/docs/reference/esql/functions/abs.asciidoc b/docs/reference/esql/functions/abs.asciidoc index 3adb7dff07043..32b49bc287a83 100644 --- a/docs/reference/esql/functions/abs.asciidoc +++ b/docs/reference/esql/functions/abs.asciidoc @@ -1,18 +1,41 @@ [discrete] [[esql-abs]] === `ABS` + +*Syntax* + [.text-center] image::esql/functions/signature/abs.svg[Embedded,opts=inline] +*Parameters* + +`n`:: +Numeric expression. If `null`, the function returns `null`. + +*Description* + Returns the absolute value. -[source,esql] +*Supported types* + +include::types/abs.asciidoc[] + +*Examples* + +[source.merge.styled,esql] ---- -FROM employees -| KEEP first_name, last_name, height -| EVAL abs_height = ABS(0.0 - height) +include::{esql-specs}/math.csv-spec[tag=docsAbs] ---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/math.csv-spec[tag=docsAbs-result] +|=== -Supported types: - -include::types/abs.asciidoc[] +[source.merge.styled,esql] +---- +include::{esql-specs}/math.csv-spec[tag=docsAbsEmployees] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/math.csv-spec[tag=docsAbsEmployees-result] +|=== \ No newline at end of file diff --git a/docs/reference/esql/functions/asin.asciidoc b/docs/reference/esql/functions/asin.asciidoc index f03b5276b7dd6..222f6879785ef 100644 --- a/docs/reference/esql/functions/asin.asciidoc +++ b/docs/reference/esql/functions/asin.asciidoc @@ -1,10 +1,28 @@ [discrete] [[esql-asin]] === `ASIN` + +*Syntax* + [.text-center] image::esql/functions/signature/asin.svg[Embedded,opts=inline] -Inverse https://en.wikipedia.org/wiki/Inverse_trigonometric_functions[sine] trigonometric function. +*Parameters* + +`n`:: +Numeric expression. If `null`, the function returns `null`. + +*Description* + +Returns the +https://en.wikipedia.org/wiki/Inverse_trigonometric_functions[arcsine] +of the input numeric expression as an angle, expressed in radians. + +*Supported types* + +include::types/asin.asciidoc[] + +*Example* [source.merge.styled,esql] ---- @@ -14,7 +32,3 @@ include::{esql-specs}/floats.csv-spec[tag=asin] |=== include::{esql-specs}/floats.csv-spec[tag=asin-result] |=== - -Supported types: - -include::types/asin.asciidoc[] diff --git a/docs/reference/esql/functions/atan.asciidoc b/docs/reference/esql/functions/atan.asciidoc index 3813e096aeba1..bdbbd07cbba60 100644 --- a/docs/reference/esql/functions/atan.asciidoc +++ b/docs/reference/esql/functions/atan.asciidoc @@ -1,10 +1,28 @@ [discrete] [[esql-atan]] === `ATAN` + +*Syntax* + [.text-center] image::esql/functions/signature/atan.svg[Embedded,opts=inline] -Inverse https://en.wikipedia.org/wiki/Inverse_trigonometric_functions[tangent] trigonometric function. +*Parameters* + +`n`:: +Numeric expression. If `null`, the function returns `null`. + +*Description* + +Returns the +https://en.wikipedia.org/wiki/Inverse_trigonometric_functions[arctangent] of the +input numeric expression as an angle, expressed in radians. 
+ +*Supported types* + +include::types/atan.asciidoc[] + +*Example* [source.merge.styled,esql] ---- @@ -13,8 +31,4 @@ include::{esql-specs}/floats.csv-spec[tag=atan] [%header.monospaced.styled,format=dsv,separator=|] |=== include::{esql-specs}/floats.csv-spec[tag=atan-result] -|=== - -Supported types: - -include::types/atan.asciidoc[] +|=== \ No newline at end of file diff --git a/docs/reference/esql/functions/atan2.asciidoc b/docs/reference/esql/functions/atan2.asciidoc index e78a219333344..3ecc0ff86fe26 100644 --- a/docs/reference/esql/functions/atan2.asciidoc +++ b/docs/reference/esql/functions/atan2.asciidoc @@ -1,11 +1,31 @@ [discrete] [[esql-atan2]] === `ATAN2` + +*Syntax* + [.text-center] image::esql/functions/signature/atan2.svg[Embedded,opts=inline] -The https://en.wikipedia.org/wiki/Atan2[angle] between the positive x-axis and the -ray from the origin to the point (x , y) in the Cartesian plane. +*Parameters* + +`y`:: +Numeric expression. If `null`, the function returns `null`. + +`x`:: +Numeric expression. If `null`, the function returns `null`. + +*Description* + +The https://en.wikipedia.org/wiki/Atan2[angle] between the positive x-axis and +the ray from the origin to the point (x , y) in the Cartesian plane, expressed +in radians. + +*Supported types* + +include::types/atan2.asciidoc[] + +*Example* [source.merge.styled,esql] ---- @@ -15,7 +35,3 @@ include::{esql-specs}/floats.csv-spec[tag=atan2] |=== include::{esql-specs}/floats.csv-spec[tag=atan2-result] |=== - -Supported types: - -include::types/atan2.asciidoc[] diff --git a/docs/reference/esql/functions/auto_bucket.asciidoc b/docs/reference/esql/functions/auto_bucket.asciidoc index 47e453f382229..2301939cf5050 100644 --- a/docs/reference/esql/functions/auto_bucket.asciidoc +++ b/docs/reference/esql/functions/auto_bucket.asciidoc @@ -1,72 +1,118 @@ [discrete] [[esql-auto_bucket]] === `AUTO_BUCKET` -Creates human-friendly buckets and returns a `datetime` value for each row that -corresponds to the resulting bucket the row falls into. Combine `AUTO_BUCKET` -with <> to create a date histogram. -You provide a target number of buckets, a start date, and an end date, and it -picks an appropriate bucket size to generate the target number of buckets or -fewer. For example, this asks for at most 20 buckets over a whole year, which -picks monthly buckets: +*Syntax* + +[source,esql] +---- +AUTO_BUCKET(field, buckets, from, to) +---- + +*Parameters* + +`field`:: +Numeric or date column from which to derive buckets. + +`buckets`:: +Target number of buckets. + +`from`:: +Start of the range. Can be a number or a date expressed as a string. + +`to`:: +End of the range. Can be a number or a date expressed as a string. + +*Description* + +Creates human-friendly buckets and returns a value for each row that corresponds +to the resulting bucket the row falls into. + +Using a target number of buckets, a start of a range, and an end of a range, +`AUTO_BUCKET` picks an appropriate bucket size to generate the target number of +buckets or fewer. 
For example, asking for at most 20 buckets over a year results +in monthly buckets: [source.merge.styled,esql] ---- -include::{esql-specs}/date.csv-spec[tag=auto_bucket_month] +include::{esql-specs}/date.csv-spec[tag=docsAutoBucketMonth] ---- [%header.monospaced.styled,format=dsv,separator=|] |=== -include::{esql-specs}/date.csv-spec[tag=auto_bucket_month-result] +include::{esql-specs}/date.csv-spec[tag=docsAutoBucketMonth-result] |=== The goal isn't to provide *exactly* the target number of buckets, it's to pick a -range that people are comfortable with that provides at most the target number of -buckets. +range that people are comfortable with that provides at most the target number +of buckets. -If you ask for more buckets then `AUTO_BUCKET` can pick a smaller range. For example, -asking for at most 100 buckets in a year will get you week long buckets: +Combine `AUTO_BUCKET` with +<> to create a histogram: [source.merge.styled,esql] ---- -include::{esql-specs}/date.csv-spec[tag=auto_bucket_week] +include::{esql-specs}/date.csv-spec[tag=docsAutoBucketMonthlyHistogram] ---- [%header.monospaced.styled,format=dsv,separator=|] |=== -include::{esql-specs}/date.csv-spec[tag=auto_bucket_week-result] +include::{esql-specs}/date.csv-spec[tag=docsAutoBucketMonthlyHistogram-result] |=== -`AUTO_BUCKET` does not filter any rows. It only uses the provided time range to -pick a good bucket size. For rows with a date outside of the range, it returns a -`datetime` that corresponds to a bucket outside the range. Combine `AUTO_BUCKET` -with <> to filter rows. +NOTE: `AUTO_BUCKET` does not create buckets that don't match any documents. +That's why this example is missing `1985-03-01` and other dates. -A more complete example might look like: +Asking for more buckets can result in a smaller range. For example, asking for +at most 100 buckets in a year results in weekly buckets: [source.merge.styled,esql] ---- -include::{esql-specs}/date.csv-spec[tag=auto_bucket_in_agg] +include::{esql-specs}/date.csv-spec[tag=docsAutoBucketWeeklyHistogram] ---- [%header.monospaced.styled,format=dsv,separator=|] |=== -include::{esql-specs}/date.csv-spec[tag=auto_bucket_in_agg-result] +include::{esql-specs}/date.csv-spec[tag=docsAutoBucketWeeklyHistogram-result] |=== -NOTE: `AUTO_BUCKET` does not create buckets that don't match any documents. That's -why the example above is missing `1985-03-01` and other dates. +NOTE: `AUTO_BUCKET` does not filter any rows. It only uses the provided range to +pick a good bucket size. For rows with a value outside of the range, it returns +a bucket value that corresponds to a bucket outside the range. Combine +`AUTO_BUCKET` with <> to filter rows. -==== Numeric fields +`AUTO_BUCKET` can also operate on numeric fields. For example, to create a +salary histogram: -`auto_bucket` can also operate on numeric fields like this: [source.merge.styled,esql] ---- -include::{esql-specs}/ints.csv-spec[tag=auto_bucket] +include::{esql-specs}/ints.csv-spec[tag=docsAutoBucketNumeric] ---- [%header.monospaced.styled,format=dsv,separator=|] |=== -include::{esql-specs}/ints.csv-spec[tag=auto_bucket-result] +include::{esql-specs}/ints.csv-spec[tag=docsAutoBucketNumeric-result] |=== -Unlike the example above where you are intentionally filtering on a date range, -you rarely want to filter on a numeric range. So you have find the `min` and `max` -separately. We don't yet have an easy way to do that automatically. Improvements -coming! 
+Unlike the earlier example that intentionally filters on a date range, you +rarely want to filter on a numeric range. You have to find the `min` and `max` +separately. {esql} doesn't yet have an easy way to do that automatically. + +*Examples* + +Create hourly buckets for the last 24 hours, and calculate the number of events +per hour: + + +[source.styled,esql] +---- +include::{esql-specs}/date.csv-spec[tag=docsAutoBucketLast24hr] +---- + +Create monthly buckets for the year 1985, and calculate the average salary by +hiring month: + +[source.merge.styled,esql] +---- +include::{esql-specs}/date.csv-spec[tag=auto_bucket_in_agg] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/date.csv-spec[tag=auto_bucket_in_agg-result] +|=== diff --git a/docs/reference/esql/functions/avg.asciidoc b/docs/reference/esql/functions/avg.asciidoc index 972d30545ceb4..6345be99c5d6d 100644 --- a/docs/reference/esql/functions/avg.asciidoc +++ b/docs/reference/esql/functions/avg.asciidoc @@ -1,8 +1,27 @@ [discrete] [[esql-agg-avg]] === `AVG` + +*Syntax* + +[source,esql] +---- +AVG(column) +---- + +*Parameters* + +`column`:: +Numeric column. If `null`, the function returns `null`. + +*Description* + +The average of a numeric field. + +*Supported types* + +The result is always a `double` no matter the input type. + +*Example* + [source.merge.styled,esql] ---- include::{esql-specs}/stats.csv-spec[tag=avg] @@ -11,5 +30,3 @@ include::{esql-specs}/stats.csv-spec[tag=avg] |=== include::{esql-specs}/stats.csv-spec[tag=avg-result] |=== - -The result is always a `double` not matter the input type. diff --git a/docs/reference/esql/functions/case.asciidoc b/docs/reference/esql/functions/case.asciidoc index 84ff083147cb9..b5fda636135b2 100644 --- a/docs/reference/esql/functions/case.asciidoc +++ b/docs/reference/esql/functions/case.asciidoc @@ -32,6 +32,8 @@ no condition matches, the function returns `null`. *Example* +Determine whether employees are monolingual, bilingual, or polyglot: + [source,esql] [source.merge.styled,esql] ---- include::{esql-specs}/docs.csv-spec[tag=case] @@ -41,3 +43,28 @@ include::{esql-specs}/docs.csv-spec[tag=case] |=== include::{esql-specs}/docs.csv-spec[tag=case-result] |=== + +Calculate the total connection success rate based on log messages: + +[source,esql] +[source.merge.styled,esql] +---- +include::{esql-specs}/conditional.csv-spec[tag=docsCaseSuccessRate] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/conditional.csv-spec[tag=docsCaseSuccessRate-result] +|=== + +Calculate an hourly error rate as a percentage of the total number of log +messages: + +[source,esql] +[source.merge.styled,esql] +---- +include::{esql-specs}/conditional.csv-spec[tag=docsCaseHourlyErrorRate] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/conditional.csv-spec[tag=docsCaseHourlyErrorRate-result] +|=== diff --git a/docs/reference/esql/functions/ceil.asciidoc b/docs/reference/esql/functions/ceil.asciidoc index f977e544e6c3f..bc132e6bf47e6 100644 --- a/docs/reference/esql/functions/ceil.asciidoc +++ b/docs/reference/esql/functions/ceil.asciidoc @@ -1,11 +1,32 @@ [discrete] [[esql-ceil]] === `CEIL` + +*Syntax* + [.text-center] image::esql/functions/signature/ceil.svg[Embedded,opts=inline] +*Parameters* + +`n`:: +Numeric expression. If `null`, the function returns `null`. + +*Description* + Round a number up to the nearest integer. +NOTE: This is a noop for `long` (including unsigned) and `integer`.
+ For `double` this picks the closest `double` value to the integer + similar to {javadoc}/java.base/java/lang/Math.html#ceil(double)[Math.ceil]. + +*Supported types* + +include::types/ceil.asciidoc[] + + +*Example* + [source.merge.styled,esql] ---- include::{esql-specs}/math.csv-spec[tag=ceil] @@ -14,11 +35,3 @@ include::{esql-specs}/math.csv-spec[tag=ceil] |=== include::{esql-specs}/math.csv-spec[tag=ceil-result] |=== - -NOTE: This is a noop for `long` (including unsigned) and `integer`. - For `double` this picks the the closest `double` value to the integer ala - {javadoc}/java.base/java/lang/Math.html#ceil(double)[Math.ceil]. - -Supported types: - -include::types/ceil.asciidoc[] diff --git a/docs/reference/esql/functions/cidr_match.asciidoc b/docs/reference/esql/functions/cidr_match.asciidoc index 5072a6eef7fd5..1c7fbb57a0044 100644 --- a/docs/reference/esql/functions/cidr_match.asciidoc +++ b/docs/reference/esql/functions/cidr_match.asciidoc @@ -2,15 +2,33 @@ [[esql-cidr_match]] === `CIDR_MATCH` +*Syntax* + +[source,esql] +---- +CIDR_MATCH(ip, block1[, ..., blockN]) +---- + +*Parameters* + +`ip`:: +IP address of type `ip` (both IPv4 and IPv6 are supported). + +`blockX`:: +CIDR block to test the IP against. + +*Description* + Returns `true` if the provided IP is contained in one of the provided CIDR blocks. -`CIDR_MATCH` accepts two or more arguments. The first argument is the IP -address of type `ip` (both IPv4 and IPv6 are supported). Subsequent arguments -are the CIDR blocks to test the IP against. +*Example* -[source,esql] +[source.merge.styled,esql] ---- -FROM hosts -| WHERE CIDR_MATCH(ip, "127.0.0.2/32", "127.0.0.3/32") +include::{esql-specs}/ip.csv-spec[tag=cdirMatchMultipleArgs] ---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/ip.csv-spec[tag=cdirMatchMultipleArgs-result] +|=== diff --git a/docs/reference/esql/functions/coalesce.asciidoc b/docs/reference/esql/functions/coalesce.asciidoc index 550780eaa070d..1121a75209151 100644 --- a/docs/reference/esql/functions/coalesce.asciidoc +++ b/docs/reference/esql/functions/coalesce.asciidoc @@ -2,7 +2,24 @@ [[esql-coalesce]] === `COALESCE` -Returns the first non-null value. +*Syntax* + +[source,esql] +---- +COALESCE(expression1 [, ..., expressionN]) +---- + +*Parameters* + +`expressionX`:: +Expression to evaluate. + +*Description* + +Returns the first of its arguments that is not null. If all arguments are null, +it returns `null`. + +*Example* [source.merge.styled,esql] ---- diff --git a/docs/reference/esql/functions/concat.asciidoc b/docs/reference/esql/functions/concat.asciidoc index 4864f5623a170..0b30211a72be2 100644 --- a/docs/reference/esql/functions/concat.asciidoc +++ b/docs/reference/esql/functions/concat.asciidoc @@ -1,11 +1,30 @@ [discrete] [[esql-concat]] === `CONCAT` -Concatenates two or more strings. + +*Syntax* [source,esql] ---- -FROM employees -| KEEP first_name, last_name, height -| EVAL fullname = CONCAT(first_name, " ", last_name) +CONCAT(string1, string2[, ..., stringN]) +---- + +*Parameters* + +`stringX`:: +Strings to concatenate. + +*Description* + +Concatenates two or more strings. 
+ +*Example* + +[source.merge.styled,esql] +---- +include::{esql-specs}/eval.csv-spec[tag=docsConcat] ---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/eval.csv-spec[tag=docsConcat-result] +|=== diff --git a/docs/reference/esql/functions/cos.asciidoc b/docs/reference/esql/functions/cos.asciidoc index 7227f57e28120..f7874d46c558a 100644 --- a/docs/reference/esql/functions/cos.asciidoc +++ b/docs/reference/esql/functions/cos.asciidoc @@ -1,10 +1,27 @@ [discrete] [[esql-cos]] === `COS` + +*Syntax* + [.text-center] image::esql/functions/signature/cos.svg[Embedded,opts=inline] -https://en.wikipedia.org/wiki/Sine_and_cosine[Cosine] trigonometric function. Input expected in radians. +*Parameters* + +`n`:: +Numeric expression. If `null`, the function returns `null`. + +*Description* + +Returns the https://en.wikipedia.org/wiki/Sine_and_cosine[cosine] of `n`. Input +expected in radians. + +*Supported types* + +include::types/cos.asciidoc[] + +*Example* [source.merge.styled,esql] ---- @@ -14,7 +31,3 @@ include::{esql-specs}/floats.csv-spec[tag=cos] |=== include::{esql-specs}/floats.csv-spec[tag=cos-result] |=== - -Supported types: - -include::types/cos.asciidoc[] diff --git a/docs/reference/esql/functions/cosh.asciidoc b/docs/reference/esql/functions/cosh.asciidoc index 7bf0840958655..ae813e91ec9bb 100644 --- a/docs/reference/esql/functions/cosh.asciidoc +++ b/docs/reference/esql/functions/cosh.asciidoc @@ -1,10 +1,27 @@ [discrete] [[esql-cosh]] === `COSH` + +*Syntax* + [.text-center] image::esql/functions/signature/cosh.svg[Embedded,opts=inline] -https://en.wikipedia.org/wiki/Hyperbolic_functions[Cosine] hyperbolic function. +*Parameters* + +`n`:: +Numeric expression. If `null`, the function returns `null`. + +*Description* + +Returns the https://en.wikipedia.org/wiki/Hyperbolic_functions[hyperbolic +cosine]. + +*Supported types* + +include::types/cosh.asciidoc[] + +*Example* [source.merge.styled,esql] ---- @@ -14,7 +31,3 @@ include::{esql-specs}/floats.csv-spec[tag=cosh] |=== include::{esql-specs}/floats.csv-spec[tag=cosh-result] |=== - -Supported types: - -include::types/cosh.asciidoc[] diff --git a/docs/reference/esql/functions/count-distinct.asciidoc b/docs/reference/esql/functions/count-distinct.asciidoc index b5b1659140f63..14fa6eff39d4c 100644 --- a/docs/reference/esql/functions/count-distinct.asciidoc +++ b/docs/reference/esql/functions/count-distinct.asciidoc @@ -1,21 +1,28 @@ [discrete] [[esql-agg-count-distinct]] === `COUNT_DISTINCT` -The approximate number of distinct values. -[source.merge.styled,esql] +*Syntax* + +[source,esql] ---- -include::{esql-specs}/stats_count_distinct.csv-spec[tag=count-distinct] +COUNT_DISTINCT(column[, precision]) ---- -[%header.monospaced.styled,format=dsv,separator=|] -|=== -include::{esql-specs}/stats_count_distinct.csv-spec[tag=count-distinct-result] -|=== -Can take any field type as input and the result is always a `long` not matter -the input type. +*Parameters* + +`column`:: +Column for which to count the number of distinct values. + +`precision`:: +Precision. Refer to <>. + +*Description* + +Returns the approximate number of distinct values.
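+
+For instance, you could count the distinct values of the `languages` column
+through the {esql} query API. This is an illustrative sketch that assumes the
+`employees` test data set used throughout these docs; the official, tested
+examples follow below:
+
+[source,console]
+----
+POST /_query
+{
+  "query": """
+    FROM employees
+    | STATS distinct_languages = COUNT_DISTINCT(languages)
+  """
+}
+----
+// TEST[skip: illustrative sketch]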
[discrete] +[[esql-agg-count-distinct-approximate]] ==== Counts are approximate Computing exact counts requires loading values into a set and returning its @@ -30,11 +37,25 @@ properties: include::../../aggregations/metrics/cardinality-aggregation.asciidoc[tag=explanation] -[discrete] -==== Precision is configurable - The `COUNT_DISTINCT` function takes an optional second parameter to configure the -precision discussed previously. +precision. + +*Supported types* + +Can take any field type as input. + +*Examples* + +[source.merge.styled,esql] +---- +include::{esql-specs}/stats_count_distinct.csv-spec[tag=count-distinct] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/stats_count_distinct.csv-spec[tag=count-distinct-result] +|=== + +With the optional second parameter to configure the precision: [source.merge.styled,esql] ---- diff --git a/docs/reference/esql/functions/count.asciidoc b/docs/reference/esql/functions/count.asciidoc index a148df07edb4d..70b13d7fc16b3 100644 --- a/docs/reference/esql/functions/count.asciidoc +++ b/docs/reference/esql/functions/count.asciidoc @@ -1,7 +1,29 @@ [discrete] [[esql-agg-count]] === `COUNT` -Counts field values. + +*Syntax* + +[source,esql] +---- +COUNT([input]) +---- + +*Parameters* + +`input`:: +Column or literal for which to count the number of values. If omitted, returns a +count of all rows. + +*Description* + +Returns the total number (count) of input values. + +*Supported types* + +Can take any field type as input. + +*Examples* [source.merge.styled,esql] ---- @@ -12,10 +34,7 @@ include::{esql-specs}/stats.csv-spec[tag=count] [%header.monospaced.styled,format=dsv,separator=|] |=== include::{esql-specs}/stats.csv-spec[tag=count-result] |=== -Can take any field type as input and the result is always a `long` not matter -the input type. - -To count the number of rows, use `COUNT(*)`: +To count the number of rows, use `COUNT()` or `COUNT(*)`: [source.merge.styled,esql] ---- @@ -24,4 +43,4 @@ include::{esql-specs}/docs.csv-spec[tag=countAll] [%header.monospaced.styled,format=dsv,separator=|] |=== include::{esql-specs}/docs.csv-spec[tag=countAll-result] -|=== \ No newline at end of file +|=== diff --git a/docs/reference/esql/functions/date_extract.asciidoc b/docs/reference/esql/functions/date_extract.asciidoc index 89ef1cf261094..ce949483494a5 100644 --- a/docs/reference/esql/functions/date_extract.asciidoc +++ b/docs/reference/esql/functions/date_extract.asciidoc @@ -1,15 +1,56 @@ [discrete] [[esql-date_extract]] === `DATE_EXTRACT` -Extracts parts of a date, like year, month, day, hour. -The supported field types are those provided by https://docs.oracle.com/javase/8/docs/api/java/time/temporal/ChronoField.html[java.time.temporal.ChronoField]. + +*Syntax* + +[source,esql] +---- +DATE_EXTRACT(date_part, date) +---- + +*Parameters* + +`date_part`:: +Part of the date to extract. Can be: `aligned_day_of_week_in_month`, +`aligned_day_of_week_in_year`, `aligned_week_of_month`, `aligned_week_of_year`, +`ampm_of_day`, `clock_hour_of_ampm`, `clock_hour_of_day`, `day_of_month`, +`day_of_week`, `day_of_year`, `epoch_day`, `era`, `hour_of_ampm`, `hour_of_day`, +`instant_seconds`, `micro_of_day`, `micro_of_second`, `milli_of_day`, +`milli_of_second`, `minute_of_day`, `minute_of_hour`, `month_of_year`, +`nano_of_day`, `nano_of_second`, `offset_seconds`, `proleptic_month`, +`second_of_day`, `second_of_minute`, `year`, or `year_of_era`.
Refer to +https://docs.oracle.com/javase/8/docs/api/java/time/temporal/ChronoField.html[java.time.temporal.ChronoField] +for a description of these values. ++ +If `null`, the function returns `null`. + +`date`:: +Date expression. If `null`, the function returns `null`. + +*Description* + +Extracts parts of a date, like year, month, day, hour. + +*Examples* [source.merge.styled,esql] ---- -include::{esql-specs}/docs.csv-spec[tag=dateExtract] +include::{esql-specs}/date.csv-spec[tag=dateExtract] ---- [%header.monospaced.styled,format=dsv,separator=|] |=== -include::{esql-specs}/docs.csv-spec[tag=dateExtract-result] +include::{esql-specs}/date.csv-spec[tag=dateExtract-result] |=== +Find all events that occurred outside of business hours (before 9 AM or after 5 +PM), on any given date: + +[source.merge.styled,esql] +---- +include::{esql-specs}/date.csv-spec[tag=docsDateExtractBusinessHours] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/date.csv-spec[tag=docsDateExtractBusinessHours-result] +|=== diff --git a/docs/reference/esql/functions/date_format.asciidoc b/docs/reference/esql/functions/date_format.asciidoc index 5a87f31412cc8..4a0d36d133a4c 100644 --- a/docs/reference/esql/functions/date_format.asciidoc +++ b/docs/reference/esql/functions/date_format.asciidoc @@ -1,12 +1,35 @@ [discrete] [[esql-date_format]] === `DATE_FORMAT` -Returns a string representation of a date in the provided format. If no format -is specified, the `yyyy-MM-dd'T'HH:mm:ss.SSSZ` format is used. + +*Syntax* [source,esql] ---- -FROM employees -| KEEP first_name, last_name, hire_date -| EVAL hired = DATE_FORMAT("YYYY-MM-dd", hire_date) +DATE_FORMAT([format,] date) +---- + +*Parameters* + +`format`:: +Date format (optional). If no format is specified, the +`yyyy-MM-dd'T'HH:mm:ss.SSSZ` format is used. If `null`, the function returns +`null`. + +`date`:: +Date expression. If `null`, the function returns `null`. + +*Description* + +Returns a string representation of a date, in the provided format. + +*Example* + +[source.merge.styled,esql] +---- +include::{esql-specs}/date.csv-spec[tag=docsDateFormat] ---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/date.csv-spec[tag=docsDateFormat-result] +|=== diff --git a/docs/reference/esql/functions/date_trunc.asciidoc b/docs/reference/esql/functions/date_trunc.asciidoc index ad0e1eb1170b4..4aa228dc14e65 100644 --- a/docs/reference/esql/functions/date_trunc.asciidoc +++ b/docs/reference/esql/functions/date_trunc.asciidoc @@ -1,13 +1,57 @@ [discrete] [[esql-date_trunc]] === `DATE_TRUNC` -Rounds down a date to the closest interval. Intervals can be expressed using the -<>. + +*Syntax* [source,esql] ---- -FROM employees -| EVAL year_hired = DATE_TRUNC(1 year, hire_date) -| STATS COUNT(emp_no) BY year_hired -| SORT year_hired +DATE_TRUNC(interval, date) +---- + +*Parameters* + +`interval`:: +Interval, expressed using the <>. If `null`, the function returns `null`. + +`date`:: +Date expression. If `null`, the function returns `null`. + +*Description* + +Rounds down a date to the closest interval. + +*Examples* + +[source.merge.styled,esql] +---- +include::{esql-specs}/date.csv-spec[tag=docsDateTrunc] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/date.csv-spec[tag=docsDateTrunc-result] +|=== + +Combine `DATE_TRUNC` with <> to create date histograms. 
For +example, the number of hires per year: + +[source.merge.styled,esql] +---- +include::{esql-specs}/date.csv-spec[tag=docsDateTruncHistogram] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/date.csv-spec[tag=docsDateTruncHistogram-result] +|=== + +Or an hourly error rate: + +[source.merge.styled,esql] +---- +include::{esql-specs}/conditional.csv-spec[tag=docsCaseHourlyErrorRate] ---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/conditional.csv-spec[tag=docsCaseHourlyErrorRate-result] +|=== diff --git a/docs/reference/esql/functions/is_finite.asciidoc b/docs/reference/esql/functions/is_finite.asciidoc index f7b7ad73a3952..482c7bcd3d61b 100644 --- a/docs/reference/esql/functions/is_finite.asciidoc +++ b/docs/reference/esql/functions/is_finite.asciidoc @@ -1,8 +1,27 @@ [discrete] [[esql-is_finite]] === `IS_FINITE` + +*Syntax* + +[.text-center] +image::esql/functions/signature/is_finite.svg[Embedded,opts=inline] + +*Parameters* + +`n`:: +Numeric expression. If `null`, the function returns `null`. + +*Description* + Returns a boolean that indicates whether its input is a finite number. +*Supported types* + +include::types/is_finite.asciidoc[] + +*Example* + [source,esql] ---- ROW d = 1.0 diff --git a/docs/reference/esql/functions/is_infinite.asciidoc b/docs/reference/esql/functions/is_infinite.asciidoc index 56158a786c020..69f0ab7ba98a3 100644 --- a/docs/reference/esql/functions/is_infinite.asciidoc +++ b/docs/reference/esql/functions/is_infinite.asciidoc @@ -1,7 +1,26 @@ [discrete] [[esql-is_infinite]] === `IS_INFINITE` -Returns a boolean that indicates whether its input is infinite. + +*Syntax* + +[.text-center] +image::esql/functions/signature/is_infinite.svg[Embedded,opts=inline] + +*Parameters* + +`n`:: +Numeric expression. If `null`, the function returns `null`. + +*Description* + +Returns a boolean that indicates whether its input is an infinite number. + +*Supported types* + +include::types/is_infinite.asciidoc[] + +*Example* [source,esql] ---- diff --git a/docs/reference/esql/functions/is_nan.asciidoc b/docs/reference/esql/functions/is_nan.asciidoc index 25b50a9e96bba..dbe93b9dbb817 100644 --- a/docs/reference/esql/functions/is_nan.asciidoc +++ b/docs/reference/esql/functions/is_nan.asciidoc @@ -1,7 +1,26 @@ [discrete] [[esql-is_nan]] === `IS_NAN` -Returns a boolean that indicates whether its input is not a number. + +*Syntax* + +[.text-center] +image::esql/functions/signature/is_nan.svg[Embedded,opts=inline] + +*Parameters* + +`n`:: +Numeric expression. If `null`, the function returns `null`. + +*Description* + +Returns a boolean that indicates whether its input is {wikipedia}/NaN[Not-a-Number] (NaN). 
+ +*Supported types* + +include::types/is_nan.asciidoc[] + +*Example* [source,esql] ---- diff --git a/docs/reference/esql/functions/signature/is_finite.svg b/docs/reference/esql/functions/signature/is_finite.svg index 0ff65a876d21f..36a8f1f34de9b 100644 --- a/docs/reference/esql/functions/signature/is_finite.svg +++ b/docs/reference/esql/functions/signature/is_finite.svg @@ -1 +1 @@ -IS_FINITE(arg1) \ No newline at end of file +IS_FINITE(n) \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/is_infinite.svg b/docs/reference/esql/functions/signature/is_infinite.svg index aef9e3873c918..f3d8d44fde947 100644 --- a/docs/reference/esql/functions/signature/is_infinite.svg +++ b/docs/reference/esql/functions/signature/is_infinite.svg @@ -1 +1 @@ -IS_INFINITE(arg1) \ No newline at end of file +IS_INFINITE(n) \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/is_nan.svg b/docs/reference/esql/functions/signature/is_nan.svg new file mode 100644 index 0000000000000..a3697ee9f8b2c --- /dev/null +++ b/docs/reference/esql/functions/signature/is_nan.svg @@ -0,0 +1 @@ +IS_NAN(n) \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/to_degrees.svg b/docs/reference/esql/functions/signature/to_degrees.svg new file mode 100644 index 0000000000000..01fe0a4770156 --- /dev/null +++ b/docs/reference/esql/functions/signature/to_degrees.svg @@ -0,0 +1 @@ +TO_DEGREES(v) \ No newline at end of file diff --git a/docs/reference/esql/functions/types/add.asciidoc b/docs/reference/esql/functions/types/add.asciidoc index 7783d08bc3aaa..72f9018503bb7 100644 --- a/docs/reference/esql/functions/types/add.asciidoc +++ b/docs/reference/esql/functions/types/add.asciidoc @@ -2,11 +2,18 @@ |=== lhs | rhs | result date_period | date_period | date_period -date_period | datetime | datetime datetime | date_period | datetime datetime | time_duration | datetime double | double | double +double | integer | double +double | long | double +integer | double | double integer | integer | integer +integer | long | long +long | double | double +long | integer | long long | long | long +time_duration | datetime | datetime time_duration | time_duration | time_duration +unsigned_long | unsigned_long | unsigned_long |=== diff --git a/docs/reference/esql/functions/types/is_finite.asciidoc b/docs/reference/esql/functions/types/is_finite.asciidoc index 0c555059004c1..e4883bdc1c076 100644 --- a/docs/reference/esql/functions/types/is_finite.asciidoc +++ b/docs/reference/esql/functions/types/is_finite.asciidoc @@ -1,5 +1,5 @@ [%header.monospaced.styled,format=dsv,separator=|] |=== -arg1 | result +n | result double | boolean |=== diff --git a/docs/reference/esql/functions/types/is_infinite.asciidoc b/docs/reference/esql/functions/types/is_infinite.asciidoc index 0c555059004c1..e4883bdc1c076 100644 --- a/docs/reference/esql/functions/types/is_infinite.asciidoc +++ b/docs/reference/esql/functions/types/is_infinite.asciidoc @@ -1,5 +1,5 @@ [%header.monospaced.styled,format=dsv,separator=|] |=== -arg1 | result +n | result double | boolean |=== diff --git a/docs/reference/esql/functions/types/is_nan.asciidoc b/docs/reference/esql/functions/types/is_nan.asciidoc new file mode 100644 index 0000000000000..e4883bdc1c076 --- /dev/null +++ b/docs/reference/esql/functions/types/is_nan.asciidoc @@ -0,0 +1,5 @@ +[%header.monospaced.styled,format=dsv,separator=|] +|=== +n | result +double | boolean +|=== diff --git a/docs/reference/esql/functions/types/to_degrees.asciidoc 
b/docs/reference/esql/functions/types/to_degrees.asciidoc new file mode 100644 index 0000000000000..7cb7ca46022c2 --- /dev/null +++ b/docs/reference/esql/functions/types/to_degrees.asciidoc @@ -0,0 +1,8 @@ +[%header.monospaced.styled,format=dsv,separator=|] +|=== +v | result +double | double +integer | double +long | double +unsigned_long | double +|=== diff --git a/docs/reference/esql/index.asciidoc b/docs/reference/esql/index.asciidoc index dcbe426b1bcac..8fb20b981b93e 100644 --- a/docs/reference/esql/index.asciidoc +++ b/docs/reference/esql/index.asciidoc @@ -6,7 +6,7 @@ [partintro] -preview::[] +preview::["Do not use {esql} on production environments. This functionality is in technical preview and may be changed or removed in a future release. Elastic will work to fix any issues, but features in technical preview are not subject to the support SLA of official GA features."] The {es} Query Language ({esql}) provides a powerful way to filter, transform, and analyze data stored in {es}, and in the future in other runtimes. It is diff --git a/docs/reference/how-to/knn-search.asciidoc b/docs/reference/how-to/knn-search.asciidoc index 066008ce26110..6b157ace13f8a 100644 --- a/docs/reference/how-to/knn-search.asciidoc +++ b/docs/reference/how-to/knn-search.asciidoc @@ -10,21 +10,6 @@ Many of these recommendations help improve search speed. With approximate kNN, the indexing algorithm runs searches under the hood to create the vector index structures. So these same recommendations also help with indexing speed. -[discrete] -=== Prefer `dot_product` over `cosine` - -When indexing vectors for approximate kNN search, you need to specify the -<> for comparing the vectors. -If you'd like to compare vectors through cosine similarity, there are two -options. - -The `cosine` option accepts any float vector and computes the cosine -similarity. While this is convenient for testing, it's not the most efficient -approach. Instead, we recommend using the `dot_product` option to compute the -similarity. To use `dot_product`, all vectors need to be normalized in advance -to have length 1. The `dot_product` option is significantly faster, since it -avoids performing extra vector length computations during the search. - [discrete] === Ensure data nodes have enough memory @@ -52,12 +37,10 @@ of datasets and configurations that we use for our nightly benchmarks. include::search-speed.asciidoc[tag=warm-fs-cache] The following file extensions are used for the approximate kNN search: -+ --- + * `vec` and `veq` for vector values * `vex` for HNSW graph * `vem`, `vemf`, and `vemq` for metadata --- [discrete] === Reduce vector dimensionality diff --git a/docs/reference/images/ingest/document-enrichment-add-inference-pipeline.png b/docs/reference/images/ingest/document-enrichment-add-inference-pipeline.png index ddcf42e24ab83..336228fc0aef0 100644 Binary files a/docs/reference/images/ingest/document-enrichment-add-inference-pipeline.png and b/docs/reference/images/ingest/document-enrichment-add-inference-pipeline.png differ diff --git a/docs/reference/ingest/search-inference-processing.asciidoc b/docs/reference/ingest/search-inference-processing.asciidoc index 48505ab314c1e..006cc96294477 100644 --- a/docs/reference/ingest/search-inference-processing.asciidoc +++ b/docs/reference/ingest/search-inference-processing.asciidoc @@ -102,10 +102,13 @@ Here, you'll be able to: 1. Choose a name for your pipeline. - This name will need to be unique across the whole deployment. 
If you want this pipeline to be index-specific, we recommend including the name of your index in the pipeline name. + - If you do not set the pipeline name, a default unique name will be provided upon selecting a trained model. 2. Select the ML trained model you want to use. + - The model must be deployed before you can select it. + To begin deployment of a model, click the *Deploy* button. 3. Select one or more source fields as input for the inference processor. - If there are no source fields available, your index will need a <>. -4. (Optional) Choose a name for your target field. +4. (Optional) Choose a name for your target field(s). This is where the output of the inference model will be stored. Changing the default name is only possible if you have a single source field selected. 5. Add the source-target field mapping to the configuration by clicking the *Add* button. 6. Repeat steps 3-5 for each field mapping you want to add. @@ -123,51 +126,12 @@ These pipelines can also be viewed, edited, and deleted in Kibana via *Stack Man You may also use the <>. If you delete any of these pipelines outside of the *Content* UI in Kibana, make sure to edit the ML inference pipelines that reference them. -[discrete#ingest-pipeline-search-inference-update-mapping] -==== Update mappings to use ML inference pipelines - -After setting up an ML inference pipeline or attaching an existing one, it may be necessary to manually create the field mappings in order to support the referenced trained ML model's output. -This needs to happen before the pipeline is first used to index some documents, otherwise the model output fields could be inferred with the wrong type. - -[NOTE] -==== -This doesn't apply when you're creating a pipeline with the ELSER model, for which the index mappings are automatically updated in the process. -==== - -The required field name and type depends on the configuration of the pipeline and the trained model it uses. -For example, if you configure a `text_embedding` model, select `summary` as a source field, and `ml.inference.summary` as the target field, the inference output will be stored in `ml.inference..predicted_value` as a <> type. -In order to support semantic search on this field, it must be added to the mapping: - -[source,console] ----- -PUT my-index-0001/_mapping -{ - "properties": { - "ml.inference.summary.predicted_value": { <1> - "type": "dense_vector", <2> - "dims": 768, <3> - "index": true, - "similarity": "dot_product" - } - } -} ----- -// NOTCONSOLE -// TEST[skip:TODO] - -<1> The output of the ML model is stored in the configured target field suffixed with `predicted_value`. -<2> Choose a field type that is compatible with the inference output and supports your search use cases. -<3> Set additional properties as necessary. - -[TIP] -==== -You can check the shape of the generated output before indexing any documents while creating the ML inference pipeline under the *Test* tab. -Simply provide a sample document, click *Simulate*, and look for the `ml.inference` object in the results. -==== - [discrete#ingest-pipeline-search-inference-test-inference-pipeline] ==== Test your ML inference pipeline +You can verify the expected structure of the inference output before indexing any documents while creating the {ml} inference pipeline under the *Test* tab. +Provide a sample document, click *Simulate*, and look for the `ml.inference` object in the results. 
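The *Simulate* check can also be driven programmatically through the simulate pipeline API. A minimal sketch using the low-level Java REST client, where the pipeline name `my-index-0001@ml-inference` and the `summary` source field are hypothetical stand-ins for whatever the UI generated:

[source,java]
----
import org.apache.http.HttpHost;
import org.apache.http.util.EntityUtils;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class SimulateInferencePipeline {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            // _simulate runs the pipeline against the sample document without indexing it,
            // so the shape of the ml.inference output can be inspected safely.
            Request request = new Request("POST", "/_ingest/pipeline/my-index-0001@ml-inference/_simulate");
            request.setJsonEntity("""
                { "docs": [ { "_source": { "summary": "A sample document" } } ] }
                """);
            Response response = client.performRequest(request);
            // Look for the ml.inference object in the simulated result.
            System.out.println(EntityUtils.toString(response.getEntity()));
        }
    }
}
----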
+ To ensure the ML inference pipeline will be run when ingesting documents, you must make sure the documents you are ingesting have a field named `_run_ml_inference` that is set to `true`, and you must set the pipeline to `{index_name}`. For connector and crawler indices, this will happen automatically if you've configured the settings appropriately for the pipeline name `{index_name}`. To manage these settings: diff --git a/docs/reference/query-dsl/exists-query.asciidoc b/docs/reference/query-dsl/exists-query.asciidoc index 75d1b07ea3851..9a9f642daa3f4 100644 --- a/docs/reference/query-dsl/exists-query.asciidoc +++ b/docs/reference/query-dsl/exists-query.asciidoc @@ -9,7 +9,7 @@ Returns documents that contain an indexed value for a field. An indexed value may not exist for a document's field due to a variety of reasons: * The field in the source JSON is `null` or `[]` -* The field has `"index" : false` set in the mapping +* The field has `"index" : false` and `"doc_values" : false` set in the mapping * The length of the field value exceeded an `ignore_above` setting in the mapping * The field value was malformed and `ignore_malformed` was defined in the mapping diff --git a/docs/reference/query-dsl/span-containing-query.asciidoc b/docs/reference/query-dsl/span-containing-query.asciidoc index ec1c0bdf0a8d6..8a8eeba12a7b2 100644 --- a/docs/reference/query-dsl/span-containing-query.asciidoc +++ b/docs/reference/query-dsl/span-containing-query.asciidoc @@ -4,8 +4,7 @@ Span containing ++++ -Returns matches which enclose another span query. The span containing -query maps to Lucene `SpanContainingQuery`. Here is an example: +Returns matches which enclose another span query. Here is an example: [source,console] -------------------------------------------------- diff --git a/docs/reference/query-dsl/span-field-masking-query.asciidoc b/docs/reference/query-dsl/span-field-masking-query.asciidoc index 3a869f64b45f3..b0a9a0a1d6207 100644 --- a/docs/reference/query-dsl/span-field-masking-query.asciidoc +++ b/docs/reference/query-dsl/span-field-masking-query.asciidoc @@ -4,11 +4,11 @@ Span field masking ++++ -Wrapper to allow span queries to participate in composite single-field span queries by 'lying' about their search field. The span field masking query maps to Lucene's `SpanFieldMaskingQuery` +Wrapper to allow span queries to participate in composite single-field span queries by 'lying' about their search field. This can be used to support queries like `span-near` or `span-or` across different fields, which is not ordinarily permitted. -Span field masking query is invaluable in conjunction with *multi-fields* when same content is indexed with multiple analyzers. For instance we could index a field with the standard analyzer which breaks text up into words, and again with the english analyzer which stems words into their root form. +Span field masking query is invaluable in conjunction with *multi-fields* when the same content is indexed with multiple analyzers. For instance, we could index a field with the standard analyzer which breaks text up into words, and again with the english analyzer which stems words into their root form.
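For illustration, the multi-field setup this passage assumes might be created as follows; a sketch in which the index name `my-index` is hypothetical, while the `text`/`text.stems` field names match the example below:

[source,java]
----
import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.RestClient;

public class MultiFieldMapping {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            Request request = new Request("PUT", "/my-index");
            // "text" is analyzed with the standard analyzer; the "stems" sub-field
            // indexes the same content again with the stemming "english" analyzer.
            request.setJsonEntity("""
                {
                  "mappings": {
                    "properties": {
                      "text": {
                        "type": "text",
                        "analyzer": "standard",
                        "fields": {
                          "stems": { "type": "text", "analyzer": "english" }
                        }
                      }
                    }
                  }
                }""");
            client.performRequest(request);
        }
    }
}
----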
Example: @@ -28,18 +28,33 @@ GET /_search "span_field_masking": { "query": { "span_term": { - "text.stems": "fox" + "text.stems": "fox" <1> } }, - "field": "text" + "field": "text" <2> } } ], "slop": 5, "in_order": false } + }, + "highlight": { + "require_field_match" : false, <3> + "fields": { + "*": {} + } } } -------------------------------------------------- +<1> Original field on which we do the search +<2> Masked field, which we are masking with the original field +<3> Use "require_field_match" : false to highlight the masked field + +Note: `span_field_masking` query may have unexpected scoring and highlighting +behaviour. This is because the query returns and highlights the masked field, +but scoring and highlighting are done using the term statistics and offsets +of the original field. -Note: as span field masking query returns the masked field, scoring will be done using the norms of the field name supplied. This may lead to unexpected scoring behaviour. +Note: For highlighting to work, the `require_field_match` parameter must +be set to `false` on the highlighter. diff --git a/docs/reference/query-dsl/span-first-query.asciidoc b/docs/reference/query-dsl/span-first-query.asciidoc index 77e3f557fd982..0b6d4ef80adfb 100644 --- a/docs/reference/query-dsl/span-first-query.asciidoc +++ b/docs/reference/query-dsl/span-first-query.asciidoc @@ -4,8 +4,7 @@ Span first ++++ -Matches spans near the beginning of a field. The span first query maps -to Lucene `SpanFirstQuery`. Here is an example: +Matches spans near the beginning of a field. Here is an example: [source,console] -------------------------------------------------- @@ -19,7 +18,7 @@ GET /_search "end": 3 } } -} +} -------------------------------------------------- The `match` clause can be any other span type query. The `end` controls diff --git a/docs/reference/query-dsl/span-near-query.asciidoc b/docs/reference/query-dsl/span-near-query.asciidoc index 0a1aa7082fbb2..1c68cfa12f72c 100644 --- a/docs/reference/query-dsl/span-near-query.asciidoc +++ b/docs/reference/query-dsl/span-near-query.asciidoc @@ -6,8 +6,7 @@ Matches spans which are near one another. One can specify _slop_, the maximum number of intervening unmatched positions, as well as whether -matches are required to be in-order. The span near query maps to Lucene -`SpanNearQuery`. Here is an example: +matches are required to be in-order. Here is an example: [source,console] -------------------------------------------------- diff --git a/docs/reference/query-dsl/span-not-query.asciidoc b/docs/reference/query-dsl/span-not-query.asciidoc index 99814eba9d88a..c1ddf00a7a939 100644 --- a/docs/reference/query-dsl/span-not-query.asciidoc +++ b/docs/reference/query-dsl/span-not-query.asciidoc @@ -6,8 +6,8 @@ Removes matches which overlap with another span query or which are within x tokens before (controlled by the parameter `pre`) or y tokens -after (controlled by the parameter `post`) another SpanQuery. The span not -query maps to Lucene `SpanNotQuery`. Here is an example: +after (controlled by the parameter `post`) another SpanQuery. +Here is an example: [source,console] -------------------------------------------------- diff --git a/docs/reference/query-dsl/span-or-query.asciidoc b/docs/reference/query-dsl/span-or-query.asciidoc index 6c0e78ab266d9..4ab12073c5d2c 100644 --- a/docs/reference/query-dsl/span-or-query.asciidoc +++ b/docs/reference/query-dsl/span-or-query.asciidoc @@ -4,8 +4,7 @@ Span or ++++ -Matches the union of its span clauses.
The span or query maps to Lucene -`SpanOrQuery`. Here is an example: +Matches the union of its span clauses. Here is an example: [source,console] -------------------------------------------------- diff --git a/docs/reference/query-dsl/span-term-query.asciidoc b/docs/reference/query-dsl/span-term-query.asciidoc index 0dac73c9f7019..8e5e49d14e452 100644 --- a/docs/reference/query-dsl/span-term-query.asciidoc +++ b/docs/reference/query-dsl/span-term-query.asciidoc @@ -4,8 +4,7 @@ Span term ++++ -Matches spans containing a term. The span term query maps to Lucene -`SpanTermQuery`. Here is an example: +Matches spans containing a term. Here is an example: [source,console] -------------------------------------------------- @@ -14,7 +13,7 @@ GET /_search "query": { "span_term" : { "user.id" : "kimchy" } } -} +} -------------------------------------------------- A boost can also be associated with the query: @@ -26,7 +25,7 @@ GET /_search "query": { "span_term" : { "user.id" : { "value" : "kimchy", "boost" : 2.0 } } } -} +} -------------------------------------------------- Or: @@ -38,5 +37,5 @@ GET /_search "query": { "span_term" : { "user.id" : { "term" : "kimchy", "boost" : 2.0 } } } -} +} -------------------------------------------------- diff --git a/docs/reference/query-dsl/span-within-query.asciidoc b/docs/reference/query-dsl/span-within-query.asciidoc index 62a12fc719613..0592e83117014 100644 --- a/docs/reference/query-dsl/span-within-query.asciidoc +++ b/docs/reference/query-dsl/span-within-query.asciidoc @@ -4,8 +4,8 @@ Span within ++++ -Returns matches which are enclosed inside another span query. The span within -query maps to Lucene `SpanWithinQuery`. Here is an example: +Returns matches which are enclosed inside another span query. +Here is an example: [source,console] -------------------------------------------------- diff --git a/docs/reference/query-dsl/special-queries.asciidoc b/docs/reference/query-dsl/special-queries.asciidoc index d46377f698359..8fb23ca4dbb64 100644 --- a/docs/reference/query-dsl/special-queries.asciidoc +++ b/docs/reference/query-dsl/special-queries.asciidoc @@ -60,3 +60,5 @@ include::wrapper-query.asciidoc[] include::pinned-query.asciidoc[] include::rule-query.asciidoc[] + +include::weighted-tokens-query.asciidoc[] diff --git a/docs/reference/query-dsl/text-expansion-query.asciidoc b/docs/reference/query-dsl/text-expansion-query.asciidoc index 46a9aafdd1af8..cb0a7c6ea9c01 100644 --- a/docs/reference/query-dsl/text-expansion-query.asciidoc +++ b/docs/reference/query-dsl/text-expansion-query.asciidoc @@ -62,7 +62,7 @@ Default: Disabled. Parameters for `` are: `tokens_freq_ratio_threshold`:: -(Optional, float) +(Optional, integer) preview:[] Tokens whose frequency is more than `tokens_freq_ratio_threshold` times the average frequency of all tokens in the specified field are considered outliers and pruned. This value must be between 1 and 100. @@ -110,29 +110,96 @@ GET my-index/_search ---- // TEST[skip: TBD] -[discrete] -[[text-expansion-query-with-pruning-config-example]] -=== Example ELSER query with pruning configuration +Multiple `text_expansion` queries can be combined with each other or other query types. +This can be achieved by wrapping them in <> and using linear boosting: -The following is an extension to the above example that adds a preview:[] pruning configuration to the `text_expansion` query. -The pruning configuration identifies non-significant tokens to prune from the query in order to improve query performance.
[source,console] ---- GET my-index/_search { - "query":{ - "text_expansion":{ - "ml.tokens":{ - "model_id":".elser_model_2", - "model_text":"How is the weather in Jamaica?" - }, - "pruning_config": { - "tokens_freq_ratio_threshold": 5, - "tokens_weight_threshold": 0.4, - "only_score_pruned_tokens": false - } + "query": { + "bool": { + "should": [ + { + "text_expansion": { + "ml.inference.title_expanded.predicted_value": { + "model_id": ".elser_model_2", + "model_text": "How is the weather in Jamaica?", + "boost": 1 + } + } + }, + { + "text_expansion": { + "ml.inference.description_expanded.predicted_value": { + "model_id": ".elser_model_2", + "model_text": "How is the weather in Jamaica?", + "boost": 1 + } + } + }, + { + "multi_match": { + "query": "How is the weather in Jamaica?", + "fields": [ + "title", + "description" + ], + "boost": 4 + } + } + ] + } + } +} +---- +// TEST[skip: TBD] + +This can also be achieved by using sub searches combined with <>. + +[source,console] +---- +GET my-index/_search +{ + "sub_searches": [ + { + "query": { + "multi_match": { + "query": "How is the weather in Jamaica?", + "fields": [ + "title", + "description" + ] + } } - } + }, + { + "query": { + "text_expansion": { + "ml.inference.title_expanded.predicted_value": { + "model_id": ".elser_model_2", + "model_text": "How is the weather in Jamaica?" + } + } + } + }, + { + "query": { + "text_expansion": { + "ml.inference.description_expanded.predicted_value": { + "model_id": ".elser_model_2", + "model_text": "How is the weather in Jamaica?" + } + } + } + } + ], + "rank": { + "rrf": { + "window_size": 10, + "rank_constant": 20 + } + } } ---- // TEST[skip: TBD] @@ -141,9 +208,13 @@ GET my-index/_search [[text-expansion-query-with-pruning-config-and-rescore-example]] === Example ELSER query with pruning configuration and rescore -The following is an extension to the above example that adds a <> function on top of the preview:[] pruning configuration to the `text_expansion` query. +The following is an extension to the above example that adds a preview:[] pruning configuration to the `text_expansion` query. The pruning configuration identifies non-significant tokens to prune from the query in order to improve query performance. -Rescoring the query with the tokens that were originally pruned from the query may improve overall search relevance when using this pruning strategy. + +Token pruning happens at the shard level. +While this should result in the same tokens being labeled as insignificant across shards, this is not guaranteed based on the composition of each shard. +Therefore, if you are running `text_expansion` with a `pruning_config` on a multi-shard index, we strongly recommend adding a <> function with the tokens that were originally pruned from the query. +This will help mitigate any shard-level inconsistency with pruned tokens and provide better relevance overall. [source,console] ---- @@ -188,30 +259,3 @@ GET my-index/_search ==== Depending on your data, the text expansion query may be faster with `track_total_hits: false`. ==== - -[discrete] -[[weighted-tokens-query-example]] -=== Example Weighted token query - -In order to quickly iterate during tests, we exposed a new preview:[] `weighted_tokens` query for evaluation of tokenized datasets. -While this is not a query that is intended for production use, it can be used to quickly evaluate relevance using various pruning configurations. 
- -[source,console] ---- -POST /docs/_search -{ - "query": { - "weighted_tokens": { - "query_expansion": { - "tokens": {"2161": 0.4679, "2621": 0.307, "2782": 0.1299, "2851": 0.1056, "3088": 0.3041, "3376": 0.1038, "3467": 0.4873, "3684": 0.8958, "4380": 0.334, "4542": 0.4636, "4633": 2.2805, "4785": 1.2628, "4860": 1.0655, "5133": 1.0709, "7139": 1.0016, "7224": 0.2486, "7387": 0.0985, "7394": 0.0542, "8915": 0.369, "9156": 2.8947, "10505": 0.2771, "11464": 0.3996, "13525": 0.0088, "14178": 0.8161, "16893": 0.1376, "17851": 1.5348, "19939": 0.6012}, - "pruning_config": { - "tokens_freq_ratio_threshold": 5, - "tokens_weight_threshold": 0.4, - "only_score_pruned_tokens": false - } - } - } - } -} ----- -//TEST[skip: TBD] diff --git a/docs/reference/query-dsl/weighted-tokens-query.asciidoc b/docs/reference/query-dsl/weighted-tokens-query.asciidoc new file mode 100644 index 0000000000000..cbd88eb3290dc --- /dev/null +++ b/docs/reference/query-dsl/weighted-tokens-query.asciidoc @@ -0,0 +1,122 @@ +[[query-dsl-weighted-tokens-query]] +=== Weighted tokens query +++++ +Weighted tokens +++++ + +preview::[] + +The weighted tokens query requires a list of token-weight pairs that are sent in with a query rather than calculated using a {nlp} model. +These token pairs are then used in a query against a <> or <> field. + +Weighted tokens queries are useful when you want to use an external query expansion model, or quickly prototype changes without reindexing a new model. + +[discrete] +[[weighted-tokens-query-ex-request]] +==== Example request + +[source,console] +---- +POST _search +{ + "query": { + "weighted_tokens": { + "query_expansion_field": { + "tokens": {"2161": 0.4679, "2621": 0.307, "2782": 0.1299, "2851": 0.1056, "3088": 0.3041, "3376": 0.1038, "3467": 0.4873, "3684": 0.8958, "4380": 0.334, "4542": 0.4636, "4633": 2.2805, "4785": 1.2628, "4860": 1.0655, "5133": 1.0709, "7139": 1.0016, "7224": 0.2486, "7387": 0.0985, "7394": 0.0542, "8915": 0.369, "9156": 2.8947, "10505": 0.2771, "11464": 0.3996, "13525": 0.0088, "14178": 0.8161, "16893": 0.1376, "17851": 1.5348, "19939": 0.6012}, + "pruning_config": { + "tokens_freq_ratio_threshold": 5, + "tokens_weight_threshold": 0.4, + "only_score_pruned_tokens": false + } + } + } + } +} +---- +// TEST[skip: TBD] + +[discrete] +[[weighted-token-query-params]] +=== Top level parameters for `weighted_tokens` + +``::: +(Required, dictionary) +A dictionary of token-weight pairs. + +`pruning_config` :::: +(Optional, object) +Optional pruning configuration. If enabled, this will omit non-significant tokens from the query in order to improve query performance. +Default: Disabled. ++ +-- +Parameters for `` are: + +`tokens_freq_ratio_threshold`:: +(Optional, integer) +Tokens whose frequency is more than `tokens_freq_ratio_threshold` times the average frequency of all tokens in the specified field are considered outliers and pruned. +This value must be between 1 and 100. +Default: `5`. + +`tokens_weight_threshold`:: +(Optional, float) +Tokens whose weight is less than `tokens_weight_threshold` are considered nonsignificant and pruned. +This value must be between 0 and 1. +Default: `0.4`. + +`only_score_pruned_tokens`:: +(Optional, boolean) +If `true`, we only input pruned tokens into scoring, and discard non-pruned tokens. +It is strongly recommended to set this to `false` for the main query, but this can be set to `true` for a rescore query to get more relevant results. +Default: `false`.
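Read literally, these two thresholds prune a token either for being a frequency outlier or for carrying too little weight. A minimal Java sketch of that decision rule (illustrative only; the actual shard-level implementation may combine the conditions differently):

[source,java]
----
public class PruningRuleSketch {
    /** Would a token be pruned under the two thresholds described above? */
    static boolean isPruned(long tokenFreq, double averageFreq, double weight,
                            int tokensFreqRatioThreshold, double tokensWeightThreshold) {
        boolean frequencyOutlier = tokenFreq > tokensFreqRatioThreshold * averageFreq; // stopword-like token
        boolean nonSignificantWeight = weight < tokensWeightThreshold;
        return frequencyOutlier || nonSignificantWeight;
    }

    public static void main(String[] args) {
        // With the defaults (ratio 5, weight 0.4) and an average field frequency of 2000:
        System.out.println(isPruned(120_000, 2_000, 2.2805, 5, 0.4)); // true: frequency outlier
        System.out.println(isPruned(40, 2_000, 0.0088, 5, 0.4));      // true: negligible weight
        System.out.println(isPruned(500, 2_000, 0.4679, 5, 0.4));     // false: token is kept
    }
}
----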
+ +NOTE: The default values for `tokens_freq_ratio_threshold` and `tokens_weight_threshold` were chosen based on tests using ELSER that provided the best results. +-- + +[discrete] +[[weighted-tokens-query-with-pruning-config-and-rescore-example]] +==== Example weighted tokens query with pruning configuration and rescore + +The following example adds a pruning configuration to the `weighted_tokens` query. +The pruning configuration identifies non-significant tokens to prune from the query in order to improve query performance. + +Token pruning happens at the shard level. +While this should result in the same tokens being labeled as insignificant across shards, this is not guaranteed based on the composition of each shard. +Therefore, if you are running `weighted_tokens` with a `pruning_config` on a multi-shard index, we strongly recommend adding a <> function with the tokens that were originally pruned from the query. +This will help mitigate any shard-level inconsistency with pruned tokens and provide better relevance overall. + +[source,console] +---- +GET my-index/_search +{ + "query":{ + "weighted_tokens": { + "query_expansion_field": { + "tokens": {"2161": 0.4679, "2621": 0.307, "2782": 0.1299, "2851": 0.1056, "3088": 0.3041, "3376": 0.1038, "3467": 0.4873, "3684": 0.8958, "4380": 0.334, "4542": 0.4636, "4633": 2.2805, "4785": 1.2628, "4860": 1.0655, "5133": 1.0709, "7139": 1.0016, "7224": 0.2486, "7387": 0.0985, "7394": 0.0542, "8915": 0.369, "9156": 2.8947, "10505": 0.2771, "11464": 0.3996, "13525": 0.0088, "14178": 0.8161, "16893": 0.1376, "17851": 1.5348, "19939": 0.6012}, + "pruning_config": { + "tokens_freq_ratio_threshold": 5, + "tokens_weight_threshold": 0.4, + "only_score_pruned_tokens": false + } + } + } + }, + "rescore": { + "window_size": 100, + "query": { + "rescore_query": { + "weighted_tokens": { + "query_expansion_field": { + "tokens": {"2161": 0.4679, "2621": 0.307, "2782": 0.1299, "2851": 0.1056, "3088": 0.3041, "3376": 0.1038, "3467": 0.4873, "3684": 0.8958, "4380": 0.334, "4542": 0.4636, "4633": 2.2805, "4785": 1.2628, "4860": 1.0655, "5133": 1.0709, "7139": 1.0016, "7224": 0.2486, "7387": 0.0985, "7394": 0.0542, "8915": 0.369, "9156": 2.8947, "10505": 0.2771, "11464": 0.3996, "13525": 0.0088, "14178": 0.8161, "16893": 0.1376, "17851": 1.5348, "19939": 0.6012}, + "pruning_config": { + "tokens_freq_ratio_threshold": 5, + "tokens_weight_threshold": 0.4, + "only_score_pruned_tokens": true + } + } + } + } + } + } +} +---- +//TEST[skip: TBD] diff --git a/docs/reference/search/search-your-data/search-with-synonyms.asciidoc b/docs/reference/search/search-your-data/search-with-synonyms.asciidoc index fb6abd6d36099..16952f94890c7 100644 --- a/docs/reference/search/search-your-data/search-with-synonyms.asciidoc +++ b/docs/reference/search/search-your-data/search-with-synonyms.asciidoc @@ -75,6 +75,13 @@ A large number of inline synonyms increases cluster size unnecessarily and can l Once your synonyms sets are created, you can start configuring your token filters and analyzers to use them. +[WARNING] +====== +Synonyms sets must exist before they can be added to indices. +If an index is created referencing a nonexistent synonyms set, the index will remain in a partially created and inoperable state. +The only way to recover from this scenario is to ensure the synonyms set exists, then either delete and re-create the index, or close and re-open the index. +====== + {es} uses synonyms as part of the <>.
You can use two types of <> to include synonyms: diff --git a/docs/reference/settings/security-settings.asciidoc b/docs/reference/settings/security-settings.asciidoc index f4875fd096b00..78850f617ee65 100644 --- a/docs/reference/settings/security-settings.asciidoc +++ b/docs/reference/settings/security-settings.asciidoc @@ -71,6 +71,11 @@ the sensitive nature of the information. (<>) Enables fips mode of operation. Set this to `true` if you run this {es} instance in a FIPS 140-2 enabled JVM. For more information, see <>. Defaults to `false`. +`xpack.security.fips_mode.required_providers`:: +(<>) +Optionally enforce specific Java JCE/JSSE security providers. For example, set this to `["BCFIPS", "BCJSSE"]` (case-insensitive) to require +the Bouncy Castle FIPS JCE and JSSE security providers. Only applicable when `xpack.security.fips_mode.enabled` is set to `true`. + [discrete] [[password-hashing-settings]] ==== Password hashing settings diff --git a/docs/reference/setup/secure-settings.asciidoc b/docs/reference/setup/secure-settings.asciidoc index d51c0dd684871..22e828f96f5d2 100644 --- a/docs/reference/setup/secure-settings.asciidoc +++ b/docs/reference/setup/secure-settings.asciidoc @@ -6,11 +6,11 @@ their values is not sufficient. For this use case, {es} provides a keystore and the <> to manage the settings in the keystore. -IMPORTANT: Only some settings are designed to be read from the keystore. However, -the keystore has no validation to block unsupported settings. Adding unsupported -settings to the keystore causes {es} to fail to start. To see whether a setting -is supported in the keystore, look for a "Secure" qualifier in the setting -reference. +IMPORTANT: Only some settings are designed to be read from the keystore. +Adding unsupported settings to the keystore causes the validation in the +`_nodes/reload_secure_settings` API to fail and, if not addressed, will +cause {es} to fail to start. To see whether a setting is supported in the +keystore, look for a "Secure" qualifier in the setting reference. All the modifications to the keystore take effect only after restarting {es}. @@ -42,12 +42,12 @@ POST _nodes/reload_secure_settings <1> The password that the {es} keystore is encrypted with. -This API decrypts and re-reads the entire keystore, on every cluster node, -but only the *reloadable* secure settings are applied. Changes to other -settings do not go into effect until the next restart. Once the call returns, -the reload has been completed, meaning that all internal data structures -dependent on these settings have been changed. Everything should look as if the -settings had the new value from the start. +This API decrypts and re-reads the entire keystore and validates all settings on +every cluster node, but only the *reloadable* secure settings are applied. +Changes to other settings do not go into effect until the next restart. Once +the call returns, the reload has been completed, meaning that all internal data +structures dependent on these settings have been changed. Everything should +look as if the settings had the new value from the start.
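A minimal sketch of issuing this reload from the low-level Java REST client; the password value is a placeholder, and the request body can be omitted when the keystore is not password-protected:

[source,java]
----
import org.apache.http.HttpHost;
import org.apache.http.util.EntityUtils;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class ReloadSecureSettings {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            Request request = new Request("POST", "/_nodes/reload_secure_settings");
            // Only needed when the keystore is password-protected.
            request.setJsonEntity("{\"secure_settings_password\": \"keystore-password\"}");
            Response response = client.performRequest(request);
            // Each node entry in the response either confirms the reload or carries
            // a reload_exception describing the validation failure.
            System.out.println(EntityUtils.toString(response.getEntity()));
        }
    }
}
----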
When changing multiple *reloadable* secure settings, modify all of them on each cluster node, then issue a <> diff --git a/docs/reference/snapshot-restore/restore-snapshot.asciidoc b/docs/reference/snapshot-restore/restore-snapshot.asciidoc index 04f9f55ef13b4..632573de02b69 100644 --- a/docs/reference/snapshot-restore/restore-snapshot.asciidoc +++ b/docs/reference/snapshot-restore/restore-snapshot.asciidoc @@ -359,12 +359,7 @@ POST _watcher/_start ---- // TEST[continued] //// --- - -. {blank} + --- * Universal Profiling + Check if Universal Profiling index template management is enabled: @@ -385,22 +380,25 @@ PUT _cluster/settings } } ---- +-- -[[restore-create-file-realm-user]] -If you use {es} security features, log in to a node host, navigate to the {es} -installation directory, and add a user with the `superuser` role to the file -realm using the <> tool. +. [[restore-create-file-realm-user]]If you use {es} security features, log in to +a node host, navigate to the {es} installation directory, and add a user with +the `superuser` role to the file realm using the +<> tool. ++ For example, the following command creates a user named `restore_user`. ++ [source,sh] ---- ./bin/elasticsearch-users useradd restore_user -p my_password -r superuser ---- ++ Use this file realm user to authenticate requests until the restore operation is complete. --- . Use the <> to set <> to diff --git a/docs/src/yamlRestTest/java/org/elasticsearch/smoketest/DocsClientYamlTestSuiteIT.java b/docs/src/yamlRestTest/java/org/elasticsearch/smoketest/DocsClientYamlTestSuiteIT.java index 6df51189e918e..1d6df60df0f88 100644 --- a/docs/src/yamlRestTest/java/org/elasticsearch/smoketest/DocsClientYamlTestSuiteIT.java +++ b/docs/src/yamlRestTest/java/org/elasticsearch/smoketest/DocsClientYamlTestSuiteIT.java @@ -16,7 +16,6 @@ import org.apache.http.util.EntityUtils; import org.apache.lucene.tests.util.TimeUnits; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.Version; import org.elasticsearch.client.Request; import org.elasticsearch.client.RestClient; import org.elasticsearch.common.settings.SecureString; @@ -48,7 +47,6 @@ import java.util.Iterator; import java.util.List; import java.util.Map; -import java.util.function.Predicate; import static java.util.Collections.emptyList; import static java.util.Collections.emptyMap; @@ -98,20 +96,9 @@ protected boolean randomizeContentType() { protected ClientYamlTestClient initClientYamlTestClient( final ClientYamlSuiteRestSpec restSpec, final RestClient restClient, - final List hosts, - final Version esVersion, - final Predicate clusterFeaturesPredicate, - final String os + final List hosts ) { - return new ClientYamlDocsTestClient( restSpec, restClient, hosts, esVersion, clusterFeaturesPredicate, os, this::getClientBuilderWithSniffedHosts ); + return new ClientYamlDocsTestClient(restSpec, restClient, hosts, this::getClientBuilderWithSniffedHosts); } @Before diff --git a/gradle/verification-metadata.xml b/gradle/verification-metadata.xml index 263602c9841a8..185ddcf0606dc 100644 [24 hunks spanning lines 2664-2902, each updating dependency checksum entries; the XML element content did not survive extraction]
diff --git a/libs/cli/build.gradle b/libs/cli/build.gradle index c12ae87ee65fe..dc045ba09e531 100644 --- a/libs/cli/build.gradle +++ b/libs/cli/build.gradle @@ -11,9 +11,12 @@ apply plugin: 'elasticsearch.publish' dependencies { api 'net.sf.jopt-simple:jopt-simple:5.0.2' api project(':libs:elasticsearch-core') + + testImplementation(project(":test:framework")) { + exclude group: 'org.elasticsearch', module: 'elasticsearch-cli' + } } -tasks.named("test").configure { enabled = false } // Since CLI does not depend on :server, it cannot run the jarHell task tasks.named("jarHell").configure { enabled = false } diff --git a/libs/cli/src/main/java/org/elasticsearch/cli/Terminal.java b/libs/cli/src/main/java/org/elasticsearch/cli/Terminal.java index 856dfc6a5a078..69cb76636a996 100644 --- a/libs/cli/src/main/java/org/elasticsearch/cli/Terminal.java +++ b/libs/cli/src/main/java/org/elasticsearch/cli/Terminal.java @@ -18,6 +18,8 @@ import java.io.OutputStream; import java.io.PrintWriter; import java.io.Reader; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; import java.nio.charset.Charset; import java.util.Arrays; import java.util.Locale; @@ -274,8 +276,8 @@ public boolean isHeadless() { } private static class ConsoleTerminal extends Terminal { - - private static final Console CONSOLE = System.console(); + private static final int JDK_VERSION_WITH_IS_TERMINAL = 22; + private static final Console CONSOLE = detectTerminal(); ConsoleTerminal() { super(CONSOLE.reader(), CONSOLE.writer(), ERROR_WRITER); @@ -285,6 +287,23 @@ static boolean isSupported() { return CONSOLE != null; } + static Console detectTerminal() { + // JDK >= 22 returns a console even if the terminal is redirected unless using -Djdk.console=java.base + // https://bugs.openjdk.org/browse/JDK-8308591 + Console console = System.console(); + if (console != null && Runtime.version().feature() >= JDK_VERSION_WITH_IS_TERMINAL) { + try { + // verify the console is a terminal using isTerminal() on JDK >= 22 + // TODO: Remove reflection once Java 22 sources are supported, e.g. using a MRJAR + Method isTerminal = Console.class.getMethod("isTerminal"); + return Boolean.TRUE.equals(isTerminal.invoke(console)) ? console : null; + } catch (NoSuchMethodException | IllegalAccessException | InvocationTargetException e) { + throw new AssertionError(e); + } + } + return console; + } + @Override public String readText(String prompt) { return CONSOLE.readLine("%s", prompt); diff --git a/libs/cli/src/test/java/org/elasticsearch/cli/TerminalTests.java b/libs/cli/src/test/java/org/elasticsearch/cli/TerminalTests.java new file mode 100644 index 0000000000000..9c1faf911a829 --- /dev/null +++ b/libs/cli/src/test/java/org/elasticsearch/cli/TerminalTests.java @@ -0,0 +1,23 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements.
Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.cli; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.ESTestCase.WithoutSecurityManager; + +@WithoutSecurityManager +public class TerminalTests extends ESTestCase { + + public void testSystemTerminalIfRedirected() { + // Expect system terminal if redirected for tests. + // To force new behavior in JDK 22 this should run without security manager. + // Otherwise, JDK 22 doesn't provide a console if redirected. + assertEquals(Terminal.SystemTerminal.class, Terminal.DEFAULT.getClass()); + } +} diff --git a/libs/core/src/main/java/org/elasticsearch/core/Releasables.java b/libs/core/src/main/java/org/elasticsearch/core/Releasables.java index c2b48c4706573..0840258f0a86d 100644 --- a/libs/core/src/main/java/org/elasticsearch/core/Releasables.java +++ b/libs/core/src/main/java/org/elasticsearch/core/Releasables.java @@ -11,6 +11,7 @@ import java.io.IOException; import java.io.UncheckedIOException; import java.util.Arrays; +import java.util.Iterator; import java.util.concurrent.atomic.AtomicReference; /** Utility methods to work with {@link Releasable}s. */ @@ -103,6 +104,24 @@ public String toString() { }; } + /** + * Similar to {@link #wrap(Iterable)} except that it accepts an {@link Iterator} of releasables. The resulting resource must therefore + * only be released once. + */ + public static Releasable wrap(final Iterator releasables) { + return assertOnce(wrap(new Iterable<>() { + @Override + public Iterator iterator() { + return releasables; + } + + @Override + public String toString() { + return releasables.toString(); + } + })); + } + /** @see #wrap(Iterable) */ public static Releasable wrap(final Releasable... 
releasables) { return new Releasable() { diff --git a/libs/core/src/test/java/org/elasticsearch/core/ReleasablesTests.java b/libs/core/src/test/java/org/elasticsearch/core/ReleasablesTests.java index 1520b0224c116..d54c9b8104e8b 100644 --- a/libs/core/src/test/java/org/elasticsearch/core/ReleasablesTests.java +++ b/libs/core/src/test/java/org/elasticsearch/core/ReleasablesTests.java @@ -107,5 +107,27 @@ public String toString() { assertEquals("wrapped[list]", wrapIterable.toString()); wrapIterable.close(); assertEquals(5, count.get()); + + final var wrapIterator = Releasables.wrap(new Iterator<>() { + final Iterator innerIterator = List.of(releasable, releasable, releasable).iterator(); + + @Override + public boolean hasNext() { + return innerIterator.hasNext(); + } + + @Override + public Releasable next() { + return innerIterator.next(); + } + + @Override + public String toString() { + return "iterator"; + } + }); + assertEquals("wrapped[iterator]", wrapIterator.toString()); + wrapIterator.close(); + assertEquals(8, count.get()); } } diff --git a/libs/x-content/src/main/java/org/elasticsearch/xcontent/FilterXContentParser.java b/libs/x-content/src/main/java/org/elasticsearch/xcontent/FilterXContentParser.java index 4a166a03ecdf0..96d186dd612b0 100644 --- a/libs/x-content/src/main/java/org/elasticsearch/xcontent/FilterXContentParser.java +++ b/libs/x-content/src/main/java/org/elasticsearch/xcontent/FilterXContentParser.java @@ -236,7 +236,10 @@ public boolean isClosed() { @Override public void close() throws IOException { - delegate().close(); + var closeable = delegate(); + if (closeable != null) { + closeable.close(); + } } @Override diff --git a/libs/x-content/src/test/java/org/elasticsearch/xcontent/ConstructingObjectParserTests.java b/libs/x-content/src/test/java/org/elasticsearch/xcontent/ConstructingObjectParserTests.java index 446fb21471961..f0703c626c583 100644 --- a/libs/x-content/src/test/java/org/elasticsearch/xcontent/ConstructingObjectParserTests.java +++ b/libs/x-content/src/test/java/org/elasticsearch/xcontent/ConstructingObjectParserTests.java @@ -102,22 +102,24 @@ public void testRandomOrder() throws Exception { } public void testMissingAllConstructorArgs() throws IOException { - XContentParser parser = createParser(JsonXContent.jsonXContent, "{ \"mineral\": 1 }"); - ConstructingObjectParser objectParser = randomBoolean() - ? HasCtorArguments.PARSER - : HasCtorArguments.PARSER_VEGETABLE_OPTIONAL; - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> objectParser.apply(parser, null)); - if (objectParser == HasCtorArguments.PARSER) { - assertEquals("Required [animal, vegetable]", e.getMessage()); - } else { - assertEquals("Required [animal]", e.getMessage()); + try (XContentParser parser = createParser(JsonXContent.jsonXContent, "{ \"mineral\": 1 }")) { + ConstructingObjectParser objectParser = randomBoolean() + ? 
HasCtorArguments.PARSER + : HasCtorArguments.PARSER_VEGETABLE_OPTIONAL; + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> objectParser.apply(parser, null)); + if (objectParser == HasCtorArguments.PARSER) { + assertEquals("Required [animal, vegetable]", e.getMessage()); + } else { + assertEquals("Required [animal]", e.getMessage()); + } } } public void testMissingAllConstructorArgsButNotRequired() throws IOException { - XContentParser parser = createParser(JsonXContent.jsonXContent, "{ \"mineral\": 1 }"); - HasCtorArguments parsed = HasCtorArguments.PARSER_ALL_OPTIONAL.apply(parser, null); - assertEquals(1, parsed.mineral); + try (XContentParser parser = createParser(JsonXContent.jsonXContent, "{ \"mineral\": 1 }")) { + HasCtorArguments parsed = HasCtorArguments.PARSER_ALL_OPTIONAL.apply(parser, null); + assertEquals(1, parsed.mineral); + } } public void testMissingSecondConstructorArg() throws IOException { diff --git a/modules/apm/NAMING.md b/modules/apm/NAMING.md index d56a0a441e764..8e8d1bf2463e2 100644 --- a/modules/apm/NAMING.md +++ b/modules/apm/NAMING.md @@ -64,3 +64,10 @@ Attribute names should follow the same rules. In particular, these rules apply t * pluralization (when an attribute represents a measurement) For **pluralization**, when an attribute represents an entity, the attribute name should be singular (e.g. `es.security.realm_type`, not `es.security.realms_type` or `es.security.realm_types`), unless it represents a collection (e.g. `es.rest.request_headers`) + + +### List of previously registered metric names +You can inspect all previously registered metric names with +`./gradlew run -Dtests.es.logger.org.elasticsearch.telemetry.apm=debug` +This should help you find the already registered group that your metric +might fit into. diff --git a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/APMMeterRegistry.java b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/APMMeterRegistry.java index cd6d3d209b3ed..382fc9417eac0 100644 --- a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/APMMeterRegistry.java +++ b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/APMMeterRegistry.java @@ -10,6 +10,8 @@ import io.opentelemetry.api.metrics.Meter; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.concurrent.ReleasableLock; import org.elasticsearch.telemetry.apm.internal.metrics.DoubleAsyncCounterAdapter; @@ -47,6 +49,7 @@ * {@link #setProvider(Meter)} is used to change the provider for all existing meterRegistrar.
*/ public class APMMeterRegistry implements MeterRegistry { + private static final Logger logger = LogManager.getLogger(APMMeterRegistry.class); private final Registrar doubleCounters = new Registrar<>(); private final Registrar doubleAsynchronousCounters = new Registrar<>(); private final Registrar doubleUpDownCounters = new Registrar<>(); @@ -207,6 +210,7 @@ public LongHistogram getLongHistogram(String name) { private > T register(Registrar registrar, T adapter) { assert registrars.contains(registrar) : "usage of unknown registrar"; + logger.debug("Registering an instrument with name: " + adapter.getName()); return registrar.register(adapter); } diff --git a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/tracing/APMTracer.java b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/tracing/APMTracer.java index 49fdc44681aa3..f021eb61ca753 100644 --- a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/tracing/APMTracer.java +++ b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/tracing/APMTracer.java @@ -30,11 +30,11 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.Maps; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; -import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Releasable; import org.elasticsearch.tasks.Task; -import org.elasticsearch.telemetry.tracing.SpanId; +import org.elasticsearch.telemetry.tracing.TraceContext; +import org.elasticsearch.telemetry.tracing.Traceable; import java.security.AccessController; import java.security.PrivilegedAction; @@ -61,7 +61,7 @@ public class APMTracer extends AbstractLifecycleComponent implements org.elastic private static final Logger logger = LogManager.getLogger(APMTracer.class); /** Holds in-flight span information. */ - private final Map spans = ConcurrentCollections.newConcurrentMap(); + private final Map spans = ConcurrentCollections.newConcurrentMap(); private volatile boolean enabled; private volatile APMServices services; @@ -160,8 +160,9 @@ private void destroyApmServices() { } @Override - public void startTrace(ThreadContext threadContext, SpanId spanId, String spanName, @Nullable Map attributes) { - assert threadContext != null; + public void startTrace(TraceContext traceContext, Traceable traceable, String spanName, @Nullable Map attributes) { + assert traceContext != null; + String spanId = traceable.getSpanId(); assert spanId != null; assert spanName != null; @@ -182,21 +183,21 @@ public void startTrace(ThreadContext threadContext, SpanId spanId, String spanNa // A span can have a parent span, which here is modelled through a parent span context. // Setting this is important for seeing a complete trace in the APM UI.
- final Context parentContext = getParentContext(threadContext); + final Context parentContext = getParentContext(traceContext); if (parentContext != null) { spanBuilder.setParent(parentContext); } - setSpanAttributes(threadContext, attributes, spanBuilder); + setSpanAttributes(traceContext, attributes, spanBuilder); - Instant startTime = threadContext.getTransient(Task.TRACE_START_TIME); + Instant startTime = traceContext.getTransient(Task.TRACE_START_TIME); if (startTime != null) { spanBuilder.setStartTimestamp(startTime); } final Span span = spanBuilder.startSpan(); final Context contextForNewSpan = Context.current().with(span); - updateThreadContext(threadContext, services, contextForNewSpan); + updateThreadContext(traceContext, services, contextForNewSpan); return contextForNewSpan; })); @@ -221,29 +222,29 @@ public void startTrace(String name, Map attributes) { spanBuilder.startSpan(); } - private static void updateThreadContext(ThreadContext threadContext, APMServices services, Context context) { + private static void updateThreadContext(TraceContext traceContext, APMServices services, Context context) { // The new span context can be used as the parent context directly within the same Java process... - threadContext.putTransient(Task.APM_TRACE_CONTEXT, context); + traceContext.putTransient(Task.APM_TRACE_CONTEXT, context); - // ...whereas for tasks sent to other ES nodes, we need to put trace HTTP headers into the threadContext so + // ...whereas for tasks sent to other ES nodes, we need to put trace HTTP headers into the traceContext so // that they can be propagated. - services.openTelemetry.getPropagators().getTextMapPropagator().inject(context, threadContext, (tc, key, value) -> { + services.openTelemetry.getPropagators().getTextMapPropagator().inject(context, traceContext, (tc, key, value) -> { if (isSupportedContextKey(key)) { tc.putHeader(key, value); } }); } - private Context getParentContext(ThreadContext threadContext) { + private Context getParentContext(TraceContext traceContext) { // https://github.com/open-telemetry/opentelemetry-java/discussions/2884#discussioncomment-381870 // If you just want to propagate across threads within the same process, you don't need context propagators (extract/inject). // You can just pass the Context object directly to another thread (it is immutable and thus thread-safe). // Attempt to fetch a local parent context first, otherwise look for a remote parent - Context parentContext = threadContext.getTransient("parent_" + Task.APM_TRACE_CONTEXT); + Context parentContext = traceContext.getTransient("parent_" + Task.APM_TRACE_CONTEXT); if (parentContext == null) { - final String traceParentHeader = threadContext.getTransient("parent_" + Task.TRACE_PARENT_HTTP_HEADER); - final String traceStateHeader = threadContext.getTransient("parent_" + Task.TRACE_STATE); + final String traceParentHeader = traceContext.getTransient("parent_" + Task.TRACE_PARENT_HTTP_HEADER); + final String traceStateHeader = traceContext.getTransient("parent_" + Task.TRACE_STATE); if (traceParentHeader != null) { final Map traceContextMap = Maps.newMapWithExpectedSize(2); @@ -276,12 +277,12 @@ private Context getParentContext(ThreadContext threadContext) { * However, if a scope is active, then the APM agent can capture additional information, so this method * exists to make it possible to use scopes in the few situation where it makes sense. * - * @param spanId the ID of a currently-open span for which to open a scope. 
+ * @param traceable provides the ID of a currently-open span for which to open a scope. * @return a method to close the scope when you are finished with it. */ @Override - public Releasable withScope(SpanId spanId) { - final Context context = spans.get(spanId); + public Releasable withScope(Traceable traceable) { + final Context context = spans.get(traceable.getSpanId()); if (context != null) { var scope = AccessController.doPrivileged((PrivilegedAction) context::makeCurrent); return scope::close; @@ -327,60 +328,60 @@ private void setSpanAttributes(@Nullable Map spanAttributes, Spa spanBuilder.setAttribute(org.elasticsearch.telemetry.tracing.Tracer.AttributeKeys.CLUSTER_NAME, clusterName); } - private void setSpanAttributes(ThreadContext threadContext, @Nullable Map spanAttributes, SpanBuilder spanBuilder) { + private void setSpanAttributes(TraceContext traceContext, @Nullable Map spanAttributes, SpanBuilder spanBuilder) { setSpanAttributes(spanAttributes, spanBuilder); - final String xOpaqueId = threadContext.getHeader(Task.X_OPAQUE_ID_HTTP_HEADER); + final String xOpaqueId = traceContext.getHeader(Task.X_OPAQUE_ID_HTTP_HEADER); if (xOpaqueId != null) { spanBuilder.setAttribute("es.x-opaque-id", xOpaqueId); } } @Override - public void addError(SpanId spanId, Throwable throwable) { - final var span = Span.fromContextOrNull(spans.get(spanId)); + public void addError(Traceable traceable, Throwable throwable) { + final var span = Span.fromContextOrNull(spans.get(traceable.getSpanId())); if (span != null) { span.recordException(throwable); } } @Override - public void setAttribute(SpanId spanId, String key, boolean value) { - final var span = Span.fromContextOrNull(spans.get(spanId)); + public void setAttribute(Traceable traceable, String key, boolean value) { + final var span = Span.fromContextOrNull(spans.get(traceable.getSpanId())); if (span != null) { span.setAttribute(key, value); } } @Override - public void setAttribute(SpanId spanId, String key, double value) { - final var span = Span.fromContextOrNull(spans.get(spanId)); + public void setAttribute(Traceable traceable, String key, double value) { + final var span = Span.fromContextOrNull(spans.get(traceable.getSpanId())); if (span != null) { span.setAttribute(key, value); } } @Override - public void setAttribute(SpanId spanId, String key, long value) { - final var span = Span.fromContextOrNull(spans.get(spanId)); + public void setAttribute(Traceable traceable, String key, long value) { + final var span = Span.fromContextOrNull(spans.get(traceable.getSpanId())); if (span != null) { span.setAttribute(key, value); } } @Override - public void setAttribute(SpanId spanId, String key, String value) { - final var span = Span.fromContextOrNull(spans.get(spanId)); + public void setAttribute(Traceable traceable, String key, String value) { + final var span = Span.fromContextOrNull(spans.get(traceable.getSpanId())); if (span != null) { span.setAttribute(key, value); } } @Override - public void stopTrace(SpanId spanId) { - final var span = Span.fromContextOrNull(spans.remove(spanId)); + public void stopTrace(Traceable traceable) { + final var span = Span.fromContextOrNull(spans.remove(traceable.getSpanId())); if (span != null) { - logger.trace("Finishing trace [{}]", spanId); + logger.trace("Finishing trace [{}]", traceable); AccessController.doPrivileged((PrivilegedAction) () -> { span.end(); return null; @@ -400,8 +401,8 @@ public void stopTrace() { } @Override - public void addEvent(SpanId spanId, String eventName) { - final var span = 
Span.fromContextOrNull(spans.get(spanId)); + public void addEvent(Traceable traceable, String eventName) { + final var span = Span.fromContextOrNull(spans.get(traceable.getSpanId())); if (span != null) { span.addEvent(eventName); } @@ -425,7 +426,7 @@ private static boolean isSupportedContextKey(String key) { } // VisibleForTesting - Map getSpans() { + Map getSpans() { return spans; } diff --git a/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/tracing/APMTracerTests.java b/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/tracing/APMTracerTests.java index 8cb94b782756d..04a4e1b3f3a34 100644 --- a/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/tracing/APMTracerTests.java +++ b/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/tracing/APMTracerTests.java @@ -22,13 +22,14 @@ import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.tasks.Task; import org.elasticsearch.telemetry.apm.internal.APMAgentSettings; -import org.elasticsearch.telemetry.tracing.SpanId; +import org.elasticsearch.telemetry.tracing.Traceable; import org.elasticsearch.test.ESTestCase; import java.time.Instant; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.concurrent.TimeUnit; import java.util.stream.Stream; @@ -44,9 +45,9 @@ public class APMTracerTests extends ESTestCase { - private static final SpanId SPAN_ID1 = SpanId.forBareString("id1"); - private static final SpanId SPAN_ID2 = SpanId.forBareString("id2"); - private static final SpanId SPAN_ID3 = SpanId.forBareString("id3"); + private static final Traceable TRACEABLE1 = new TestTraceable("id1"); + private static final Traceable TRACEABLE2 = new TestTraceable("id2"); + private static final Traceable TRACEABLE3 = new TestTraceable("id3"); /** * Check that the tracer doesn't create spans when tracing is disabled. 
@@ -55,7 +56,7 @@ public void test_onTraceStarted_withTracingDisabled_doesNotStartTrace() { Settings settings = Settings.builder().put(APMAgentSettings.APM_ENABLED_SETTING.getKey(), false).build(); APMTracer apmTracer = buildTracer(settings); - apmTracer.startTrace(new ThreadContext(settings), SPAN_ID1, "name1", null); + apmTracer.startTrace(new ThreadContext(settings), TRACEABLE1, "name1", null); assertThat(apmTracer.getSpans(), anEmptyMap()); } @@ -70,7 +71,7 @@ public void test_onTraceStarted_withSpanNameOmitted_doesNotStartTrace() { .build(); APMTracer apmTracer = buildTracer(settings); - apmTracer.startTrace(new ThreadContext(settings), SPAN_ID1, "name1", null); + apmTracer.startTrace(new ThreadContext(settings), TRACEABLE1, "name1", null); assertThat(apmTracer.getSpans(), anEmptyMap()); } @@ -82,10 +83,10 @@ public void test_onTraceStarted_startsTrace() { Settings settings = Settings.builder().put(APMAgentSettings.APM_ENABLED_SETTING.getKey(), true).build(); APMTracer apmTracer = buildTracer(settings); - apmTracer.startTrace(new ThreadContext(settings), SPAN_ID1, "name1", null); + apmTracer.startTrace(new ThreadContext(settings), TRACEABLE1, "name1", null); assertThat(apmTracer.getSpans(), aMapWithSize(1)); - assertThat(apmTracer.getSpans(), hasKey(SPAN_ID1)); + assertThat(apmTracer.getSpans(), hasKey(TRACEABLE1.getSpanId())); } /** @@ -99,10 +100,10 @@ public void test_onTraceStartedWithStartTime_startsTrace() { // 1_000_000L because of "toNanos" conversions that overflow for large long millis Instant spanStartTime = Instant.ofEpochMilli(randomLongBetween(0, Long.MAX_VALUE / 1_000_000L)); threadContext.putTransient(Task.TRACE_START_TIME, spanStartTime); - apmTracer.startTrace(threadContext, SPAN_ID1, "name1", null); + apmTracer.startTrace(threadContext, TRACEABLE1, "name1", null); assertThat(apmTracer.getSpans(), aMapWithSize(1)); - assertThat(apmTracer.getSpans(), hasKey(SPAN_ID1)); + assertThat(apmTracer.getSpans(), hasKey(TRACEABLE1.getSpanId())); assertThat(((SpyAPMTracer) apmTracer).getSpanStartTime("name1"), is(spanStartTime)); } @@ -113,8 +114,8 @@ public void test_onTraceStopped_stopsTrace() { Settings settings = Settings.builder().put(APMAgentSettings.APM_ENABLED_SETTING.getKey(), true).build(); APMTracer apmTracer = buildTracer(settings); - apmTracer.startTrace(new ThreadContext(settings), SPAN_ID1, "name1", null); - apmTracer.stopTrace(SPAN_ID1); + apmTracer.startTrace(new ThreadContext(settings), TRACEABLE1, "name1", null); + apmTracer.stopTrace(TRACEABLE1); assertThat(apmTracer.getSpans(), anEmptyMap()); } @@ -131,7 +132,7 @@ public void test_whenTraceStarted_threadContextIsPopulated() { APMTracer apmTracer = buildTracer(settings); ThreadContext threadContext = new ThreadContext(settings); - apmTracer.startTrace(threadContext, SPAN_ID1, "name1", null); + apmTracer.startTrace(threadContext, TRACEABLE1, "name1", null); assertThat(threadContext.getTransient(Task.APM_TRACE_CONTEXT), notNullValue()); } @@ -152,13 +153,13 @@ public void test_whenTraceStarted_andSpanNameIncluded_thenSpanIsStarted() { .build(); APMTracer apmTracer = buildTracer(settings); - apmTracer.startTrace(new ThreadContext(settings), SPAN_ID1, "name-aaa", null); - apmTracer.startTrace(new ThreadContext(settings), SPAN_ID2, "name-bbb", null); - apmTracer.startTrace(new ThreadContext(settings), SPAN_ID3, "name-ccc", null); + apmTracer.startTrace(new ThreadContext(settings), TRACEABLE1, "name-aaa", null); + apmTracer.startTrace(new ThreadContext(settings), TRACEABLE2, "name-bbb", null); + 
apmTracer.startTrace(new ThreadContext(settings), TRACEABLE3, "name-ccc", null); - assertThat(apmTracer.getSpans(), hasKey(SPAN_ID1)); - assertThat(apmTracer.getSpans(), hasKey(SPAN_ID2)); - assertThat(apmTracer.getSpans(), not(hasKey(SPAN_ID3))); + assertThat(apmTracer.getSpans(), hasKey(TRACEABLE1.getSpanId())); + assertThat(apmTracer.getSpans(), hasKey(TRACEABLE2.getSpanId())); + assertThat(apmTracer.getSpans(), not(hasKey(TRACEABLE3.getSpanId()))); } /** @@ -175,7 +176,7 @@ public void test_whenTraceStarted_andSpanNameIncludedAndExcluded_thenSpanIsNotSt .build(); APMTracer apmTracer = buildTracer(settings); - apmTracer.startTrace(new ThreadContext(settings), SPAN_ID1, "name-aaa", null); + apmTracer.startTrace(new ThreadContext(settings), TRACEABLE1, "name-aaa", null); assertThat(apmTracer.getSpans(), not(hasKey("id1"))); } @@ -197,13 +198,13 @@ public void test_whenTraceStarted_andSpanNameExcluded_thenSpanIsNotStarted() { .build(); APMTracer apmTracer = buildTracer(settings); - apmTracer.startTrace(new ThreadContext(settings), SPAN_ID1, "name-aaa", null); - apmTracer.startTrace(new ThreadContext(settings), SPAN_ID2, "name-bbb", null); - apmTracer.startTrace(new ThreadContext(settings), SPAN_ID3, "name-ccc", null); + apmTracer.startTrace(new ThreadContext(settings), TRACEABLE1, "name-aaa", null); + apmTracer.startTrace(new ThreadContext(settings), TRACEABLE2, "name-bbb", null); + apmTracer.startTrace(new ThreadContext(settings), TRACEABLE3, "name-ccc", null); - assertThat(apmTracer.getSpans(), not(hasKey(SPAN_ID1))); - assertThat(apmTracer.getSpans(), not(hasKey(SPAN_ID2))); - assertThat(apmTracer.getSpans(), hasKey(SPAN_ID3)); + assertThat(apmTracer.getSpans(), not(hasKey(TRACEABLE1.getSpanId()))); + assertThat(apmTracer.getSpans(), not(hasKey(TRACEABLE2.getSpanId()))); + assertThat(apmTracer.getSpans(), hasKey(TRACEABLE3.getSpanId())); } /** @@ -360,4 +361,17 @@ public Span startSpan() { } } } + + private static class TestTraceable implements Traceable { + private final String spanId; + + TestTraceable(String spanId) { + this.spanId = Objects.requireNonNull(spanId); + } + + @Override + public String getSpanId() { + return spanId; + } + } } diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java index 20fc7c19a17a3..979752b671378 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamIT.java @@ -10,6 +10,7 @@ import org.apache.logging.log4j.core.util.Throwables; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.DocWriteResponse; @@ -28,6 +29,8 @@ import org.elasticsearch.action.admin.indices.rollover.RolloverRequest; import org.elasticsearch.action.admin.indices.rollover.RolloverResponse; import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; +import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresRequest; +import org.elasticsearch.action.admin.indices.shards.TransportIndicesShardStoresAction; import org.elasticsearch.action.admin.indices.stats.IndexShardStats; import 
org.elasticsearch.action.admin.indices.stats.IndicesStatsAction; import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest; @@ -107,6 +110,7 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.CyclicBarrier; import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; @@ -278,17 +282,17 @@ public void testOtherWriteOps() throws Exception { } { IndexRequest indexRequest = new IndexRequest(dataStreamName).source("{\"@timestamp\": \"2020-12-12\"}", XContentType.JSON); - Exception e = expectThrows(IllegalArgumentException.class, () -> client().index(indexRequest).actionGet()); + Exception e = expectThrows(IllegalArgumentException.class, client().index(indexRequest)); assertThat(e.getMessage(), equalTo("only write ops with an op_type of create are allowed in data streams")); } { UpdateRequest updateRequest = new UpdateRequest(dataStreamName, "_id").doc("{}", XContentType.JSON); - Exception e = expectThrows(IllegalArgumentException.class, () -> client().update(updateRequest).actionGet()); + Exception e = expectThrows(IllegalArgumentException.class, client().update(updateRequest)); assertThat(e.getMessage(), equalTo("only write ops with an op_type of create are allowed in data streams")); } { DeleteRequest deleteRequest = new DeleteRequest(dataStreamName, "_id"); - Exception e = expectThrows(IllegalArgumentException.class, () -> client().delete(deleteRequest).actionGet()); + Exception e = expectThrows(IllegalArgumentException.class, client().delete(deleteRequest)); assertThat(e.getMessage(), equalTo("only write ops with an op_type of create are allowed in data streams")); } { @@ -523,7 +527,7 @@ public void testTimeStampValidationInvalidFieldMapping() throws Exception { Exception e = expectThrows( IllegalArgumentException.class, - () -> client().execute(PutComposableIndexTemplateAction.INSTANCE, createTemplateRequest).actionGet() + client().execute(PutComposableIndexTemplateAction.INSTANCE, createTemplateRequest) ); assertThat( e.getCause().getCause().getMessage(), @@ -580,7 +584,7 @@ public void testResolvabilityOfDataStreamsInAPIs() throws Exception { verifyResolvability(dataStreamName, indicesAdmin().prepareClose(dataStreamName), true); verifyResolvability(aliasToDataStream, indicesAdmin().prepareClose(aliasToDataStream), true); verifyResolvability(dataStreamName, clusterAdmin().prepareSearchShards(dataStreamName), false); - verifyResolvability(dataStreamName, indicesAdmin().prepareShardStores(dataStreamName), false); + verifyResolvability(client().execute(TransportIndicesShardStoresAction.TYPE, new IndicesShardStoresRequest(dataStreamName))); request = new CreateDataStreamAction.Request("logs-barbaz"); client().execute(CreateDataStreamAction.INSTANCE, request).actionGet(); @@ -624,7 +628,7 @@ public void testResolvabilityOfDataStreamsInAPIs() throws Exception { verifyResolvability(wildcardExpression, indicesAdmin().prepareOpen(wildcardExpression), false); verifyResolvability(wildcardExpression, indicesAdmin().prepareClose(wildcardExpression), false); verifyResolvability(wildcardExpression, clusterAdmin().prepareSearchShards(wildcardExpression), false); - verifyResolvability(wildcardExpression, indicesAdmin().prepareShardStores(wildcardExpression), false); + verifyResolvability(client().execute(TransportIndicesShardStoresAction.TYPE, new IndicesShardStoresRequest(wildcardExpression))); } public 
void testCannotDeleteComposableTemplateUsedByDataStream() throws Exception { @@ -636,7 +640,7 @@ public void testCannotDeleteComposableTemplateUsedByDataStream() throws Exceptio client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).get(); DeleteComposableIndexTemplateAction.Request req = new DeleteComposableIndexTemplateAction.Request("id"); - Exception e = expectThrows(Exception.class, () -> client().execute(DeleteComposableIndexTemplateAction.INSTANCE, req).get()); + Exception e = expectThrows(Exception.class, client().execute(DeleteComposableIndexTemplateAction.INSTANCE, req)); Optional maybeE = ExceptionsHelper.unwrapCausesAndSuppressed( e, err -> err.getMessage() @@ -648,7 +652,7 @@ public void testCannotDeleteComposableTemplateUsedByDataStream() throws Exceptio assertTrue(maybeE.isPresent()); DeleteComposableIndexTemplateAction.Request req2 = new DeleteComposableIndexTemplateAction.Request("i*"); - Exception e2 = expectThrows(Exception.class, () -> client().execute(DeleteComposableIndexTemplateAction.INSTANCE, req2).get()); + Exception e2 = expectThrows(Exception.class, client().execute(DeleteComposableIndexTemplateAction.INSTANCE, req2)); maybeE = ExceptionsHelper.unwrapCausesAndSuppressed( e2, err -> err.getMessage() @@ -677,7 +681,7 @@ public void testCannotDeleteComposableTemplateUsedByDataStream() throws Exceptio client().execute(DeleteComposableIndexTemplateAction.INSTANCE, deleteRequest).get(); GetComposableIndexTemplateAction.Request getReq = new GetComposableIndexTemplateAction.Request("id"); - Exception e3 = expectThrows(Exception.class, () -> client().execute(GetComposableIndexTemplateAction.INSTANCE, getReq).get()); + Exception e3 = expectThrows(Exception.class, client().execute(GetComposableIndexTemplateAction.INSTANCE, getReq)); maybeE = ExceptionsHelper.unwrapCausesAndSuppressed(e3, err -> err.getMessage().contains("index template matching [id] not found")); assertTrue(maybeE.isPresent()); } @@ -876,7 +880,7 @@ public void testDataSteamAliasWithMalformedFilter() throws Exception { } Exception e = expectThrows( IllegalArgumentException.class, - () -> indicesAdmin().aliases(new IndicesAliasesRequest().addAliasAction(addAction)).actionGet() + indicesAdmin().aliases(new IndicesAliasesRequest().addAliasAction(addAction)) ); assertThat(e.getMessage(), equalTo("failed to parse filter for alias [" + alias + "]")); GetAliasesResponse response = indicesAdmin().getAliases(new GetAliasesRequest()).actionGet(); @@ -893,7 +897,7 @@ public void testAliasActionsFailOnDataStreamBackingIndices() throws Exception { AliasActions addAction = new AliasActions(AliasActions.Type.ADD).index(backingIndex).aliases("first_gen"); IndicesAliasesRequest aliasesAddRequest = new IndicesAliasesRequest(); aliasesAddRequest.addAliasAction(addAction); - Exception e = expectThrows(IllegalArgumentException.class, () -> indicesAdmin().aliases(aliasesAddRequest).actionGet()); + Exception e = expectThrows(IllegalArgumentException.class, indicesAdmin().aliases(aliasesAddRequest)); assertThat( e.getMessage(), equalTo( @@ -917,7 +921,7 @@ public void testAddDataStreamAliasesMixedExpressionValidation() throws Exception AliasActions addAction = new AliasActions(AliasActions.Type.ADD).index("metrics-*").aliases("my-alias"); IndicesAliasesRequest aliasesAddRequest = new IndicesAliasesRequest(); aliasesAddRequest.addAliasAction(addAction); - Exception e = expectThrows(IllegalArgumentException.class, () -> indicesAdmin().aliases(aliasesAddRequest).actionGet()); + Exception e = 
expectThrows(IllegalArgumentException.class, indicesAdmin().aliases(aliasesAddRequest)); assertThat(e.getMessage(), equalTo("expressions [metrics-*] that match with both data streams and regular indices are disallowed")); } @@ -979,7 +983,7 @@ public void testUpdateDataStreamsWithWildcards() throws Exception { { IndicesAliasesRequest aliasesAddRequest = new IndicesAliasesRequest(); aliasesAddRequest.addAliasAction(new AliasActions(AliasActions.Type.ADD).index("metrics-foo").aliases("my-alias*")); - expectThrows(InvalidAliasNameException.class, () -> indicesAdmin().aliases(aliasesAddRequest).actionGet()); + expectThrows(InvalidAliasNameException.class, indicesAdmin().aliases(aliasesAddRequest)); } // REMOVE does resolve wildcards: { @@ -1005,7 +1009,7 @@ public void testDataStreamAliasesUnsupportedParametersValidation() throws Except AliasActions addAction = new AliasActions(AliasActions.Type.ADD).index("metrics-*").aliases("my-alias").routing("[routing]"); IndicesAliasesRequest aliasesAddRequest = new IndicesAliasesRequest(); aliasesAddRequest.addAliasAction(addAction); - Exception e = expectThrows(IllegalArgumentException.class, () -> indicesAdmin().aliases(aliasesAddRequest).actionGet()); + Exception e = expectThrows(IllegalArgumentException.class, indicesAdmin().aliases(aliasesAddRequest)); assertThat(e.getMessage(), equalTo("aliases that point to data streams don't support routing")); } { @@ -1014,7 +1018,7 @@ public void testDataStreamAliasesUnsupportedParametersValidation() throws Except .indexRouting("[index_routing]"); IndicesAliasesRequest aliasesAddRequest = new IndicesAliasesRequest(); aliasesAddRequest.addAliasAction(addAction); - Exception e = expectThrows(IllegalArgumentException.class, () -> indicesAdmin().aliases(aliasesAddRequest).actionGet()); + Exception e = expectThrows(IllegalArgumentException.class, indicesAdmin().aliases(aliasesAddRequest)); assertThat(e.getMessage(), equalTo("aliases that point to data streams don't support index_routing")); } { @@ -1023,7 +1027,7 @@ public void testDataStreamAliasesUnsupportedParametersValidation() throws Except .searchRouting("[search_routing]"); IndicesAliasesRequest aliasesAddRequest = new IndicesAliasesRequest(); aliasesAddRequest.addAliasAction(addAction); - Exception e = expectThrows(IllegalArgumentException.class, () -> indicesAdmin().aliases(aliasesAddRequest).actionGet()); + Exception e = expectThrows(IllegalArgumentException.class, indicesAdmin().aliases(aliasesAddRequest)); assertThat(e.getMessage(), equalTo("aliases that point to data streams don't support search_routing")); } { @@ -1032,7 +1036,7 @@ public void testDataStreamAliasesUnsupportedParametersValidation() throws Except .isHidden(randomBoolean()); IndicesAliasesRequest aliasesAddRequest = new IndicesAliasesRequest(); aliasesAddRequest.addAliasAction(addAction); - Exception e = expectThrows(IllegalArgumentException.class, () -> indicesAdmin().aliases(aliasesAddRequest).actionGet()); + Exception e = expectThrows(IllegalArgumentException.class, indicesAdmin().aliases(aliasesAddRequest)); assertThat(e.getMessage(), equalTo("aliases that point to data streams don't support is_hidden")); } } @@ -1158,10 +1162,7 @@ public void testIndexDocsWithCustomRoutingTargetingDataStreamIsNotAllowed() thro IndexRequest indexRequestWithRouting = new IndexRequest(dataStream).source("@timestamp", System.currentTimeMillis()) .opType(DocWriteRequest.OpType.CREATE) .routing("custom"); - IllegalArgumentException exception = expectThrows( - IllegalArgumentException.class, - () -> 
client().index(indexRequestWithRouting).actionGet() - ); + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, client().index(indexRequestWithRouting)); assertThat( exception.getMessage(), is( @@ -1320,7 +1321,7 @@ public void testNoTimestampInDocument() throws Exception { client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).get(); IndexRequest indexRequest = new IndexRequest(dataStreamName).opType("create").source("{}", XContentType.JSON); - Exception e = expectThrows(Exception.class, () -> client().index(indexRequest).actionGet()); + Exception e = expectThrows(Exception.class, client().index(indexRequest)); assertThat(e.getCause().getMessage(), equalTo("data stream timestamp field [@timestamp] is missing")); } @@ -1332,7 +1333,7 @@ public void testMultipleTimestampValuesInDocument() throws Exception { IndexRequest indexRequest = new IndexRequest(dataStreamName).opType("create") .source("{\"@timestamp\": [\"2020-12-12\",\"2022-12-12\"]}", XContentType.JSON); - Exception e = expectThrows(Exception.class, () -> client().index(indexRequest).actionGet()); + Exception e = expectThrows(Exception.class, client().index(indexRequest)); assertThat(e.getCause().getMessage(), equalTo("data stream timestamp field [@timestamp] encountered multiple values")); } @@ -1435,7 +1436,7 @@ public void testCreatingDataStreamAndFirstBackingIndexExistsFails() throws Excep CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request(dataStreamName, now); Exception e = expectThrows( ElasticsearchStatusException.class, - () -> client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).actionGet() + client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest) ); assertThat(e.getMessage(), equalTo("data stream could not be created because backing index [" + backingIndex + "] already exists")); } @@ -1605,7 +1606,7 @@ public void testCreateDataStreamWithSameNameAsIndexAlias() throws Exception { DataStreamIT.putComposableIndexTemplate("my-template", List.of("my-*")); var request = new CreateDataStreamAction.Request("my-alias"); - var e = expectThrows(IllegalStateException.class, () -> client().execute(CreateDataStreamAction.INSTANCE, request).actionGet()); + var e = expectThrows(IllegalStateException.class, client().execute(CreateDataStreamAction.INSTANCE, request)); assertThat(e.getMessage(), containsString("[my-alias (alias of [")); assertThat(e.getMessage(), containsString("]) conflicts with data stream")); } @@ -1618,7 +1619,7 @@ public void testCreateDataStreamWithSameNameAsIndex() throws Exception { DataStreamIT.putComposableIndexTemplate("my-template", List.of("my-*")); var request = new CreateDataStreamAction.Request("my-index"); - var e = expectThrows(IllegalStateException.class, () -> client().execute(CreateDataStreamAction.INSTANCE, request).actionGet()); + var e = expectThrows(IllegalStateException.class, client().execute(CreateDataStreamAction.INSTANCE, request)); assertThat(e.getMessage(), containsString("data stream [my-index] conflicts with index")); } @@ -1632,10 +1633,7 @@ public void testCreateDataStreamWithSameNameAsDataStreamAlias() throws Exception assertAcked(indicesAdmin().aliases(aliasesAddRequest).actionGet()); var request2 = new CreateDataStreamAction.Request("my-alias"); - var e = expectThrows( - IllegalStateException.class, - () -> client().execute(CreateDataStreamAction.INSTANCE, request2).actionGet() - ); + var e = expectThrows(IllegalStateException.class, 
client().execute(CreateDataStreamAction.INSTANCE, request2)); assertThat(e.getMessage(), containsString("data stream alias and data stream have the same name (my-alias)")); } { @@ -1653,10 +1651,7 @@ public void testCreateDataStreamWithSameNameAsDataStreamAlias() throws Exception assertAcked(client().execute(CreateDataStreamAction.INSTANCE, request).actionGet()); var request2 = new CreateDataStreamAction.Request("my-alias"); - var e = expectThrows( - IllegalStateException.class, - () -> client().execute(CreateDataStreamAction.INSTANCE, request2).actionGet() - ); + var e = expectThrows(IllegalStateException.class, client().execute(CreateDataStreamAction.INSTANCE, request2)); assertThat(e.getMessage(), containsString("data stream alias and data stream have the same name (my-alias)")); } } @@ -1671,7 +1666,7 @@ public void testCreateDataStreamAliasWithSameNameAsIndexAlias() throws Exception assertAcked(client().execute(CreateDataStreamAction.INSTANCE, request).actionGet()); IndicesAliasesRequest aliasesAddRequest = new IndicesAliasesRequest(); aliasesAddRequest.addAliasAction(new AliasActions(AliasActions.Type.ADD).index("logs-es").aliases("logs")); - var e = expectThrows(IllegalStateException.class, () -> indicesAdmin().aliases(aliasesAddRequest).actionGet()); + var e = expectThrows(IllegalStateException.class, indicesAdmin().aliases(aliasesAddRequest)); assertThat(e.getMessage(), containsString("data stream alias and indices alias have the same name (logs)")); } { @@ -1687,7 +1682,7 @@ public void testCreateDataStreamAliasWithSameNameAsIndexAlias() throws Exception ); var request = new CreateDataStreamAction.Request("logs-es"); - var e = expectThrows(IllegalStateException.class, () -> client().execute(CreateDataStreamAction.INSTANCE, request).actionGet()); + var e = expectThrows(IllegalStateException.class, client().execute(CreateDataStreamAction.INSTANCE, request)); assertThat(e.getMessage(), containsString("data stream alias and indices alias have the same name (logs)")); } } @@ -1703,7 +1698,7 @@ public void testCreateDataStreamAliasWithSameNameAsIndex() throws Exception { assertAcked(client().execute(CreateDataStreamAction.INSTANCE, request).actionGet()); IndicesAliasesRequest aliasesAddRequest = new IndicesAliasesRequest(); aliasesAddRequest.addAliasAction(new AliasActions(AliasActions.Type.ADD).index("logs-es").aliases("logs")); - var e = expectThrows(InvalidAliasNameException.class, () -> indicesAdmin().aliases(aliasesAddRequest).actionGet()); + var e = expectThrows(InvalidAliasNameException.class, indicesAdmin().aliases(aliasesAddRequest)); assertThat( e.getMessage(), equalTo("Invalid alias name [logs]: an index or data stream exists with the same name as the alias") @@ -1740,7 +1735,7 @@ public void testCreateIndexWithSameNameAsDataStreamAlias() throws Exception { assertAcked(indicesAdmin().aliases(aliasesAddRequest).actionGet()); CreateIndexRequest createIndexRequest = new CreateIndexRequest("logs"); - var e = expectThrows(InvalidIndexNameException.class, () -> indicesAdmin().create(createIndexRequest).actionGet()); + var e = expectThrows(InvalidIndexNameException.class, indicesAdmin().create(createIndexRequest)); assertThat(e.getMessage(), equalTo("Invalid index name [logs], already exists as alias")); } @@ -1755,7 +1750,7 @@ public void testCreateIndexAliasWithSameNameAsDataStreamAlias() throws Exception { CreateIndexRequest createIndexRequest = new CreateIndexRequest("my-index").alias(new Alias("logs")); - var e = expectThrows(IllegalStateException.class, () -> 
indicesAdmin().create(createIndexRequest).actionGet()); + var e = expectThrows(IllegalStateException.class, indicesAdmin().create(createIndexRequest)); assertThat(e.getMessage(), containsString("data stream alias and indices alias have the same name (logs)")); } { @@ -1763,7 +1758,7 @@ public void testCreateIndexAliasWithSameNameAsDataStreamAlias() throws Exception assertAcked(indicesAdmin().create(createIndexRequest).actionGet()); IndicesAliasesRequest addAliasRequest = new IndicesAliasesRequest(); addAliasRequest.addAliasAction(new AliasActions(AliasActions.Type.ADD).index("my-index").aliases("logs")); - var e = expectThrows(IllegalStateException.class, () -> indicesAdmin().aliases(addAliasRequest).actionGet()); + var e = expectThrows(IllegalStateException.class, indicesAdmin().aliases(addAliasRequest)); assertThat(e.getMessage(), containsString("data stream alias and indices alias have the same name (logs)")); } } @@ -1821,8 +1816,8 @@ public void onFailure(Exception e) { var ghostReference = brokenDataStreamHolder.get().getIndices().get(0); // Many APIs fail with NPE, because of broken data stream: - expectThrows(NullPointerException.class, () -> indicesAdmin().stats(new IndicesStatsRequest()).actionGet()); - expectThrows(NullPointerException.class, () -> client().search(new SearchRequest()).actionGet()); + expectThrows(NullPointerException.class, indicesAdmin().stats(new IndicesStatsRequest())); + expectThrows(NullPointerException.class, client().search(new SearchRequest())); assertAcked( client().execute( @@ -1859,10 +1854,10 @@ private static void verifyResolvability( assertThat(multiSearchResponse.getResponses()[0].getFailure().getMessage(), equalTo(expectedErrorMessage)); }); } else if (requestBuilder instanceof ValidateQueryRequestBuilder) { - Exception e = expectThrows(IndexNotFoundException.class, requestBuilder::get); + Exception e = expectThrows(IndexNotFoundException.class, requestBuilder); assertThat(e.getMessage(), equalTo(expectedErrorMessage)); } else { - Exception e = expectThrows(IndexNotFoundException.class, requestBuilder::get); + Exception e = expectThrows(IndexNotFoundException.class, requestBuilder); assertThat(e.getMessage(), equalTo(expectedErrorMessage)); } } else { @@ -1874,11 +1869,15 @@ private static void verifyResolvability( multiSearchResponse -> assertThat(multiSearchResponse.getResponses()[0].isFailure(), is(false)) ); } else { - requestBuilder.get(); + verifyResolvability(requestBuilder.execute()); } } } + private static void verifyResolvability(ActionFuture future) { + future.actionGet(10, TimeUnit.SECONDS); + } + static void indexDocs(String dataStream, int numDocs) { try (BulkRequest bulkRequest = new BulkRequest()) { for (int i = 0; i < numDocs; i++) { @@ -1980,10 +1979,10 @@ public void testPartitionedTemplate() throws IOException { ComposableIndexTemplate finalTemplate1 = template; Exception e = expectThrows( IllegalArgumentException.class, - () -> client().execute( + client().execute( PutComposableIndexTemplateAction.INSTANCE, new PutComposableIndexTemplateAction.Request("my-it").indexTemplate(finalTemplate1) - ).actionGet() + ) ); Exception actualException = (Exception) e.getCause(); assertTrue( @@ -2012,10 +2011,10 @@ public void testRoutingEnabledInMappingDisabledInDataStreamTemplate() throws IOE .build(); Exception e = expectThrows( IllegalArgumentException.class, - () -> client().execute( + client().execute( PutComposableIndexTemplateAction.INSTANCE, new PutComposableIndexTemplateAction.Request("my-it").indexTemplate(template) - 
).actionGet() + ) ); Exception actualException = (Exception) e.getCause(); assertTrue(Throwables.getRootCause(actualException).getMessage().contains("contradicting `_routing.required` settings")); diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java index c3e59be54cc7f..2904ba713f1fd 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/DataStreamsSnapshotsIT.java @@ -59,6 +59,7 @@ import java.util.stream.Collectors; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.anEmptyMap; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsInAnyOrder; @@ -161,9 +162,11 @@ public void testSnapshotAndRestore() throws Exception { assertEquals(1, restoreSnapshotResponse.getRestoreInfo().successfulShards()); assertEquals(DOCUMENT_SOURCE, client.prepareGet(dsBackingIndexName, id).get().getSourceAsMap()); - SearchHit[] hits = client.prepareSearch("ds").get().getHits().getHits(); - assertEquals(1, hits.length); - assertEquals(DOCUMENT_SOURCE, hits[0].getSourceAsMap()); + assertResponse(client.prepareSearch("ds"), response -> { + SearchHit[] hits = response.getHits().getHits(); + assertEquals(1, hits.length); + assertEquals(DOCUMENT_SOURCE, hits[0].getSourceAsMap()); + }); GetDataStreamAction.Response ds = client.execute( GetDataStreamAction.INSTANCE, @@ -219,9 +222,11 @@ public void testSnapshotAndRestoreAllDataStreamsInPlace() throws Exception { assertEquals(1, restoreSnapshotResponse.getRestoreInfo().successfulShards()); assertEquals(DOCUMENT_SOURCE, client.prepareGet(dsBackingIndexName, id).get().getSourceAsMap()); - SearchHit[] hits = client.prepareSearch("ds").get().getHits().getHits(); - assertEquals(1, hits.length); - assertEquals(DOCUMENT_SOURCE, hits[0].getSourceAsMap()); + assertResponse(client.prepareSearch("ds"), response -> { + SearchHit[] hits = response.getHits().getHits(); + assertEquals(1, hits.length); + assertEquals(DOCUMENT_SOURCE, hits[0].getSourceAsMap()); + }); GetDataStreamAction.Request getDataSteamRequest = new GetDataStreamAction.Request(new String[] { "*" }); GetDataStreamAction.Response ds = client.execute(GetDataStreamAction.INSTANCE, getDataSteamRequest).get(); @@ -271,9 +276,12 @@ public void testSnapshotAndRestoreInPlace() { assertEquals(1, restoreSnapshotResponse.getRestoreInfo().successfulShards()); assertEquals(DOCUMENT_SOURCE, client.prepareGet(dsBackingIndexName, id).get().getSourceAsMap()); - SearchHit[] hits = client.prepareSearch("ds").get().getHits().getHits(); - assertEquals(1, hits.length); - assertEquals(DOCUMENT_SOURCE, hits[0].getSourceAsMap()); + + assertResponse(client.prepareSearch("ds"), response -> { + SearchHit[] hits = response.getHits().getHits(); + assertEquals(1, hits.length); + assertEquals(DOCUMENT_SOURCE, hits[0].getSourceAsMap()); + }); GetDataStreamAction.Request getDataSteamRequest = new GetDataStreamAction.Request(new String[] { "ds" }); GetDataStreamAction.Response ds = client.execute(GetDataStreamAction.INSTANCE, getDataSteamRequest).actionGet(); @@ -347,9 +355,11 @@ public void 
testSnapshotAndRestoreAllIncludeSpecificDataStream() throws Exceptio assertEquals(1, restoreSnapshotResponse.getRestoreInfo().successfulShards()); assertEquals(DOCUMENT_SOURCE, client.prepareGet(backingIndexName, idToGet).get().getSourceAsMap()); - SearchHit[] hits = client.prepareSearch(backingIndexName).get().getHits().getHits(); - assertEquals(1, hits.length); - assertEquals(DOCUMENT_SOURCE, hits[0].getSourceAsMap()); + assertResponse(client.prepareSearch(backingIndexName), response -> { + SearchHit[] hits = response.getHits().getHits(); + assertEquals(1, hits.length); + assertEquals(DOCUMENT_SOURCE, hits[0].getSourceAsMap()); + }); GetDataStreamAction.Response ds = client.execute( GetDataStreamAction.INSTANCE, @@ -396,9 +406,11 @@ public void testSnapshotAndRestoreReplaceAll() throws Exception { assertEquals(2, restoreSnapshotResponse.getRestoreInfo().successfulShards()); assertEquals(DOCUMENT_SOURCE, client.prepareGet(dsBackingIndexName, id).get().getSourceAsMap()); - SearchHit[] hits = client.prepareSearch("ds").get().getHits().getHits(); - assertEquals(1, hits.length); - assertEquals(DOCUMENT_SOURCE, hits[0].getSourceAsMap()); + assertResponse(client.prepareSearch("ds"), response -> { + SearchHit[] hits = response.getHits().getHits(); + assertEquals(1, hits.length); + assertEquals(DOCUMENT_SOURCE, hits[0].getSourceAsMap()); + }); GetDataStreamAction.Response ds = client.execute( GetDataStreamAction.INSTANCE, @@ -449,9 +461,11 @@ public void testSnapshotAndRestoreAll() throws Exception { assertEquals(2, restoreSnapshotResponse.getRestoreInfo().successfulShards()); assertEquals(DOCUMENT_SOURCE, client.prepareGet(dsBackingIndexName, id).get().getSourceAsMap()); - SearchHit[] hits = client.prepareSearch("ds").get().getHits().getHits(); - assertEquals(1, hits.length); - assertEquals(DOCUMENT_SOURCE, hits[0].getSourceAsMap()); + assertResponse(client.prepareSearch("ds"), response -> { + SearchHit[] hits = response.getHits().getHits(); + assertEquals(1, hits.length); + assertEquals(DOCUMENT_SOURCE, hits[0].getSourceAsMap()); + }); GetDataStreamAction.Response ds = client.execute( GetDataStreamAction.INSTANCE, @@ -505,9 +519,11 @@ public void testSnapshotAndRestoreIncludeAliasesFalse() throws Exception { assertEquals(2, restoreSnapshotResponse.getRestoreInfo().successfulShards()); assertEquals(DOCUMENT_SOURCE, client.prepareGet(dsBackingIndexName, id).get().getSourceAsMap()); - SearchHit[] hits = client.prepareSearch("ds").get().getHits().getHits(); - assertEquals(1, hits.length); - assertEquals(DOCUMENT_SOURCE, hits[0].getSourceAsMap()); + assertResponse(client.prepareSearch("ds"), response -> { + SearchHit[] hits = response.getHits().getHits(); + assertEquals(1, hits.length); + assertEquals(DOCUMENT_SOURCE, hits[0].getSourceAsMap()); + }); GetDataStreamAction.Response ds = client.execute( GetDataStreamAction.INSTANCE, @@ -538,7 +554,7 @@ public void testRename() throws Exception { expectThrows( SnapshotRestoreException.class, - () -> client.admin().cluster().prepareRestoreSnapshot(REPO, SNAPSHOT).setWaitForCompletion(true).setIndices("ds").get() + client.admin().cluster().prepareRestoreSnapshot(REPO, SNAPSHOT).setWaitForCompletion(true).setIndices("ds") ); client.admin() @@ -557,7 +573,10 @@ public void testRename() throws Exception { assertEquals(1, ds.getDataStreams().size()); assertEquals(1, ds.getDataStreams().get(0).getDataStream().getIndices().size()); assertEquals(ds2BackingIndexName, ds.getDataStreams().get(0).getDataStream().getIndices().get(0).getName()); - 
assertEquals(DOCUMENT_SOURCE, client.prepareSearch("ds2").get().getHits().getHits()[0].getSourceAsMap()); + assertResponse( + client.prepareSearch("ds2"), + response -> assertEquals(DOCUMENT_SOURCE, response.getHits().getHits()[0].getSourceAsMap()) + ); assertEquals(DOCUMENT_SOURCE, client.prepareGet(ds2BackingIndexName, id).get().getSourceAsMap()); GetAliasesResponse getAliasesResponse = client.admin().indices().getAliases(new GetAliasesRequest("my-alias")).actionGet(); @@ -652,7 +671,7 @@ public void testBackingIndexIsNotRenamedWhenRestoringDataStream() { expectThrows( SnapshotRestoreException.class, - () -> client.admin().cluster().prepareRestoreSnapshot(REPO, SNAPSHOT).setWaitForCompletion(true).setIndices("ds").get() + client.admin().cluster().prepareRestoreSnapshot(REPO, SNAPSHOT).setWaitForCompletion(true).setIndices("ds") ); // delete data stream @@ -689,7 +708,7 @@ public void testDataStreamAndBackingIndicesAreRenamedUsingRegex() { expectThrows( SnapshotRestoreException.class, - () -> client.admin().cluster().prepareRestoreSnapshot(REPO, SNAPSHOT).setWaitForCompletion(true).setIndices("ds").get() + client.admin().cluster().prepareRestoreSnapshot(REPO, SNAPSHOT).setWaitForCompletion(true).setIndices("ds") ); // restore data stream attempting to rename the backing index @@ -768,7 +787,7 @@ public void testDataStreamNotStoredWhenIndexRequested() { assertEquals(RestStatus.OK, status); expectThrows( Exception.class, - () -> client.admin().cluster().prepareRestoreSnapshot(REPO, "snap2").setWaitForCompletion(true).setIndices("ds").get() + client.admin().cluster().prepareRestoreSnapshot(REPO, "snap2").setWaitForCompletion(true).setIndices("ds") ); } @@ -796,7 +815,7 @@ public void testDataStreamNotRestoredWhenIndexRequested() throws Exception { assertEquals(RestStatus.OK, restoreSnapshotResponse.status()); GetDataStreamAction.Request getRequest = new GetDataStreamAction.Request(new String[] { "ds" }); - expectThrows(ResourceNotFoundException.class, () -> client.execute(GetDataStreamAction.INSTANCE, getRequest).actionGet()); + expectThrows(ResourceNotFoundException.class, client.execute(GetDataStreamAction.INSTANCE, getRequest)); } public void testDataStreamNotIncludedInLimitedSnapshot() throws ExecutionException, InterruptedException { @@ -938,7 +957,7 @@ public void testSnapshotDSDuringRollover() throws Exception { if (partial) { assertTrue(rolloverResponse.get().isRolledOver()); } else { - SnapshotInProgressException e = expectThrows(SnapshotInProgressException.class, rolloverResponse::actionGet); + SnapshotInProgressException e = expectThrows(SnapshotInProgressException.class, rolloverResponse); assertThat(e.getMessage(), containsString("Cannot roll over data stream that is being snapshotted:")); } unblockAllDataNodes(repoName); @@ -1046,7 +1065,7 @@ public void testRestoreDataStreamAliasWithConflictingDataStream() throws Excepti assertAcked(client.execute(CreateDataStreamAction.INSTANCE, request).actionGet()); var e = expectThrows( IllegalStateException.class, - () -> client.admin().cluster().prepareRestoreSnapshot(REPO, snapshotName).setWaitForCompletion(true).get() + client.admin().cluster().prepareRestoreSnapshot(REPO, snapshotName).setWaitForCompletion(true) ); assertThat(e.getMessage(), containsString("data stream alias and data stream have the same name (my-alias)")); } finally { @@ -1067,7 +1086,7 @@ public void testRestoreDataStreamAliasWithConflictingIndicesAlias() throws Excep var e = expectThrows( IllegalStateException.class, - () -> 
client.admin().cluster().prepareRestoreSnapshot(REPO, snapshotName).setWaitForCompletion(true).get() + client.admin().cluster().prepareRestoreSnapshot(REPO, snapshotName).setWaitForCompletion(true) ); assertThat(e.getMessage(), containsString("data stream alias and indices alias have the same name (my-alias)")); } diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/TSDBIndexingIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/TSDBIndexingIT.java index 755dc2fa1523a..34a8e2f4aa9f7 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/TSDBIndexingIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/TSDBIndexingIT.java @@ -49,6 +49,7 @@ import static org.elasticsearch.test.MapMatcher.assertMap; import static org.elasticsearch.test.MapMatcher.matchesMap; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -400,11 +401,12 @@ public void testSkippingShards() throws Exception { var searchRequest = new SearchRequest("pattern-*"); searchRequest.setPreFilterShardSize(1); searchRequest.source(matchingRange); - var searchResponse = client().search(searchRequest).actionGet(); - ElasticsearchAssertions.assertHitCount(searchResponse, 2); - assertThat(searchResponse.getTotalShards(), equalTo(2)); - assertThat(searchResponse.getSkippedShards(), equalTo(0)); - assertThat(searchResponse.getSuccessfulShards(), equalTo(2)); + assertResponse(client().search(searchRequest), searchResponse -> { + ElasticsearchAssertions.assertHitCount(searchResponse, 2); + assertThat(searchResponse.getTotalShards(), equalTo(2)); + assertThat(searchResponse.getSkippedShards(), equalTo(0)); + assertThat(searchResponse.getSuccessfulShards(), equalTo(2)); + }); } { var nonMatchingRange = new SearchSourceBuilder().query( @@ -414,11 +416,12 @@ public void testSkippingShards() throws Exception { var searchRequest = new SearchRequest("pattern-*"); searchRequest.setPreFilterShardSize(1); searchRequest.source(nonMatchingRange); - var searchResponse = client().search(searchRequest).actionGet(); - ElasticsearchAssertions.assertNoSearchHits(searchResponse); - assertThat(searchResponse.getTotalShards(), equalTo(2)); - assertThat(searchResponse.getSkippedShards(), equalTo(1)); - assertThat(searchResponse.getSuccessfulShards(), equalTo(2)); + assertResponse(client().search(searchRequest), searchResponse -> { + ElasticsearchAssertions.assertNoSearchHits(searchResponse); + assertThat(searchResponse.getTotalShards(), equalTo(2)); + assertThat(searchResponse.getSkippedShards(), equalTo(1)); + assertThat(searchResponse.getSuccessfulShards(), equalTo(2)); + }); } } @@ -537,17 +540,19 @@ public void testTrimId() throws Exception { ); // Check the search api can synthesize _id + final String idxName = indexName; var searchRequest = new SearchRequest(dataStreamName); searchRequest.source().trackTotalHits(true); - var searchResponse = client().search(searchRequest).actionGet(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo((long) numBulkRequests * numDocsPerBulk)); - String id = searchResponse.getHits().getHits()[0].getId(); - assertThat(id, notNullValue()); - - // Check that the _id is gettable: - var getResponse = client().get(new GetRequest(indexName).id(id)).actionGet(); - 
assertThat(getResponse.isExists(), is(true)); - assertThat(getResponse.getId(), equalTo(id)); + assertResponse(client().search(searchRequest), searchResponse -> { + assertThat(searchResponse.getHits().getTotalHits().value, equalTo((long) numBulkRequests * numDocsPerBulk)); + String id = searchResponse.getHits().getHits()[0].getId(); + assertThat(id, notNullValue()); + + // Check that the _id is gettable: + var getResponse = client().get(new GetRequest(idxName).id(id)).actionGet(); + assertThat(getResponse.isExists(), is(true)); + assertThat(getResponse.getId(), equalTo(id)); + }); } static String formatInstant(Instant instant) { diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamIndexSettingsProvider.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamIndexSettingsProvider.java index 7ec2d32851ea5..519499addd77e 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamIndexSettingsProvider.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamIndexSettingsProvider.java @@ -83,7 +83,7 @@ public Settings getAdditionalIndexSettings( if (indexMode != null) { if (indexMode == IndexMode.TIME_SERIES) { Settings.Builder builder = Settings.builder(); - TimeValue lookAheadTime = DataStreamsPlugin.LOOK_AHEAD_TIME.get(allSettings); + TimeValue lookAheadTime = DataStreamsPlugin.getLookAheadTime(allSettings); TimeValue lookBackTime = DataStreamsPlugin.LOOK_BACK_TIME.get(allSettings); final Instant start; final Instant end; diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java index fb93b7d688a74..f3739747d96cc 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java @@ -90,15 +90,28 @@ public class DataStreamsPlugin extends Plugin implements ActionPlugin, HealthPlu Setting.Property.Dynamic ); + private static final TimeValue MAX_LOOK_AHEAD_TIME = TimeValue.timeValueHours(2); public static final Setting<TimeValue> LOOK_AHEAD_TIME = Setting.timeSetting( "index.look_ahead_time", TimeValue.timeValueHours(2), TimeValue.timeValueMinutes(1), - TimeValue.timeValueDays(7), + TimeValue.timeValueDays(7), // effectively capped at 2h now, see getLookAheadTime Setting.Property.IndexScope, Setting.Property.Dynamic, Setting.Property.ServerlessPublic ); + + /** + * Returns the look-ahead time, lowering it to 2 hours if it is configured to be more than 2 hours.
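+ * For illustration (configured values assumed): a configured value of {@code 4h} is clamped to 2 hours, while {@code 30m} is returned unchanged.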
+ */ + public static TimeValue getLookAheadTime(Settings settings) { + TimeValue lookAheadTime = DataStreamsPlugin.LOOK_AHEAD_TIME.get(settings); + if (lookAheadTime.compareTo(DataStreamsPlugin.MAX_LOOK_AHEAD_TIME) > 0) { + lookAheadTime = DataStreamsPlugin.MAX_LOOK_AHEAD_TIME; + } + return lookAheadTime; + } + public static final String LIFECYCLE_CUSTOM_INDEX_METADATA_KEY = "data_stream_lifecycle"; public static final Setting<TimeValue> LOOK_BACK_TIME = Setting.timeSetting( "index.look_back_time", diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/UpdateTimeSeriesRangeService.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/UpdateTimeSeriesRangeService.java index f973eb95b39ce..3bbc37cd87ad5 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/UpdateTimeSeriesRangeService.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/UpdateTimeSeriesRangeService.java @@ -108,7 +108,7 @@ ClusterState updateTimeSeriesTemporalRange(ClusterState current, Instant now) { Index head = dataStream.getWriteIndex(); IndexMetadata im = current.metadata().getIndexSafe(head); Instant currentEnd = IndexSettings.TIME_SERIES_END_TIME.get(im.getSettings()); - TimeValue lookAheadTime = DataStreamsPlugin.LOOK_AHEAD_TIME.get(im.getSettings()); + TimeValue lookAheadTime = DataStreamsPlugin.getLookAheadTime(im.getSettings()); Instant newEnd = DataStream.getCanonicalTimestampBound( now.plus(lookAheadTime.getMillis(), ChronoUnit.MILLIS).plus(pollInterval.getMillis(), ChronoUnit.MILLIS) ); diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestDataStreamLifecycleStatsAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestDataStreamLifecycleStatsAction.java index 2daff2a05940c..ece16042706a7 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestDataStreamLifecycleStatsAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestDataStreamLifecycleStatsAction.java @@ -14,7 +14,7 @@ import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; -import org.elasticsearch.rest.action.RestChunkedToXContentListener; +import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener; import java.util.List; @@ -40,6 +40,10 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient if (masterNodeTimeout != null) { request.masterNodeTimeout(masterNodeTimeout); } - return channel -> client.execute(GetDataStreamLifecycleStatsAction.INSTANCE, request, new RestChunkedToXContentListener<>(channel)); + return channel -> client.execute( + GetDataStreamLifecycleStatsAction.INSTANCE, + request, + new RestRefCountedChunkedToXContentListener<>(channel) + ); } } diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestExplainDataStreamLifecycleAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestExplainDataStreamLifecycleAction.java index b3c39bc17f134..09f4b6efce633 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestExplainDataStreamLifecycleAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestExplainDataStreamLifecycleAction.java @@ -16,7 +16,7 @@ import org.elasticsearch.rest.RestRequest; import
org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; -import org.elasticsearch.rest.action.RestChunkedToXContentListener; +import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener; import java.util.List; @@ -48,7 +48,7 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient return channel -> client.execute( ExplainDataStreamLifecycleAction.INSTANCE, explainRequest, - new RestChunkedToXContentListener<>(channel) + new RestRefCountedChunkedToXContentListener<>(channel) ); } diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestGetDataStreamLifecycleAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestGetDataStreamLifecycleAction.java index f65266f1b5e34..f2c514c794b32 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestGetDataStreamLifecycleAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/rest/RestGetDataStreamLifecycleAction.java @@ -15,7 +15,7 @@ import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; -import org.elasticsearch.rest.action.RestChunkedToXContentListener; +import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener; import java.util.List; @@ -44,7 +44,7 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli return channel -> client.execute( GetDataStreamLifecycleAction.INSTANCE, getDataLifecycleRequest, - new RestChunkedToXContentListener<>(channel) + new RestRefCountedChunkedToXContentListener<>(channel) ); } diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/UpdateTimeSeriesRangeServiceTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/UpdateTimeSeriesRangeServiceTests.java index c383991dba19c..0b565d835465f 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/UpdateTimeSeriesRangeServiceTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/UpdateTimeSeriesRangeServiceTests.java @@ -94,7 +94,7 @@ public void testUpdateTimeSeriesTemporalRange() { } public void testUpdateTimeSeriesTemporalRange_customLookAHeadTime() { - int lookAHeadTimeMinutes = randomIntBetween(30, 180); + int lookAHeadTimeMinutes = randomIntBetween(30, 120); TemporalAmount lookAHeadTime = Duration.ofMinutes(lookAHeadTimeMinutes); int timeSeriesPollIntervalMinutes = randomIntBetween(1, 10); TemporalAmount timeSeriesPollInterval = Duration.ofMinutes(timeSeriesPollIntervalMinutes); diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/AppendProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/AppendProcessor.java index 273b76955060b..4f28c9bb14f80 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/AppendProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/AppendProcessor.java @@ -48,9 +48,10 @@ public ValueSource getValue() { } @Override - public IngestDocument execute(IngestDocument ingestDocument) throws Exception { - ingestDocument.appendFieldValue(field, value, allowDuplicates); - return ingestDocument; + public IngestDocument execute(IngestDocument document) throws Exception { + String path = document.renderTemplate(field); + document.appendFieldValue(path, value, allowDuplicates); + return document; } @Override diff 
--git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/JsonProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/JsonProcessor.java index 7b20cfbf0b398..3bf82be24330e 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/JsonProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/JsonProcessor.java @@ -8,8 +8,6 @@ package org.elasticsearch.ingest.common; -import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.core.Strings; import org.elasticsearch.ingest.AbstractProcessor; import org.elasticsearch.ingest.ConfigurationUtils; @@ -20,7 +18,6 @@ import org.elasticsearch.xcontent.json.JsonXContent; import java.io.IOException; -import java.io.InputStream; import java.util.Locale; import java.util.Map; @@ -90,10 +87,11 @@ public ConflictStrategy getAddToRootConflictStrategy() { } public static Object apply(Object fieldValue, boolean allowDuplicateKeys, boolean strictJsonParsing) { - BytesReference bytesRef = fieldValue == null ? new BytesArray("null") : new BytesArray(fieldValue.toString()); try ( - InputStream stream = bytesRef.streamInput(); - XContentParser parser = JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, stream) + XContentParser parser = JsonXContent.jsonXContent.createParser( + XContentParserConfiguration.EMPTY, + fieldValue == null ? "null" : fieldValue.toString() + ) ) { parser.allowDuplicateKeys(allowDuplicateKeys); XContentParser.Token token = parser.nextToken(); diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/RemoveProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/RemoveProcessor.java index c27bc4de85ec4..fa86bcda5047b 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/RemoveProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/RemoveProcessor.java @@ -64,7 +64,7 @@ private void fieldsToRemoveProcessor(IngestDocument document) { } } else { for (TemplateScript.Factory field : fieldsToRemove) { - document.removeField(field); + document.removeField(document.renderTemplate(field)); } } } diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ScriptProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ScriptProcessor.java index 84e66a3134b69..b629f00545aec 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ScriptProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ScriptProcessor.java @@ -10,6 +10,7 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.Nullable; import org.elasticsearch.ingest.AbstractProcessor; import org.elasticsearch.ingest.IngestDocument; @@ -25,7 +26,6 @@ import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xcontent.json.JsonXContent; -import java.io.InputStream; import java.util.Arrays; import java.util.Map; @@ -108,9 +108,11 @@ public ScriptProcessor create( ) throws Exception { try ( XContentBuilder builder = XContentBuilder.builder(JsonXContent.jsonXContent).map(config); - InputStream stream = BytesReference.bytes(builder).streamInput(); - XContentParser parser = XContentType.JSON.xContent() - 
.createParser(XContentParserConfiguration.EMPTY.withDeprecationHandler(LoggingDeprecationHandler.INSTANCE), stream) + XContentParser parser = XContentHelper.createParserNotCompressed( + XContentParserConfiguration.EMPTY.withDeprecationHandler(LoggingDeprecationHandler.INSTANCE), + BytesReference.bytes(builder), + XContentType.JSON + ) ) { Script script = Script.parse(parser); diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/SetProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/SetProcessor.java index 229b796b89c75..2d7db39f3738e 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/SetProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/SetProcessor.java @@ -78,12 +78,13 @@ public boolean isIgnoreEmptyValue() { @Override public IngestDocument execute(IngestDocument document) { - if (overrideEnabled || document.hasField(field) == false || document.getFieldValue(field, Object.class) == null) { + String path = document.renderTemplate(field); + if (overrideEnabled || document.hasField(path) == false || document.getFieldValue(path, Object.class) == null) { if (copyFrom != null) { Object fieldValue = document.getFieldValue(copyFrom, Object.class, ignoreEmptyValue); - document.setFieldValue(field, IngestDocument.deepCopy(fieldValue), ignoreEmptyValue); + document.setFieldValue(path, IngestDocument.deepCopy(fieldValue), ignoreEmptyValue); } else { - document.setFieldValue(field, value, ignoreEmptyValue); + document.setFieldValue(path, value, ignoreEmptyValue); } } return document; diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/GrokProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/GrokProcessorTests.java index 1e40345208a1b..0b20fbc22e1cc 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/GrokProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/GrokProcessorTests.java @@ -120,7 +120,7 @@ public void testMatchWithoutCaptures() throws Exception { public void testNullField() { String fieldName = RandomDocumentPicks.randomFieldName(random()); IngestDocument doc = RandomDocumentPicks.randomIngestDocument(random(), new HashMap<>()); - doc.setFieldValue(fieldName, null); + doc.setFieldValue(fieldName, (Object) null); GrokProcessor processor = new GrokProcessor( randomAlphaOfLength(10), null, @@ -138,7 +138,7 @@ public void testNullField() { public void testNullFieldWithIgnoreMissing() throws Exception { String fieldName = RandomDocumentPicks.randomFieldName(random()); IngestDocument originalIngestDocument = RandomDocumentPicks.randomIngestDocument(random(), new HashMap<>()); - originalIngestDocument.setFieldValue(fieldName, null); + originalIngestDocument.setFieldValue(fieldName, (Object) null); IngestDocument ingestDocument = new IngestDocument(originalIngestDocument); GrokProcessor processor = new GrokProcessor( randomAlphaOfLength(10), diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/RenameProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/RenameProcessorTests.java index 1d10c30909906..f472e9d9bacd4 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/RenameProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/RenameProcessorTests.java @@ -123,7 +123,7 @@ public void 
testRenameNewFieldAlreadyExists() throws Exception { public void testRenameExistingFieldNullValue() throws Exception { IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), new HashMap<>()); String fieldName = RandomDocumentPicks.randomFieldName(random()); - ingestDocument.setFieldValue(fieldName, null); + ingestDocument.setFieldValue(fieldName, (Object) null); String newFieldName = randomValueOtherThanMany(ingestDocument::hasField, () -> RandomDocumentPicks.randomFieldName(random())); Processor processor = createRenameProcessor(fieldName, newFieldName, false); processor.execute(ingestDocument); diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/SetProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/SetProcessorFactoryTests.java index bd6a29181c657..09c5c58e3664d 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/SetProcessorFactoryTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/SetProcessorFactoryTests.java @@ -167,4 +167,17 @@ public void testMediaType() throws Exception { ElasticsearchException e = expectThrows(ElasticsearchException.class, () -> factory.create(null, processorTag, null, config2)); assertThat(e.getMessage(), containsString("property does not contain a supported media type [" + expectedMediaType + "]")); } + + public void testCreateWithEmptyField() throws Exception { + // edge case: it's valid (according to the current validation) to *create* a set processor that has an empty string as its 'field'. + // it will fail at ingest execution time, but we don't reject it at pipeline creation time. + Map config = new HashMap<>(); + config.put("field", ""); + config.put("value", "value1"); + String processorTag = randomAlphaOfLength(10); + SetProcessor setProcessor = factory.create(null, processorTag, null, config); + assertThat(setProcessor.getTag(), equalTo(processorTag)); + assertThat(setProcessor.getField().newInstance(Map.of()).execute(), equalTo("")); + assertThat(setProcessor.getValue().copyAndResolve(Map.of()), equalTo("value1")); + } } diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/SetProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/SetProcessorTests.java index 5973e4fe5741c..6cef9d3ecde8a 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/SetProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/SetProcessorTests.java @@ -61,15 +61,11 @@ public void testSetFieldsTypeMismatch() throws Exception { IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), new HashMap<>()); ingestDocument.setFieldValue("field", "value"); Processor processor = createSetProcessor("field.inner", "value", null, true, false); - try { - processor.execute(ingestDocument); - fail("processor execute should have failed"); - } catch (IllegalArgumentException e) { - assertThat( - e.getMessage(), - equalTo("cannot set [inner] with parent object of type [java.lang.String] as " + "part of path [field.inner]") - ); - } + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> processor.execute(ingestDocument)); + assertThat( + exception.getMessage(), + equalTo("cannot set [inner] with parent object of type [java.lang.String] as part of path [field.inner]") + ); } public void testSetNewFieldWithOverrideDisabled() throws Exception { @@ -184,20 
- private static void assertMapEquals(Object actual, Object expected) { - if (expected instanceof Map<?, ?> expectedMap) { - Map<?, ?> actualMap = (Map<?, ?>) actual; - assertThat(actualMap.keySet().toArray(), arrayContainingInAnyOrder(expectedMap.keySet().toArray())); - for (Map.Entry<?, ?> entry : actualMap.entrySet()) { - if (entry.getValue() instanceof Map) { - assertMapEquals(entry.getValue(), expectedMap.get(entry.getKey())); - } else { - assertThat(entry.getValue(), equalTo(expectedMap.get(entry.getKey()))); - } - } - } - } - public void testCopyFromDeepCopiesNonPrimitiveMutableTypes() throws Exception { final String originalField = "originalField"; final String targetField = "targetField"; @@ -256,6 +238,15 @@ public void testCopyFromDeepCopiesNonPrimitiveMutableTypes() throws Exception { assertThat(ingestDocument.getFieldValue(targetField, Object.class), equalTo(preservedDate)); } + public void testSetEmptyField() { + // edge case: it's valid (according to the current validation) to *create* a set processor that has an empty string as its 'field', + // but it will fail at ingest execution time. + IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random()); + Processor processor = createSetProcessor("", "some_value", null, false, false); + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> processor.execute(ingestDocument)); + assertThat(exception.getMessage(), equalTo("path cannot be null nor empty")); + } + private static Processor createSetProcessor( String fieldName, Object fieldValue, @@ -273,4 +264,18 @@ private static Processor createSetProcessor( ignoreEmptyValue ); } + + private static void assertMapEquals(Object actual, Object expected) { + if (expected instanceof Map<?, ?> expectedMap) { + Map<?, ?> actualMap = (Map<?, ?>) actual; + assertThat(actualMap.keySet().toArray(), arrayContainingInAnyOrder(expectedMap.keySet().toArray())); + for (Map.Entry<?, ?> entry : actualMap.entrySet()) { + if (entry.getValue() instanceof Map) { + assertMapEquals(entry.getValue(), expectedMap.get(entry.getKey())); + } else { + assertThat(entry.getValue(), equalTo(expectedMap.get(entry.getKey()))); + } + } + } + } } diff --git a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/270_set_processor.yml b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/270_set_processor.yml index 594ff52c2b27a..f74e9a5752b80 100644 --- a/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/270_set_processor.yml +++ b/modules/ingest-common/src/yamlRestTest/resources/rest-api-spec/test/ingest/270_set_processor.yml @@ -227,3 +227,71 @@ teardown: - match: { _source.foo: "hello" } - match: { _source.method_call_is_ignored: "" } - match: { _source.missing_method_is_ignored: "" } + +--- +"Test set processor with mustache edge cases": + - do: + ingest.put_pipeline: + id: "1" + body: > + { + "processors" : [ + { + "script": { + "description": "Set a field 'foo' with a value of '{{bar}}' -- no mustache here, just strings", + "lang": "painless", + "source": "ctx.foo = '{{bar}}'" + } + }, + { + "set": { + "description": "Dereference the foo field via actual mustache", + "field": "result_1", + "value": "{{foo}}" + } + }, + { + "set": { + "description": "Dereference the foo field via copy_from", + "field": "result_2", + "copy_from": "foo" + } + }, + { + "set": { + "description": "Copy the original bar value into old_bar", + "field": "old_bar", + "copy_from": "bar" + } + },
+ { + "set": { + "description": "Set whatever field value_bar refers to (it's bar) to 3", + "field": "{{value_bar}}", + "value": 3 + } + } + ] + } + - match: { acknowledged: true } + + - do: + index: + index: test + id: "1" + pipeline: "1" + body: { + foo: 1, + bar: 2, + value_bar: "bar" + } + + - do: + get: + index: test + id: "1" + - match: { _source.foo: "{{bar}}" } + - match: { _source.result_1: "{{bar}}" } + - match: { _source.result_2: "{{bar}}" } + - match: { _source.old_bar: 2 } + - match: { _source.bar: 3 } diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/DatabaseNodeService.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/DatabaseNodeService.java index ab2d96c7d198d..ec17915f7d622 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/DatabaseNodeService.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/DatabaseNodeService.java @@ -464,15 +464,19 @@ void retrieveDatabase( // so it is ok if this happens in a blocking manner on a thread from generic thread pool. // This makes the code easier to understand and maintain. SearchResponse searchResponse = client.search(searchRequest).actionGet(); - SearchHit[] hits = searchResponse.getHits().getHits(); + try { + SearchHit[] hits = searchResponse.getHits().getHits(); - if (searchResponse.getHits().getHits().length == 0) { - failureHandler.accept(new ResourceNotFoundException("chunk document with id [" + id + "] not found")); - return; + if (searchResponse.getHits().getHits().length == 0) { + failureHandler.accept(new ResourceNotFoundException("chunk document with id [" + id + "] not found")); + return; + } + byte[] data = (byte[]) hits[0].getSourceAsMap().get("data"); + md.update(data); + chunkConsumer.accept(data); + } finally { + searchResponse.decRef(); } - byte[] data = (byte[]) hits[0].getSourceAsMap().get("data"); - md.update(data); - chunkConsumer.accept(data); } String actualMd5 = MessageDigests.toHexString(md.digest()); if (Objects.equals(expectedMd5, actualMd5)) { diff --git a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/DatabaseNodeServiceTests.java b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/DatabaseNodeServiceTests.java index 58089f792226a..c7dbee47ea823 100644 --- a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/DatabaseNodeServiceTests.java +++ b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/DatabaseNodeServiceTests.java @@ -15,7 +15,6 @@ import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.search.SearchResponseSections; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; @@ -38,6 +37,8 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.core.CheckedRunnable; +import org.elasticsearch.core.Releasable; +import org.elasticsearch.core.Releasables; import org.elasticsearch.core.Strings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.Index; @@ -57,6 +58,7 @@ import org.junit.After; import org.junit.Before; import org.mockito.ArgumentCaptor; +import org.mockito.stubbing.Answer; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; @@ -68,11 +70,13 @@ import java.security.MessageDigest; import 
java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Set; import java.util.UUID; +import java.util.concurrent.CopyOnWriteArrayList; import java.util.function.Consumer; import java.util.stream.Collectors; import java.util.stream.IntStream; @@ -109,6 +113,8 @@ public class DatabaseNodeServiceTests extends ESTestCase { private IngestService ingestService; private ClusterService clusterService; + private final Collection<Releasable> toRelease = new CopyOnWriteArrayList<>(); + @Before public void setup() throws IOException { final Path geoIpConfigDir = createTempDir(); @@ -133,6 +139,8 @@ public void setup() throws IOException { public void cleanup() { resourceWatcherService.close(); threadPool.shutdownNow(); + Releasables.close(toRelease); + toRelease.clear(); } public void testCheckDatabases() throws Exception { @@ -321,19 +329,14 @@ private String mockSearches(String databaseName, int firstChunk, int lastChunk) } SearchHits hits = new SearchHits(new SearchHit[] { hit }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1f); - SearchResponse searchResponse = new SearchResponse( - new SearchResponseSections(hits, null, null, false, null, null, 0), - null, - 1, - 1, - 0, - 1L, - null, - null - ); + SearchResponse searchResponse = new SearchResponse(hits, null, null, false, null, null, 0, null, 1, 1, 0, 1L, null, null); + toRelease.add(searchResponse::decRef); @SuppressWarnings("unchecked") ActionFuture<SearchResponse> actionFuture = mock(ActionFuture.class); - when(actionFuture.actionGet()).thenReturn(searchResponse); + when(actionFuture.actionGet()).thenAnswer((Answer<SearchResponse>) invocation -> { + searchResponse.incRef(); + return searchResponse; + }); requestMap.put(databaseName + "_" + i, actionFuture); } when(client.search(any())).thenAnswer(invocationOnMock -> { diff --git a/modules/ingest-user-agent/src/main/java/org/elasticsearch/ingest/useragent/DeviceTypeParser.java b/modules/ingest-user-agent/src/main/java/org/elasticsearch/ingest/useragent/DeviceTypeParser.java index 40e54c8fe5f7e..6117ebc6aa319 100644 --- a/modules/ingest-user-agent/src/main/java/org/elasticsearch/ingest/useragent/DeviceTypeParser.java +++ b/modules/ingest-user-agent/src/main/java/org/elasticsearch/ingest/useragent/DeviceTypeParser.java @@ -39,23 +39,24 @@ public class DeviceTypeParser { private final HashMap<String, ArrayList<DeviceTypeSubPattern>> deviceTypePatterns = new HashMap<>(); public void init(InputStream regexStream) throws IOException { - XContentParser yamlParser = XContentFactory.xContent(XContentType.YAML) - .createParser(XContentParserConfiguration.EMPTY, regexStream); - - XContentParser.Token token = yamlParser.nextToken(); - - if (token == XContentParser.Token.START_OBJECT) { - token = yamlParser.nextToken(); - - for (; token != null; token = yamlParser.nextToken()) { - String currentName = yamlParser.currentName(); - if (token == XContentParser.Token.FIELD_NAME && patternListKeys.contains(currentName)) { - List<Map<String, String>> parserConfigurations = readParserConfigurations(yamlParser); - ArrayList<DeviceTypeSubPattern> subPatterns = new ArrayList<>(); - for (Map<String, String> map : parserConfigurations) { - subPatterns.add(new DeviceTypeSubPattern(Pattern.compile((map.get("regex"))), map.get("replacement"))); + try ( + XContentParser yamlParser = XContentFactory.xContent(XContentType.YAML) + .createParser(XContentParserConfiguration.EMPTY, regexStream) + ) { + XContentParser.Token token = yamlParser.nextToken(); + if (token == XContentParser.Token.START_OBJECT) { + token = yamlParser.nextToken(); + + for (; token != null; token = yamlParser.nextToken()) { + String currentName = yamlParser.currentName(); + if (token == XContentParser.Token.FIELD_NAME && patternListKeys.contains(currentName)) { + List<Map<String, String>> parserConfigurations = readParserConfigurations(yamlParser); + ArrayList<DeviceTypeSubPattern> subPatterns = new ArrayList<>(); + for (Map<String, String> map : parserConfigurations) { + subPatterns.add(new DeviceTypeSubPattern(Pattern.compile((map.get("regex"))), map.get("replacement"))); + } + deviceTypePatterns.put(currentName, subPatterns); } - deviceTypePatterns.put(currentName, subPatterns); } } } diff --git a/modules/ingest-user-agent/src/main/java/org/elasticsearch/ingest/useragent/UserAgentParser.java b/modules/ingest-user-agent/src/main/java/org/elasticsearch/ingest/useragent/UserAgentParser.java index 41ced0c7ff4cc..515c31735c313 100644 --- a/modules/ingest-user-agent/src/main/java/org/elasticsearch/ingest/useragent/UserAgentParser.java +++ b/modules/ingest-user-agent/src/main/java/org/elasticsearch/ingest/useragent/UserAgentParser.java @@ -48,59 +48,62 @@ final class UserAgentParser { private void init(InputStream regexStream) throws IOException { // EMPTY is safe here because we don't use namedObject - XContentParser yamlParser = XContentFactory.xContent(XContentType.YAML) - .createParser(XContentParserConfiguration.EMPTY, regexStream); + try ( + XContentParser yamlParser = XContentFactory.xContent(XContentType.YAML) + .createParser(XContentParserConfiguration.EMPTY, regexStream) + ) { - XContentParser.Token token = yamlParser.nextToken(); + XContentParser.Token token = yamlParser.nextToken(); - if (token == XContentParser.Token.START_OBJECT) { - token = yamlParser.nextToken(); + if (token == XContentParser.Token.START_OBJECT) { + token = yamlParser.nextToken(); - for (; token != null; token = yamlParser.nextToken()) { - if (token == XContentParser.Token.FIELD_NAME && yamlParser.currentName().equals("user_agent_parsers")) { - List<Map<String, String>> parserConfigurations = readParserConfigurations(yamlParser); - - for (Map<String, String> map : parserConfigurations) { - uaPatterns.add( - new UserAgentSubpattern( - compilePattern(map.get("regex"), map.get("regex_flag")), - map.get("family_replacement"), - map.get("v1_replacement"), - map.get("v2_replacement"), - map.get("v3_replacement"), - map.get("v4_replacement") - ) - ); - } - } else if (token == XContentParser.Token.FIELD_NAME && yamlParser.currentName().equals("os_parsers")) { - List<Map<String, String>> parserConfigurations = readParserConfigurations(yamlParser); - - for (Map<String, String> map : parserConfigurations) { - osPatterns.add( - new UserAgentSubpattern( - compilePattern(map.get("regex"), map.get("regex_flag")), - map.get("os_replacement"), - map.get("os_v1_replacement"), - map.get("os_v2_replacement"), - map.get("os_v3_replacement"), - map.get("os_v4_replacement") - ) - ); - } - } else if (token == XContentParser.Token.FIELD_NAME && yamlParser.currentName().equals("device_parsers")) { - List<Map<String, String>> parserConfigurations = readParserConfigurations(yamlParser); - - for (Map<String, String> map : parserConfigurations) { - devicePatterns.add( - new UserAgentSubpattern( - compilePattern(map.get("regex"), map.get("regex_flag")), - map.get("device_replacement"), - null, - null, - null, - null - ) - ); + for (; token != null; token = yamlParser.nextToken()) { + if (token == XContentParser.Token.FIELD_NAME && yamlParser.currentName().equals("user_agent_parsers")) { + List<Map<String, String>> parserConfigurations = readParserConfigurations(yamlParser); + + for (Map<String, String> map : parserConfigurations) { + uaPatterns.add( + new UserAgentSubpattern( + compilePattern(map.get("regex"), map.get("regex_flag")), + map.get("family_replacement"), + map.get("v1_replacement"), + map.get("v2_replacement"), + map.get("v3_replacement"), + map.get("v4_replacement") + ) + ); + } + } else if (token == XContentParser.Token.FIELD_NAME && yamlParser.currentName().equals("os_parsers")) { + List<Map<String, String>> parserConfigurations = readParserConfigurations(yamlParser); + + for (Map<String, String> map : parserConfigurations) { + osPatterns.add( + new UserAgentSubpattern( + compilePattern(map.get("regex"), map.get("regex_flag")), + map.get("os_replacement"), + map.get("os_v1_replacement"), + map.get("os_v2_replacement"), + map.get("os_v3_replacement"), + map.get("os_v4_replacement") + ) + ); + } + } else if (token == XContentParser.Token.FIELD_NAME && yamlParser.currentName().equals("device_parsers")) { + List<Map<String, String>> parserConfigurations = readParserConfigurations(yamlParser); + + for (Map<String, String> map : parserConfigurations) { + devicePatterns.add( + new UserAgentSubpattern( + compilePattern(map.get("regex"), map.get("regex_flag")), + map.get("device_replacement"), + null, + null, + null, + null + ) + ); + } } } } diff --git a/modules/ingest-user-agent/src/test/java/org/elasticsearch/ingest/useragent/DeviceTypeParserTests.java b/modules/ingest-user-agent/src/test/java/org/elasticsearch/ingest/useragent/DeviceTypeParserTests.java index 6543ef2095b87..582a40fb8a210 100644 --- a/modules/ingest-user-agent/src/test/java/org/elasticsearch/ingest/useragent/DeviceTypeParserTests.java +++ b/modules/ingest-user-agent/src/test/java/org/elasticsearch/ingest/useragent/DeviceTypeParserTests.java @@ -31,36 +31,39 @@ public class DeviceTypeParserTests extends ESTestCase { private static DeviceTypeParser deviceTypeParser; private ArrayList<HashMap<String, String>> readTestDevices(InputStream regexStream, String keyName) throws IOException { - XContentParser yamlParser = XContentFactory.xContent(XContentType.YAML) - .createParser(XContentParserConfiguration.EMPTY, regexStream); + try ( + XContentParser yamlParser = XContentFactory.xContent(XContentType.YAML) + .createParser(XContentParserConfiguration.EMPTY, regexStream) + ) { - XContentParser.Token token = yamlParser.nextToken(); + XContentParser.Token token = yamlParser.nextToken(); - ArrayList<HashMap<String, String>> testDevices = new ArrayList<>(); + ArrayList<HashMap<String, String>> testDevices = new ArrayList<>(); - if (token == XContentParser.Token.START_OBJECT) { - token = yamlParser.nextToken(); + if (token == XContentParser.Token.START_OBJECT) { + token = yamlParser.nextToken(); - for (; token != null; token = yamlParser.nextToken()) { - String currentName = yamlParser.currentName(); - if (token == XContentParser.Token.FIELD_NAME && currentName.equals(keyName)) { - List<Map<String, String>> parserConfigurations = readParserConfigurations(yamlParser); + for (; token != null; token = yamlParser.nextToken()) { + String currentName = yamlParser.currentName(); + if (token == XContentParser.Token.FIELD_NAME && currentName.equals(keyName)) { + List<Map<String, String>> parserConfigurations = readParserConfigurations(yamlParser); - for (Map<String, String> map : parserConfigurations) { - HashMap<String, String> testDevice = new HashMap<>(); + for (Map<String, String> map : parserConfigurations) { + HashMap<String, String> testDevice = new HashMap<>(); - testDevice.put("type", map.get("type")); - testDevice.put("os", map.get("os")); - testDevice.put("browser", map.get("browser")); - testDevice.put("device", map.get("device")); - testDevices.add(testDevice); + testDevice.put("type", map.get("type")); + testDevice.put("os", map.get("os")); + testDevice.put("browser", map.get("browser")); + testDevice.put("device", map.get("device")); + testDevices.add(testDevice); + } }
} } - } - return testDevices; + return testDevices; + } } private static VersionedName getVersionName(String name) { diff --git a/modules/lang-mustache/src/internalClusterTest/java/org/elasticsearch/script/mustache/MultiSearchTemplateIT.java b/modules/lang-mustache/src/internalClusterTest/java/org/elasticsearch/script/mustache/MultiSearchTemplateIT.java index d9e346454aefe..0c3376c9c8a90 100644 --- a/modules/lang-mustache/src/internalClusterTest/java/org/elasticsearch/script/mustache/MultiSearchTemplateIT.java +++ b/modules/lang-mustache/src/internalClusterTest/java/org/elasticsearch/script/mustache/MultiSearchTemplateIT.java @@ -31,6 +31,7 @@ import java.util.Map; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.arrayWithSize; import static org.hamcrest.Matchers.containsString; @@ -141,42 +142,43 @@ public void testBasic() throws Exception { search5.setScriptParams(params5); multiRequest.add(search5); - MultiSearchTemplateResponse response = client().execute(MustachePlugin.MULTI_SEARCH_TEMPLATE_ACTION, multiRequest).get(); - assertThat(response.getResponses(), arrayWithSize(5)); - assertThat(response.getTook().millis(), greaterThan(0L)); - - MultiSearchTemplateResponse.Item response1 = response.getResponses()[0]; - assertThat(response1.isFailure(), is(false)); - SearchTemplateResponse searchTemplateResponse1 = response1.getResponse(); - assertThat(searchTemplateResponse1.hasResponse(), is(true)); - assertHitCount(searchTemplateResponse1.getResponse(), (numDocs / 2) + (numDocs % 2)); - assertThat(searchTemplateResponse1.getSource().utf8ToString(), equalTo(""" - {"query":{"match":{"odd":"true"}}}""")); - - MultiSearchTemplateResponse.Item response2 = response.getResponses()[1]; - assertThat(response2.isFailure(), is(false)); - SearchTemplateResponse searchTemplateResponse2 = response2.getResponse(); - assertThat(searchTemplateResponse2.hasResponse(), is(false)); - assertThat(searchTemplateResponse2.getSource().utf8ToString(), equalTo(""" - {"query":{"match_phrase_prefix":{"message":"quick brown f"}}}""")); - - MultiSearchTemplateResponse.Item response3 = response.getResponses()[2]; - assertThat(response3.isFailure(), is(false)); - SearchTemplateResponse searchTemplateResponse3 = response3.getResponse(); - assertThat(searchTemplateResponse3.hasResponse(), is(true)); - assertHitCount(searchTemplateResponse3.getResponse(), (numDocs / 2)); - assertThat(searchTemplateResponse3.getSource().utf8ToString(), equalTo(""" - {"query":{"term":{"odd":"false"}}}""")); - - MultiSearchTemplateResponse.Item response4 = response.getResponses()[3]; - assertThat(response4.isFailure(), is(true)); - assertThat(response4.getFailure(), instanceOf(IndexNotFoundException.class)); - assertThat(response4.getFailure().getMessage(), equalTo("no such index [unknown]")); - - MultiSearchTemplateResponse.Item response5 = response.getResponses()[4]; - assertThat(response5.isFailure(), is(true)); - assertNull(response5.getResponse()); - assertThat(response5.getFailure(), instanceOf(XContentParseException.class)); + assertResponse(client().execute(MustachePlugin.MULTI_SEARCH_TEMPLATE_ACTION, multiRequest), response -> { + assertThat(response.getResponses(), arrayWithSize(5)); + assertThat(response.getTook().millis(), greaterThan(0L)); + + MultiSearchTemplateResponse.Item response1 = 
response.getResponses()[0]; + assertThat(response1.isFailure(), is(false)); + SearchTemplateResponse searchTemplateResponse1 = response1.getResponse(); + assertThat(searchTemplateResponse1.hasResponse(), is(true)); + assertHitCount(searchTemplateResponse1.getResponse(), (numDocs / 2) + (numDocs % 2)); + assertThat(searchTemplateResponse1.getSource().utf8ToString(), equalTo(""" + {"query":{"match":{"odd":"true"}}}""")); + + MultiSearchTemplateResponse.Item response2 = response.getResponses()[1]; + assertThat(response2.isFailure(), is(false)); + SearchTemplateResponse searchTemplateResponse2 = response2.getResponse(); + assertThat(searchTemplateResponse2.hasResponse(), is(false)); + assertThat(searchTemplateResponse2.getSource().utf8ToString(), equalTo(""" + {"query":{"match_phrase_prefix":{"message":"quick brown f"}}}""")); + + MultiSearchTemplateResponse.Item response3 = response.getResponses()[2]; + assertThat(response3.isFailure(), is(false)); + SearchTemplateResponse searchTemplateResponse3 = response3.getResponse(); + assertThat(searchTemplateResponse3.hasResponse(), is(true)); + assertHitCount(searchTemplateResponse3.getResponse(), (numDocs / 2)); + assertThat(searchTemplateResponse3.getSource().utf8ToString(), equalTo(""" + {"query":{"term":{"odd":"false"}}}""")); + + MultiSearchTemplateResponse.Item response4 = response.getResponses()[3]; + assertThat(response4.isFailure(), is(true)); + assertThat(response4.getFailure(), instanceOf(IndexNotFoundException.class)); + assertThat(response4.getFailure().getMessage(), equalTo("no such index [unknown]")); + + MultiSearchTemplateResponse.Item response5 = response.getResponses()[4]; + assertThat(response5.isFailure(), is(true)); + assertNull(response5.getResponse()); + assertThat(response5.getFailure(), instanceOf(XContentParseException.class)); + }); } /** @@ -193,21 +195,24 @@ public void testCCSCheckCompatibility() throws Exception { searchTemplateRequest.setRequest(new SearchRequest()); MultiSearchTemplateRequest request = new MultiSearchTemplateRequest(); request.add(searchTemplateRequest); - MultiSearchTemplateResponse multiSearchTemplateResponse = client().execute(MustachePlugin.MULTI_SEARCH_TEMPLATE_ACTION, request) - .get(); - Item response = multiSearchTemplateResponse.getResponses()[0]; - assertTrue(response.isFailure()); - Exception ex = response.getFailure(); - assertThat(ex.getMessage(), containsString("[class org.elasticsearch.action.search.SearchRequest] is not compatible with version")); - assertThat(ex.getMessage(), containsString("'search.check_ccs_compatibility' setting is enabled.")); - - String expectedCause = Strings.format( - "[fail_before_current_version] was released first in version %s, failed compatibility " - + "check trying to send it to node with version %s", - FailBeforeCurrentVersionQueryBuilder.FUTURE_VERSION, - TransportVersions.MINIMUM_CCS_VERSION - ); - String actualCause = ex.getCause().getMessage(); - assertEquals(expectedCause, actualCause); + assertResponse(client().execute(MustachePlugin.MULTI_SEARCH_TEMPLATE_ACTION, request), multiSearchTemplateResponse -> { + Item response = multiSearchTemplateResponse.getResponses()[0]; + assertTrue(response.isFailure()); + Exception ex = response.getFailure(); + assertThat( + ex.getMessage(), + containsString("[class org.elasticsearch.action.search.SearchRequest] is not compatible with version") + ); + assertThat(ex.getMessage(), containsString("'search.check_ccs_compatibility' setting is enabled.")); + + String expectedCause = Strings.format( + 
"[fail_before_current_version] was released first in version %s, failed compatibility " + + "check trying to send it to node with version %s", + FailBeforeCurrentVersionQueryBuilder.FUTURE_VERSION, + TransportVersions.MINIMUM_CCS_VERSION + ); + String actualCause = ex.getCause().getMessage(); + assertEquals(expectedCause, actualCause); + }); } } diff --git a/modules/lang-mustache/src/internalClusterTest/java/org/elasticsearch/script/mustache/SearchTemplateIT.java b/modules/lang-mustache/src/internalClusterTest/java/org/elasticsearch/script/mustache/SearchTemplateIT.java index 7fffafaf14e5b..9684defa19080 100644 --- a/modules/lang-mustache/src/internalClusterTest/java/org/elasticsearch/script/mustache/SearchTemplateIT.java +++ b/modules/lang-mustache/src/internalClusterTest/java/org/elasticsearch/script/mustache/SearchTemplateIT.java @@ -18,6 +18,7 @@ import org.elasticsearch.search.DummyQueryParserPlugin; import org.elasticsearch.search.SearchService; import org.elasticsearch.test.ESSingleNodeTestCase; +import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; import org.elasticsearch.xcontent.XContentParseException; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xcontent.json.JsonXContent; @@ -32,7 +33,7 @@ import java.util.concurrent.ExecutionException; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -77,13 +78,13 @@ public void testSearchRequestFail() throws Exception { .get() ); - SearchTemplateResponse searchResponse = new SearchTemplateRequestBuilder(client()).setRequest(searchRequest) - .setScript(query) - .setScriptType(ScriptType.INLINE) - .setScriptParams(Collections.singletonMap("my_size", 1)) - .get(); - - assertThat(searchResponse.getResponse().getHits().getHits().length, equalTo(1)); + assertResponse( + new SearchTemplateRequestBuilder(client()).setRequest(searchRequest) + .setScript(query) + .setScriptType(ScriptType.INLINE) + .setScriptParams(Collections.singletonMap("my_size", 1)), + searchResponse -> assertThat(searchResponse.getResponse().getHits().getHits().length, equalTo(1)) + ); } /** @@ -101,8 +102,10 @@ public void testTemplateQueryAsEscapedString() throws Exception { }"""; SearchTemplateRequest request = SearchTemplateRequest.fromXContent(createParser(JsonXContent.jsonXContent, query)); request.setRequest(searchRequest); - SearchTemplateResponse searchResponse = client().execute(MustachePlugin.SEARCH_TEMPLATE_ACTION, request).get(); - assertThat(searchResponse.getResponse().getHits().getHits().length, equalTo(1)); + assertResponse( + client().execute(MustachePlugin.SEARCH_TEMPLATE_ACTION, request), + searchResponse -> assertThat(searchResponse.getResponse().getHits().getHits().length, equalTo(1)) + ); } /** @@ -122,8 +125,10 @@ public void testTemplateQueryAsEscapedStringStartingWithConditionalClause() thro }"""; SearchTemplateRequest request = SearchTemplateRequest.fromXContent(createParser(JsonXContent.jsonXContent, templateString)); request.setRequest(searchRequest); - SearchTemplateResponse searchResponse = client().execute(MustachePlugin.SEARCH_TEMPLATE_ACTION, request).get(); - assertThat(searchResponse.getResponse().getHits().getHits().length, 
equalTo(1)); + assertResponse( + client().execute(MustachePlugin.SEARCH_TEMPLATE_ACTION, request), + searchResponse -> assertThat(searchResponse.getResponse().getHits().getHits().length, equalTo(1)) + ); } /** @@ -143,8 +148,10 @@ public void testTemplateQueryAsEscapedStringWithConditionalClauseAtEnd() throws }"""; SearchTemplateRequest request = SearchTemplateRequest.fromXContent(createParser(JsonXContent.jsonXContent, templateString)); request.setRequest(searchRequest); - SearchTemplateResponse searchResponse = client().execute(MustachePlugin.SEARCH_TEMPLATE_ACTION, request).get(); - assertThat(searchResponse.getResponse().getHits().getHits().length, equalTo(1)); + assertResponse( + client().execute(MustachePlugin.SEARCH_TEMPLATE_ACTION, request), + searchResponse -> assertThat(searchResponse.getResponse().getHits().getHits().length, equalTo(1)) + ); } public void testIndexedTemplateClient() throws Exception { @@ -178,12 +185,13 @@ public void testIndexedTemplateClient() throws Exception { Map<String, Object> templateParams = new HashMap<>(); templateParams.put("fieldParam", "foo"); - SearchTemplateResponse searchResponse = new SearchTemplateRequestBuilder(client()).setRequest(new SearchRequest("test")) - .setScript("testTemplate") - .setScriptType(ScriptType.STORED) - .setScriptParams(templateParams) - .get(); - assertHitCount(searchResponse.getResponse(), 4); + assertHitCount( + new SearchTemplateRequestBuilder(client()).setRequest(new SearchRequest("test")) + .setScript("testTemplate") + .setScriptType(ScriptType.STORED) + .setScriptParams(templateParams), + 4 + ); assertAcked(clusterAdmin().prepareDeleteStoredScript("testTemplate")); @@ -275,13 +283,13 @@ public void testIndexedTemplate() throws Exception { Map<String, Object> templateParams = new HashMap<>(); templateParams.put("fieldParam", "foo"); - - SearchTemplateResponse searchResponse = new SearchTemplateRequestBuilder(client()).setRequest(new SearchRequest().indices("test")) - .setScript("1a") - .setScriptType(ScriptType.STORED) - .setScriptParams(templateParams) - .get(); - assertHitCount(searchResponse.getResponse(), 4); + assertHitCount( + new SearchTemplateRequestBuilder(client()).setRequest(new SearchRequest().indices("test")) + .setScript("1a") + .setScriptType(ScriptType.STORED) + .setScriptParams(templateParams), + 4 + ); expectThrows( ResourceNotFoundException.class, @@ -293,12 +301,13 @@ public void testIndexedTemplate() throws Exception { ); templateParams.put("fieldParam", "bar"); - searchResponse = new SearchTemplateRequestBuilder(client()).setRequest(new SearchRequest("test")) - .setScript("2") - .setScriptType(ScriptType.STORED) - .setScriptParams(templateParams) - .get(); - assertHitCount(searchResponse.getResponse(), 1); + assertHitCount( + new SearchTemplateRequestBuilder(client()).setRequest(new SearchRequest("test")) + .setScript("2") + .setScriptType(ScriptType.STORED) + .setScriptParams(templateParams), + 1 + ); } // Relates to #10397 @@ -354,13 +363,14 @@ public void testIndexedTemplateOverwrite() throws Exception { .setId("git01") .setContent(new BytesArray(query.replace("{{slop}}", Integer.toString(0))), XContentType.JSON) ); - - SearchTemplateResponse searchResponse = new SearchTemplateRequestBuilder(client()).setRequest(new SearchRequest("testindex")) - .setScript("git01") - .setScriptType(ScriptType.STORED) - .setScriptParams(templateParams) - .get(); - assertHitCount(searchResponse.getResponse(), 1); + assertHitCount( + new SearchTemplateRequestBuilder(client()).setRequest(new SearchRequest("testindex")) + .setScript("git01") + .setScriptType(ScriptType.STORED) + .setScriptParams(templateParams), + 1 + ); } }
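A note on the SearchTemplateIT rewrites above: they replace `builder.get()` plus manual assertions with helpers that consume the response inside a callback, so a ref-counted response is always released. A hedged sketch of what such a consume-and-release helper amounts to (illustrative types only; the real helpers are ElasticsearchAssertions.assertResponse and the file-local assertHitCount overload added at the bottom of this file):

import java.util.function.Consumer;

final class ConsumeAndReleaseSketch {
    // Stand-in for org.elasticsearch.core.RefCounted; only decRef matters for the sketch.
    interface Releasable {
        void decRef();
    }

    static <R extends Releasable> void withResponse(R response, Consumer<R> consumer) {
        try {
            consumer.accept(response); // assertions run while a reference is still held
        } finally {
            response.decRef(); // released on success and on assertion failure alike
        }
    }
}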
@@ -397,12 +407,13 @@ public void testIndexedTemplateWithArray() throws Exception { String[] fieldParams = { "foo", "bar" }; arrayTemplateParams.put("fieldParam", fieldParams); - SearchTemplateResponse searchResponse = new SearchTemplateRequestBuilder(client()).setRequest(new SearchRequest("test")) - .setScript("4") - .setScriptType(ScriptType.STORED) - .setScriptParams(arrayTemplateParams) - .get(); - assertHitCount(searchResponse.getResponse(), 5); + assertHitCount( + new SearchTemplateRequestBuilder(client()).setRequest(new SearchRequest("test")) + .setScript("4") + .setScriptType(ScriptType.STORED) + .setScriptParams(arrayTemplateParams), + 5 + ); } /** @@ -438,4 +449,8 @@ public void testCCSCheckCompatibility() throws Exception { String actualCause = underlying.getMessage().replaceAll("\\d{7,}", "XXXXXXX"); assertEquals(expectedCause, actualCause); } + + public static void assertHitCount(SearchTemplateRequestBuilder requestBuilder, long expectedHitCount) { + assertResponse(requestBuilder, response -> ElasticsearchAssertions.assertHitCount(response.getResponse(), expectedHitCount)); + } } diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponse.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponse.java index a26352eb3d8c7..9bdabcede8ec6 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponse.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponse.java @@ -18,8 +18,11 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.core.AbstractRefCounted; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.RefCounted; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.transport.LeakTracker; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; @@ -99,6 +102,20 @@ public String toString() { private final Item[] items; private final long tookInMillis; + private final RefCounted refCounted = LeakTracker.wrap(new AbstractRefCounted() { + @Override + protected void closeInternal() { + for (int i = 0; i < items.length; i++) { + Item item = items[i]; + var r = item.response; + if (r != null) { + r.decRef(); + items[i] = null; + } + } + } + }); + MultiSearchTemplateResponse(StreamInput in) throws IOException { super(in); items = in.readArray(Item::new, Item[]::new); @@ -162,6 +179,26 @@ public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params par return builder; } + @Override + public void incRef() { + refCounted.incRef(); + } + + @Override + public boolean tryIncRef() { + return refCounted.tryIncRef(); + } + + @Override + public boolean decRef() { + return refCounted.decRef(); + } + + @Override + public boolean hasReferences() { + return refCounted.hasReferences(); + } + static final class Fields { static final String RESPONSES = "responses"; static final String STATUS = "status"; @@ -179,6 +216,7 @@ public static MultiSearchTemplateResponse fromXContext(XContentParser parser) { if (item.getResponse() != null) { stResponse = new SearchTemplateResponse(); stResponse.setResponse(item.getResponse()); + item.getResponse().incRef();
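A note on the MultiSearchTemplateResponse hunk above: it introduces the wrapper side of this PR's ref-counting. The response owns one reference to each wrapped item (taken via incRef() when the item is attached, released exactly once in closeInternal()), and LeakTracker.wrap reports wrappers that are never fully released. A hedged sketch of that shape, with illustrative names; AbstractRefCounted, RefCounted, and LeakTracker are the real APIs used above:

import org.elasticsearch.core.AbstractRefCounted;
import org.elasticsearch.core.RefCounted;
import org.elasticsearch.transport.LeakTracker;

// Illustrative stand-in for a response type that wraps another ref-counted object.
final class WrappedResponseSketch implements RefCounted {
    private RefCounted wrapped; // e.g. a SearchResponse

    private final RefCounted refCounted = LeakTracker.wrap(new AbstractRefCounted() {
        @Override
        protected void closeInternal() {
            if (wrapped != null) {
                wrapped.decRef(); // release the single reference this wrapper owns
            }
        }
    });

    void setWrapped(RefCounted inner) {
        inner.incRef(); // take ownership of one reference before storing it
        this.wrapped = inner;
    }

    @Override
    public void incRef() {
        refCounted.incRef();
    }

    @Override
    public boolean tryIncRef() {
        return refCounted.tryIncRef();
    }

    @Override
    public boolean decRef() {
        return refCounted.decRef();
    }

    @Override
    public boolean hasReferences() {
        return refCounted.hasReferences();
    }
}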
} templateResponses[i++] = new Item(stResponse, item.getFailure()); } diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateResponse.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateResponse.java index b4b804bf22e92..9451ac089476e 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateResponse.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateResponse.java @@ -14,7 +14,10 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ChunkedToXContent; +import org.elasticsearch.core.AbstractRefCounted; +import org.elasticsearch.core.RefCounted; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.transport.LeakTracker; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; @@ -35,6 +38,15 @@ public class SearchTemplateResponse extends ActionResponse implements ToXContent /** Contains the search response, if any **/ private SearchResponse response; + private final RefCounted refCounted = LeakTracker.wrap(new AbstractRefCounted() { + @Override + protected void closeInternal() { + if (response != null) { + response.decRef(); + } + } + }); + SearchTemplateResponse() {} SearchTemplateResponse(StreamInput in) throws IOException { @@ -74,6 +86,26 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalWriteable(response); } + @Override + public void incRef() { + refCounted.incRef(); + } + + @Override + public boolean tryIncRef() { + return refCounted.tryIncRef(); + } + + @Override + public boolean decRef() { + return refCounted.decRef(); + } + + @Override + public boolean hasReferences() { + return refCounted.hasReferences(); + } + public static SearchTemplateResponse fromXContent(XContentParser parser) throws IOException { SearchTemplateResponse searchTemplateResponse = new SearchTemplateResponse(); Map<String, Object> contentAsMap = parser.map(); @@ -85,11 +117,12 @@ public static SearchTemplateResponse fromXContent(XContentParser parser) throws } else { XContentType contentType = parser.contentType(); XContentBuilder builder = XContentFactory.contentBuilder(contentType).map(contentAsMap); - XContentParser searchResponseParser = contentType.xContent() - .createParser(parser.getXContentRegistry(), parser.getDeprecationHandler(), BytesReference.bytes(builder).streamInput()); - - SearchResponse searchResponse = SearchResponse.fromXContent(searchResponseParser); - searchTemplateResponse.setResponse(searchResponse); + try ( + XContentParser searchResponseParser = contentType.xContent() + .createParser(parser.getXContentRegistry(), parser.getDeprecationHandler(), BytesReference.bytes(builder).streamInput()) + ) { + searchTemplateResponse.setResponse(SearchResponse.fromXContent(searchResponseParser)); + } } return searchTemplateResponse; } }
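A note ahead of the two transport-action hunks below: they follow a single ownership rule. The action allocates the response eagerly, so every path that fails to hand it off must release it exactly once, while ActionListener.respondAndRelease transfers ownership to the listener on success. A small illustrative sketch of that control flow (all names here are made up; only the rule itself comes from the hunks):

import java.util.function.Consumer;

final class OwnershipSketch {
    interface RefCounted {
        void decRef();
    }

    static <T extends RefCounted> void execute(T response, Runnable work, Consumer<T> onSuccess) {
        boolean handedOff = false;
        try {
            work.run();                 // conversion etc.; may throw before any hand-off
            onSuccess.accept(response); // ownership transfers to the listener here
            handedOff = true;
        } finally {
            if (handedOff == false) {
                response.decRef();      // failure path: we still own the reference, release it
            }
        }
    }
}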
diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/TransportMultiSearchTemplateAction.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/TransportMultiSearchTemplateAction.java index 4b0c365ba8b13..11871978e433a 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/TransportMultiSearchTemplateAction.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/TransportMultiSearchTemplateAction.java @@ -80,6 +80,7 @@ protected void doExecute(Task task, MultiSearchTemplateRequest request, ActionLi try { searchRequest = convert(searchTemplateRequest, searchTemplateResponse, scriptService, xContentRegistry, searchUsageHolder); } catch (Exception e) { + searchTemplateResponse.decRef(); items[i] = new MultiSearchTemplateResponse.Item(null, e); if (ExceptionsHelper.status(e).getStatus() >= 500 && ExceptionsHelper.isNodeOrShardUnavailableTypeException(e) == false) { logger.warn("MultiSearchTemplate convert failure", e); @@ -98,12 +99,17 @@ protected void doExecute(Task task, MultiSearchTemplateRequest request, ActionLi MultiSearchResponse.Item item = r.getResponses()[i]; int originalSlot = originalSlots.get(i); if (item.isFailure()) { + var existing = items[originalSlot]; + if (existing.getResponse() != null) { + existing.getResponse().decRef(); + } items[originalSlot] = new MultiSearchTemplateResponse.Item(null, item.getFailure()); } else { items[originalSlot].getResponse().setResponse(item.getResponse()); + item.getResponse().incRef(); } } - l.onResponse(new MultiSearchTemplateResponse(items, r.getTook().millis())); + ActionListener.respondAndRelease(l, new MultiSearchTemplateResponse(items, r.getTook().millis())); })); } } diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/TransportSearchTemplateAction.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/TransportSearchTemplateAction.java index 2b315f48dcce4..c6bd2afc64d21 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/TransportSearchTemplateAction.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/TransportSearchTemplateAction.java @@ -71,18 +71,29 @@ public TransportSearchTemplateAction( @Override protected void doExecute(Task task, SearchTemplateRequest request, ActionListener<SearchTemplateResponse> listener) { final SearchTemplateResponse response = new SearchTemplateResponse(); + boolean success = false; try { SearchRequest searchRequest = convert(request, response, scriptService, xContentRegistry, searchUsageHolder); if (searchRequest != null) { - client.search(searchRequest, listener.delegateFailureAndWrap((l, searchResponse) -> { + client.search(searchRequest, listener.delegateResponse((l, e) -> { + response.decRef(); + l.onFailure(e); + }).delegateFailureAndWrap((l, searchResponse) -> { response.setResponse(searchResponse); - l.onResponse(response); + searchResponse.incRef(); + ActionListener.respondAndRelease(l, response); })); + success = true; } else { - listener.onResponse(response); + success = true; + ActionListener.respondAndRelease(listener, response); } } catch (IOException e) { listener.onFailure(e); + } finally { + if (success == false) { + response.decRef(); + } } } diff --git a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponseTests.java b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponseTests.java index 3db0d12216e54..03f2fbd3e81a7 100644 --- a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponseTests.java +++ b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponseTests.java @@ -11,7 +11,8 @@ import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.ShardSearchFailure; import 
org.elasticsearch.common.Strings; -import org.elasticsearch.search.internal.InternalSearchResponse; +import org.elasticsearch.core.RefCounted; +import org.elasticsearch.search.SearchResponseUtils; import org.elasticsearch.test.AbstractXContentTestCase; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentParser; @@ -38,11 +39,9 @@ protected MultiSearchTemplateResponse createTestInstance() { int totalShards = randomIntBetween(1, Integer.MAX_VALUE); int successfulShards = randomIntBetween(0, totalShards); int skippedShards = totalShards - successfulShards; - InternalSearchResponse internalSearchResponse = InternalSearchResponse.EMPTY_WITH_TOTAL_HITS; SearchResponse.Clusters clusters = randomClusters(); SearchTemplateResponse searchTemplateResponse = new SearchTemplateResponse(); - SearchResponse searchResponse = new SearchResponse( - internalSearchResponse, + SearchResponse searchResponse = SearchResponseUtils.emptyWithTotalHits( null, totalShards, successfulShards, @@ -75,11 +74,9 @@ private static MultiSearchTemplateResponse createTestInstanceWithFailures() { int totalShards = randomIntBetween(1, Integer.MAX_VALUE); int successfulShards = randomIntBetween(0, totalShards); int skippedShards = totalShards - successfulShards; - InternalSearchResponse internalSearchResponse = InternalSearchResponse.EMPTY_WITH_TOTAL_HITS; SearchResponse.Clusters clusters = randomClusters(); SearchTemplateResponse searchTemplateResponse = new SearchTemplateResponse(); - SearchResponse searchResponse = new SearchResponse( - internalSearchResponse, + SearchResponse searchResponse = SearchResponseUtils.emptyWithTotalHits( null, totalShards, successfulShards, @@ -150,7 +147,13 @@ public void testFromXContentWithFailures() throws IOException { this::doParseInstance, this::assertEqualInstances, assertToXContentEquivalence, - ToXContent.EMPTY_PARAMS + ToXContent.EMPTY_PARAMS, + RefCounted::decRef ); } + + @Override + protected void dispose(MultiSearchTemplateResponse instance) { + instance.decRef(); + } } diff --git a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/SearchTemplateResponseTests.java b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/SearchTemplateResponseTests.java index d3f23d3f4a21c..73c8887669a02 100644 --- a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/SearchTemplateResponseTests.java +++ b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/SearchTemplateResponseTests.java @@ -14,7 +14,7 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; -import org.elasticsearch.search.internal.InternalSearchResponse; +import org.elasticsearch.search.SearchResponseUtils; import org.elasticsearch.test.AbstractXContentTestCase; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; @@ -54,10 +54,8 @@ private static SearchResponse createSearchResponse() { int totalShards = randomIntBetween(1, Integer.MAX_VALUE); int successfulShards = randomIntBetween(0, totalShards); int skippedShards = randomIntBetween(0, totalShards); - InternalSearchResponse internalSearchResponse = InternalSearchResponse.EMPTY_WITH_TOTAL_HITS; - return new SearchResponse( - internalSearchResponse, + return SearchResponseUtils.emptyWithTotalHits( null, totalShards, successfulShards, @@ -127,33 +125,36 @@ protected boolean supportsUnknownFields() { public void testSourceToXContent() throws 
IOException { SearchTemplateResponse response = new SearchTemplateResponse(); + try { + XContentBuilder source = XContentFactory.jsonBuilder() + .startObject() + .startObject("query") + .startObject("terms") + .field("status", new String[] { "pending", "published" }) + .endObject() + .endObject() + .endObject(); + response.setSource(BytesReference.bytes(source)); + + XContentType contentType = randomFrom(XContentType.values()); + XContentBuilder expectedResponse = XContentFactory.contentBuilder(contentType) + .startObject() + .startObject("template_output") + .startObject("query") + .startObject("terms") + .field("status", new String[] { "pending", "published" }) + .endObject() + .endObject() + .endObject() + .endObject(); - XContentBuilder source = XContentFactory.jsonBuilder() - .startObject() - .startObject("query") - .startObject("terms") - .field("status", new String[] { "pending", "published" }) - .endObject() - .endObject() - .endObject(); - response.setSource(BytesReference.bytes(source)); - - XContentType contentType = randomFrom(XContentType.values()); - XContentBuilder expectedResponse = XContentFactory.contentBuilder(contentType) - .startObject() - .startObject("template_output") - .startObject("query") - .startObject("terms") - .field("status", new String[] { "pending", "published" }) - .endObject() - .endObject() - .endObject() - .endObject(); - - XContentBuilder actualResponse = XContentFactory.contentBuilder(contentType); - response.toXContent(actualResponse, ToXContent.EMPTY_PARAMS); - - assertToXContentEquivalent(BytesReference.bytes(expectedResponse), BytesReference.bytes(actualResponse), contentType); + XContentBuilder actualResponse = XContentFactory.contentBuilder(contentType); + response.toXContent(actualResponse, ToXContent.EMPTY_PARAMS); + + assertToXContentEquivalent(BytesReference.bytes(expectedResponse), BytesReference.bytes(actualResponse), contentType); + } finally { + response.decRef(); + } } public void testSearchResponseToXContent() throws IOException { @@ -161,17 +162,14 @@ public void testSearchResponseToXContent() throws IOException { hit.score(2.0f); SearchHit[] hits = new SearchHit[] { hit }; - InternalSearchResponse internalSearchResponse = new InternalSearchResponse( + SearchResponse searchResponse = new SearchResponse( new SearchHits(hits, new TotalHits(100, TotalHits.Relation.EQUAL_TO), 1.5f), null, null, - null, false, null, - 1 - ); - SearchResponse searchResponse = new SearchResponse( - internalSearchResponse, + null, + 1, null, 0, 0, @@ -182,37 +180,46 @@ public void testSearchResponseToXContent() throws IOException { ); SearchTemplateResponse response = new SearchTemplateResponse(); - response.setResponse(searchResponse); - - XContentType contentType = randomFrom(XContentType.values()); - XContentBuilder expectedResponse = XContentFactory.contentBuilder(contentType) - .startObject() - .field("took", 0) - .field("timed_out", false) - .startObject("_shards") - .field("total", 0) - .field("successful", 0) - .field("skipped", 0) - .field("failed", 0) - .endObject() - .startObject("hits") - .startObject("total") - .field("value", 100) - .field("relation", "eq") - .endObject() - .field("max_score", 1.5F) - .startArray("hits") - .startObject() - .field("_id", "id") - .field("_score", 2.0F) - .endObject() - .endArray() - .endObject() - .endObject(); - - XContentBuilder actualResponse = XContentFactory.contentBuilder(contentType); - response.toXContent(actualResponse, ToXContent.EMPTY_PARAMS); - - 
assertToXContentEquivalent(BytesReference.bytes(expectedResponse), BytesReference.bytes(actualResponse), contentType); + try { + response.setResponse(searchResponse); + + XContentType contentType = randomFrom(XContentType.values()); + XContentBuilder expectedResponse = XContentFactory.contentBuilder(contentType) + .startObject() + .field("took", 0) + .field("timed_out", false) + .startObject("_shards") + .field("total", 0) + .field("successful", 0) + .field("skipped", 0) + .field("failed", 0) + .endObject() + .startObject("hits") + .startObject("total") + .field("value", 100) + .field("relation", "eq") + .endObject() + .field("max_score", 1.5F) + .startArray("hits") + .startObject() + .field("_id", "id") + .field("_score", 2.0F) + .endObject() + .endArray() + .endObject() + .endObject(); + + XContentBuilder actualResponse = XContentFactory.contentBuilder(contentType); + response.toXContent(actualResponse, ToXContent.EMPTY_PARAMS); + + assertToXContentEquivalent(BytesReference.bytes(expectedResponse), BytesReference.bytes(actualResponse), contentType); + } finally { + response.decRef(); + } + } + + @Override + protected void dispose(SearchTemplateResponse instance) { + instance.decRef(); } } diff --git a/modules/lang-painless/src/doc/java/org/elasticsearch/painless/ContextGeneratorCommon.java b/modules/lang-painless/src/doc/java/org/elasticsearch/painless/ContextGeneratorCommon.java index c54214e5f854d..b8390f6aab75c 100644 --- a/modules/lang-painless/src/doc/java/org/elasticsearch/painless/ContextGeneratorCommon.java +++ b/modules/lang-painless/src/doc/java/org/elasticsearch/painless/ContextGeneratorCommon.java @@ -34,15 +34,21 @@ public class ContextGeneratorCommon { @SuppressForbidden(reason = "retrieving data from an internal API not exposed as part of the REST client") + @SuppressWarnings("unchecked") public static List<PainlessContextInfo> getContextInfos() throws IOException { URLConnection getContextNames = new URL("http://" + System.getProperty("cluster.uri") + "/_scripts/painless/_context") .openConnection(); - XContentParser parser = JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, getContextNames.getInputStream()); - parser.nextToken(); - parser.nextToken(); - @SuppressWarnings("unchecked") - List<String> contextNames = (List<String>) (Object) parser.list(); - parser.close(); + List<String> contextNames; + try ( + XContentParser parser = JsonXContent.jsonXContent.createParser( + XContentParserConfiguration.EMPTY, + getContextNames.getInputStream() + ) + ) { + parser.nextToken(); + parser.nextToken(); + contextNames = (List<String>) (Object) parser.list(); + } ((HttpURLConnection) getContextNames).disconnect(); List<PainlessContextInfo> contextInfos = new ArrayList<>(); @@ -51,9 +57,10 @@ public static List<PainlessContextInfo> getContextInfos() throws IOException { URLConnection getContextInfo = new URL( "http://" + System.getProperty("cluster.uri") + "/_scripts/painless/_context?context=" + contextName ).openConnection(); - parser = JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, getContextInfo.getInputStream()); - contextInfos.add(PainlessContextInfo.fromXContent(parser)); - ((HttpURLConnection) getContextInfo).disconnect(); + try (var parser = JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, getContextInfo.getInputStream())) { + contextInfos.add(PainlessContextInfo.fromXContent(parser)); + ((HttpURLConnection) getContextInfo).disconnect(); + } } contextInfos.sort(Comparator.comparing(PainlessContextInfo::getName)); diff --git 
a/modules/lang-painless/src/main/java/org/elasticsearch/painless/api/Json.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/api/Json.java index b2993d6169336..f121894cf4dc5 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/api/Json.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/api/Json.java @@ -20,16 +20,16 @@ public class Json { * Load a string as the Java version of a JSON type, either List (JSON array), Map (JSON object), Number, Boolean or String */ public static Object load(String json) throws IOException { - XContentParser parser = JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, json); - - return switch (parser.nextToken()) { - case START_ARRAY -> parser.list(); - case START_OBJECT -> parser.map(); - case VALUE_NUMBER -> parser.numberValue(); - case VALUE_BOOLEAN -> parser.booleanValue(); - case VALUE_STRING -> parser.text(); - default -> null; - }; + try (XContentParser parser = JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, json)) { + return switch (parser.nextToken()) { + case START_ARRAY -> parser.list(); + case START_OBJECT -> parser.map(); + case VALUE_NUMBER -> parser.numberValue(); + case VALUE_BOOLEAN -> parser.booleanValue(); + case VALUE_STRING -> parser.text(); + default -> null; + }; + } } /** diff --git a/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/GeoJsonShapeParserTests.java b/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/GeoJsonShapeParserTests.java index 5ef5eb6c0b5b8..4fa1d7b7a3108 100644 --- a/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/GeoJsonShapeParserTests.java +++ b/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/GeoJsonShapeParserTests.java @@ -181,34 +181,37 @@ public void testParseMultiDimensionShapes() throws IOException { .endArray() .endObject(); - XContentParser parser = createParser(pointGeoJson); - parser.nextToken(); - ElasticsearchGeoAssertions.assertValidException(parser, ElasticsearchParseException.class); - assertNull(parser.nextToken()); + XContentBuilder lineGeoJson; + try (XContentParser parser = createParser(pointGeoJson)) { + parser.nextToken(); + ElasticsearchGeoAssertions.assertValidException(parser, ElasticsearchParseException.class); + assertNull(parser.nextToken()); - // multi dimension linestring - XContentBuilder lineGeoJson = XContentFactory.jsonBuilder() - .startObject() - .field("type", "LineString") - .startArray("coordinates") - .startArray() - .value(100.0) - .value(0.0) - .value(15.0) - .endArray() - .startArray() - .value(101.0) - .value(1.0) - .value(18.0) - .value(19.0) - .endArray() - .endArray() - .endObject(); + // multi dimension linestring + lineGeoJson = XContentFactory.jsonBuilder() + .startObject() + .field("type", "LineString") + .startArray("coordinates") + .startArray() + .value(100.0) + .value(0.0) + .value(15.0) + .endArray() + .startArray() + .value(101.0) + .value(1.0) + .value(18.0) + .value(19.0) + .endArray() + .endArray() + .endObject(); + } - parser = createParser(lineGeoJson); - parser.nextToken(); - ElasticsearchGeoAssertions.assertValidException(parser, ElasticsearchParseException.class); - assertNull(parser.nextToken()); + try (var parser = createParser(lineGeoJson)) { + parser.nextToken(); + ElasticsearchGeoAssertions.assertValidException(parser, ElasticsearchParseException.class); + assertNull(parser.nextToken()); + } } @Override diff --git 
a/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/search/LegacyGeoShapeQueryTests.java b/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/search/LegacyGeoShapeQueryTests.java index 6ad4d2c06c6d4..4e06a37ec7f20 100644 --- a/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/search/LegacyGeoShapeQueryTests.java +++ b/modules/legacy-geo/src/test/java/org/elasticsearch/legacygeo/search/LegacyGeoShapeQueryTests.java @@ -99,7 +99,7 @@ public void testPointsOnlyExplicit() throws Exception { .get(); // test that point was inserted - assertHitCount(client().prepareSearch("geo_points_only").setQuery(matchAllQuery()).get(), 2L); + assertHitCount(client().prepareSearch("geo_points_only").setQuery(matchAllQuery()), 2L); } public void testPointsOnly() throws Exception { diff --git a/modules/mapper-extras/src/internalClusterTest/java/org/elasticsearch/index/mapper/MatchOnlyTextMapperIT.java b/modules/mapper-extras/src/internalClusterTest/java/org/elasticsearch/index/mapper/MatchOnlyTextMapperIT.java index ef082e8fba8fb..4a43b6afe3054 100644 --- a/modules/mapper-extras/src/internalClusterTest/java/org/elasticsearch/index/mapper/MatchOnlyTextMapperIT.java +++ b/modules/mapper-extras/src/internalClusterTest/java/org/elasticsearch/index/mapper/MatchOnlyTextMapperIT.java @@ -55,7 +55,7 @@ public void testHighlightingWithMatchOnlyTextFieldMatchPhrase() throws IOExcepti .startObject() .field( "message", - "[.ds-.slm-history-6-2023.09.20-" + "[.ds-.slm-history-7-2023.09.20-" + randomInt() + "][0] marking and sending shard failed due to [failed recovery]" ) @@ -105,7 +105,7 @@ public void testHighlightingWithMatchOnlyTextFieldSyntheticSource() throws IOExc .startObject() .field( "message", - "[.ds-.slm-history-6-2023.09.20-" + "[.ds-.slm-history-7-2023.09.20-" + randomInt() + "][0] marking and sending shard failed due to [failed recovery]" ) diff --git a/modules/reindex/src/main/java/org/elasticsearch/reindex/Reindexer.java b/modules/reindex/src/main/java/org/elasticsearch/reindex/Reindexer.java index 4c3206e82b8d6..dbe1968bb076a 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/reindex/Reindexer.java +++ b/modules/reindex/src/main/java/org/elasticsearch/reindex/Reindexer.java @@ -36,6 +36,7 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.VersionType; @@ -60,7 +61,6 @@ import org.elasticsearch.xcontent.XContentType; import java.io.IOException; -import java.io.InputStream; import java.io.UncheckedIOException; import java.util.ArrayList; import java.util.List; @@ -321,8 +321,11 @@ protected RequestWrapper buildRequest(ScrollableHitSource.Hit doc) if (mainRequestXContentType != null && doc.getXContentType() != mainRequestXContentType) { // we need to convert try ( - InputStream stream = doc.getSource().streamInput(); - XContentParser parser = sourceXContentType.xContent().createParser(XContentParserConfiguration.EMPTY, stream); + XContentParser parser = XContentHelper.createParserNotCompressed( + XContentParserConfiguration.EMPTY, + doc.getSource(), + sourceXContentType + ); XContentBuilder builder = XContentBuilder.builder(mainRequestXContentType.xContent()) ) { parser.nextToken(); diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/AsyncBulkByScrollActionTests.java 
b/modules/reindex/src/test/java/org/elasticsearch/reindex/AsyncBulkByScrollActionTests.java index 3d34e4df2c8c7..ed2e6ae8a62be 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/reindex/AsyncBulkByScrollActionTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/AsyncBulkByScrollActionTests.java @@ -66,7 +66,6 @@ import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.tasks.TaskManager; @@ -574,9 +573,14 @@ protected RequestWrapper buildRequest(Hit doc) { new TotalHits(0, TotalHits.Relation.EQUAL_TO), 0 ); - InternalSearchResponse internalResponse = new InternalSearchResponse(hits, null, null, null, false, false, 1); SearchResponse searchResponse = new SearchResponse( - internalResponse, + hits, + null, + null, + false, + false, + null, + 1, scrollId(), 5, 4, diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/ClientScrollableHitSourceTests.java b/modules/reindex/src/test/java/org/elasticsearch/reindex/ClientScrollableHitSourceTests.java index 0ad1867e75058..7ac50eb0e7c6c 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/reindex/ClientScrollableHitSourceTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/ClientScrollableHitSourceTests.java @@ -30,7 +30,6 @@ import org.elasticsearch.index.reindex.ScrollableHitSource; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.TestThreadPool; @@ -127,6 +126,7 @@ private void dotestBasicsWithRetry(int retries, int minFailures, int maxFailures ++expectedSearchRetries; } + searchResponse.decRef(); searchResponse = createSearchResponse(); client.respond(TransportSearchScrollAction.TYPE, searchResponse); } @@ -168,9 +168,14 @@ private SearchResponse createSearchResponse() { new TotalHits(0, TotalHits.Relation.EQUAL_TO), 0 ); - InternalSearchResponse internalResponse = new InternalSearchResponse(hits, null, null, null, false, false, 1); return new SearchResponse( - internalResponse, + hits, + null, + null, + false, + false, + null, + 1, randomSimpleString(random(), 1, 10), 5, 4, diff --git a/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/plugin/analysis/kuromoji/KuromojiTokenizerFactory.java b/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/plugin/analysis/kuromoji/KuromojiTokenizerFactory.java index d662003530c22..65276c04bed56 100644 --- a/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/plugin/analysis/kuromoji/KuromojiTokenizerFactory.java +++ b/plugins/analysis-kuromoji/src/main/java/org/elasticsearch/plugin/analysis/kuromoji/KuromojiTokenizerFactory.java @@ -12,7 +12,6 @@ import org.apache.lucene.analysis.ja.JapaneseTokenizer; import org.apache.lucene.analysis.ja.JapaneseTokenizer.Mode; import org.apache.lucene.analysis.ja.dict.UserDictionary; -import org.apache.lucene.analysis.util.CSVUtil; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; @@ -23,10 +22,8 @@ import java.io.IOException; import java.io.Reader; import java.io.StringReader; -import java.util.HashSet; import java.util.List; import 
java.util.Locale; -import java.util.Set; public class KuromojiTokenizerFactory extends AbstractTokenizerFactory { @@ -60,11 +57,10 @@ public static UserDictionary getUserDictionary(Environment env, Settings setting "It is not allowed to use [" + USER_DICT_PATH_OPTION + "] in conjunction" + " with [" + USER_DICT_RULES_OPTION + "]" ); } - List<String> ruleList = Analysis.getWordList(env, settings, USER_DICT_PATH_OPTION, USER_DICT_RULES_OPTION, false); + List<String> ruleList = Analysis.getWordList(env, settings, USER_DICT_PATH_OPTION, USER_DICT_RULES_OPTION, false, true); if (ruleList == null || ruleList.isEmpty()) { return null; } - validateDuplicatedWords(ruleList); StringBuilder sb = new StringBuilder(); for (String line : ruleList) { sb.append(line).append(System.lineSeparator()); @@ -76,23 +72,6 @@ public static UserDictionary getUserDictionary(Environment env, Settings setting } } - private static void validateDuplicatedWords(List<String> ruleList) { - Set<String> dup = new HashSet<>(); - int lineNum = 0; - for (String line : ruleList) { - // ignore comments - if (line.startsWith("#") == false) { - String[] values = CSVUtil.parse(line); - if (dup.add(values[0]) == false) { - throw new IllegalArgumentException( - "Found duplicate term [" + values[0] + "] in user dictionary " + "at line [" + lineNum + "]" - ); - } - } - ++lineNum; - } - } - public static JapaneseTokenizer.Mode getMode(Settings settings) { String modeSetting = settings.get("mode", JapaneseTokenizer.DEFAULT_MODE.name()); return JapaneseTokenizer.Mode.valueOf(modeSetting.toUpperCase(Locale.ENGLISH)); diff --git a/plugins/analysis-nori/src/main/java/org/elasticsearch/plugin/analysis/nori/NoriAnalyzerProvider.java b/plugins/analysis-nori/src/main/java/org/elasticsearch/plugin/analysis/nori/NoriAnalyzerProvider.java index f2949e45964a4..65c9bb9833177 100644 --- a/plugins/analysis-nori/src/main/java/org/elasticsearch/plugin/analysis/nori/NoriAnalyzerProvider.java +++ b/plugins/analysis-nori/src/main/java/org/elasticsearch/plugin/analysis/nori/NoriAnalyzerProvider.java @@ -30,7 +30,7 @@ public class NoriAnalyzerProvider extends AbstractIndexAnalyzerProvider<KoreanAnalyzer> { - final UserDictionary userDictionary = NoriTokenizerFactory.getUserDictionary(env, settings); + final UserDictionary userDictionary = NoriTokenizerFactory.getUserDictionary(env, settings, indexSettings); List<String> tagList = Analysis.getWordList(env, settings, "stoptags"); final Set<POS.Tag> stopTags = tagList != null ?
resolvePOSList(tagList) : KoreanPartOfSpeechStopFilter.DEFAULT_STOP_TAGS; analyzer = new KoreanAnalyzer(userDictionary, mode, stopTags, false); diff --git a/plugins/analysis-nori/src/main/java/org/elasticsearch/plugin/analysis/nori/NoriTokenizerFactory.java b/plugins/analysis-nori/src/main/java/org/elasticsearch/plugin/analysis/nori/NoriTokenizerFactory.java index c0be8322ade95..eedb4c2011af3 100644 --- a/plugins/analysis-nori/src/main/java/org/elasticsearch/plugin/analysis/nori/NoriTokenizerFactory.java +++ b/plugins/analysis-nori/src/main/java/org/elasticsearch/plugin/analysis/nori/NoriTokenizerFactory.java @@ -15,6 +15,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.analysis.AbstractTokenizerFactory; import org.elasticsearch.index.analysis.Analysis; @@ -24,6 +25,8 @@ import java.util.List; import java.util.Locale; +import static org.elasticsearch.index.IndexVersions.UPGRADE_LUCENE_9_9_1; + public class NoriTokenizerFactory extends AbstractTokenizerFactory { private static final String USER_DICT_PATH_OPTION = "user_dictionary"; private static final String USER_DICT_RULES_OPTION = "user_dictionary_rules"; @@ -35,17 +38,24 @@ public class NoriTokenizerFactory extends AbstractTokenizerFactory { public NoriTokenizerFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) { super(indexSettings, settings, name); decompoundMode = getMode(settings); - userDictionary = getUserDictionary(env, settings); + userDictionary = getUserDictionary(env, settings, indexSettings); discardPunctuation = settings.getAsBoolean("discard_punctuation", true); } - public static UserDictionary getUserDictionary(Environment env, Settings settings) { + public static UserDictionary getUserDictionary(Environment env, Settings settings, IndexSettings indexSettings) { if (settings.get(USER_DICT_PATH_OPTION) != null && settings.get(USER_DICT_RULES_OPTION) != null) { throw new IllegalArgumentException( "It is not allowed to use [" + USER_DICT_PATH_OPTION + "] in conjunction" + " with [" + USER_DICT_RULES_OPTION + "]" ); } - List<String> ruleList = Analysis.getWordList(env, settings, USER_DICT_PATH_OPTION, USER_DICT_RULES_OPTION, true); + List<String> ruleList = Analysis.getWordList( + env, + settings, + USER_DICT_PATH_OPTION, + USER_DICT_RULES_OPTION, + true, + isSupportDuplicateCheck(indexSettings) + ); if (ruleList == null || ruleList.isEmpty()) { return null; } @@ -60,6 +70,24 @@ public static UserDictionary getUserDictionary(Environment env, Settings setting } } + /** + * Determines whether the index supports duplicate checks in the user dictionary. + * Duplicate checking was introduced in 8.13.0, so indices created on any earlier version do not support it. + * + * @param indexSettings the settings of the index in question + * @return true if the index was created on 8.13.0 or later and therefore supports the duplicate check + */ + private static boolean isSupportDuplicateCheck(IndexSettings indexSettings) { + var idxVersion = indexSettings.getIndexVersionCreated(); + // Explicitly exclude the range of versions greater than NORI_DUPLICATES that + // are also in 8.12. The only version in this range is UPGRADE_LUCENE_9_9_1.
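+ // Worked examples of the gate below (derived from the comment above and the tests in NoriAnalysisTests):
+ //   idxVersion == NORI_DUPLICATES       -> true   (first index version carrying the check)
+ //   idxVersion == UPGRADE_LUCENE_9_9_1  -> false  (numerically newer, but an 8.12 version without the check)
+ //   idxVersion == V_8_10_0              -> false  (before NORI_DUPLICATES)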
+ return idxVersion.onOrAfter(IndexVersions.NORI_DUPLICATES) && idxVersion != UPGRADE_LUCENE_9_9_1; + } + public static KoreanTokenizer.DecompoundMode getMode(Settings settings) { String modeSetting = settings.get("decompound_mode", KoreanTokenizer.DEFAULT_DECOMPOUND.name()); return KoreanTokenizer.DecompoundMode.valueOf(modeSetting.toUpperCase(Locale.ENGLISH)); diff --git a/plugins/analysis-nori/src/test/java/org/elasticsearch/plugin/analysis/nori/NoriAnalysisTests.java b/plugins/analysis-nori/src/test/java/org/elasticsearch/plugin/analysis/nori/NoriAnalysisTests.java index e091813184472..642ed19c520d7 100644 --- a/plugins/analysis-nori/src/test/java/org/elasticsearch/plugin/analysis/nori/NoriAnalysisTests.java +++ b/plugins/analysis-nori/src/test/java/org/elasticsearch/plugin/analysis/nori/NoriAnalysisTests.java @@ -17,6 +17,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.IndexVersions; import org.elasticsearch.index.analysis.AnalysisTestsHelper; import org.elasticsearch.index.analysis.IndexAnalyzers; import org.elasticsearch.index.analysis.NamedAnalyzer; @@ -117,6 +118,31 @@ public void testNoriAnalyzerInvalidUserDictOption() throws Exception { ); } + public void testNoriAnalyzerDuplicateUserDictRule() throws Exception { + Settings settings = Settings.builder() + .put("index.analysis.analyzer.my_analyzer.type", "nori") + .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersions.NORI_DUPLICATES) + .putList("index.analysis.analyzer.my_analyzer.user_dictionary_rules", "c++", "C쁠쁠", "세종", "세종", "세종시 세종 시") + .build(); + + final IllegalArgumentException exc = expectThrows(IllegalArgumentException.class, () -> createTestAnalysis(settings)); + assertThat(exc.getMessage(), containsString("[세종] in user dictionary at line [3]")); + } + + public void testNoriAnalyzerDuplicateUserDictRuleWithLegacyVersion() throws IOException { + Settings settings = Settings.builder() + .put("index.analysis.analyzer.my_analyzer.type", "nori") + .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersions.V_8_10_0) + .putList("index.analysis.analyzer.my_analyzer.user_dictionary_rules", "c++", "C쁠쁠", "세종", "세종", "세종시 세종 시") + .build(); + + final TestAnalysis analysis = createTestAnalysis(settings); + Analyzer analyzer = analysis.indexAnalyzers.get("my_analyzer"); + try (TokenStream stream = analyzer.tokenStream("", "세종")) { + assertTokenStreamContents(stream, new String[] { "세종" }); + } + } + public void testNoriTokenizer() throws Exception { Settings settings = Settings.builder() .put("index.analysis.tokenizer.my_tokenizer.type", "nori_tokenizer") diff --git a/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/CcsCommonYamlTestSuiteIT.java b/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/CcsCommonYamlTestSuiteIT.java index b818de468ea2c..88740edffc09a 100644 --- a/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/CcsCommonYamlTestSuiteIT.java +++ b/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/CcsCommonYamlTestSuiteIT.java @@ -16,7 +16,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.tests.util.TimeUnits; -import org.elasticsearch.Version; import org.elasticsearch.client.NodeSelector; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; @@ -25,12 +24,12 @@ import 
org.elasticsearch.common.CheckedSupplier; import org.elasticsearch.common.Strings; import org.elasticsearch.core.IOUtils; -import org.elasticsearch.core.Tuple; import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.cluster.FeatureFlag; import org.elasticsearch.test.cluster.local.LocalClusterConfigProvider; import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.test.rest.ObjectPath; +import org.elasticsearch.test.rest.TestFeatureService; import org.elasticsearch.test.rest.yaml.restspec.ClientYamlSuiteRestApi; import org.elasticsearch.test.rest.yaml.restspec.ClientYamlSuiteRestSpec; import org.elasticsearch.test.rest.yaml.section.ClientYamlTestSection; @@ -46,12 +45,15 @@ import org.junit.rules.TestRule; import java.io.IOException; +import java.io.UncheckedIOException; import java.util.ArrayList; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.Set; import java.util.function.BiPredicate; -import java.util.function.Predicate; +import java.util.stream.Collectors; +import java.util.stream.Stream; import static java.util.Collections.unmodifiableList; @@ -159,19 +161,7 @@ public void initSearchClient() throws IOException { searchClient = buildClient(restClientSettings(), clusterHosts.toArray(new HttpHost[clusterHosts.size()])); adminSearchClient = buildClient(restAdminSettings(), clusterHosts.toArray(new HttpHost[clusterHosts.size()])); - Tuple versionVersionTuple = readVersionsFromCatNodes(adminSearchClient); - final Version esVersion = versionVersionTuple.v1(); - final String os = readOsFromNodesInfo(adminSearchClient); - - searchYamlTestClient = new TestCandidateAwareClient( - getRestSpec(), - searchClient, - hosts, - esVersion, - ESRestTestCase::clusterHasFeature, - os, - this::getClientBuilderWithSniffedHosts - ); + searchYamlTestClient = new TestCandidateAwareClient(getRestSpec(), searchClient, hosts, this::getClientBuilderWithSniffedHosts); // check that we have an established CCS connection Request request = new Request("GET", "_remote/info"); @@ -298,18 +288,56 @@ public static Iterable parameters() throws Exception { @Override protected ClientYamlTestExecutionContext createRestTestExecutionContext( ClientYamlTestCandidate clientYamlTestCandidate, - ClientYamlTestClient clientYamlTestClient + ClientYamlTestClient clientYamlTestClient, + final Set nodesVersions, + final TestFeatureService testFeatureService, + final Set osSet ) { - // depending on the API called, we either return the client running against the "write" or the "search" cluster here - return new ClientYamlTestExecutionContext(clientYamlTestCandidate, clientYamlTestClient, randomizeContentType()) { - protected ClientYamlTestClient clientYamlTestClient(String apiName) { - if (CCS_APIS.contains(apiName)) { - return searchYamlTestClient; - } else { - return super.clientYamlTestClient(apiName); + try { + // Ensure the test specific initialization is run by calling it explicitly (@Before annotations on base-derived class may + // be called in a different order) + initSearchClient(); + // Reconcile and provide unified features, os, version(s), based on both clientYamlTestClient and searchYamlTestClient + var searchOs = readOsFromNodesInfo(adminSearchClient); + var searchNodeVersions = readVersionsFromNodesInfo(adminSearchClient); + var semanticNodeVersions = searchNodeVersions.stream() + .map(ESRestTestCase::parseLegacyVersion) + .flatMap(Optional::stream) + .collect(Collectors.toSet()); + final TestFeatureService 
searchTestFeatureService = createTestFeatureService( + getClusterStateFeatures(adminSearchClient), + semanticNodeVersions + ); + final TestFeatureService combinedTestFeatureService = new TestFeatureService() { + @Override + public boolean clusterHasFeature(String featureId) { + return testFeatureService.clusterHasFeature(featureId) && searchTestFeatureService.clusterHasFeature(featureId); } - } - }; + }; + final Set combinedOsSet = Stream.concat(osSet.stream(), Stream.of(searchOs)).collect(Collectors.toSet()); + final Set combinedNodeVersions = Stream.concat(nodesVersions.stream(), searchNodeVersions.stream()) + .collect(Collectors.toSet()); + + return new ClientYamlTestExecutionContext( + clientYamlTestCandidate, + clientYamlTestClient, + randomizeContentType(), + combinedNodeVersions, + combinedTestFeatureService, + combinedOsSet + ) { + // depending on the API called, we either return the client running against the "write" or the "search" cluster here + protected ClientYamlTestClient clientYamlTestClient(String apiName) { + if (CCS_APIS.contains(apiName)) { + return searchYamlTestClient; + } else { + return super.clientYamlTestClient(apiName); + } + } + }; + } catch (IOException e) { + throw new UncheckedIOException(e); + } } @AfterClass @@ -328,12 +356,9 @@ static class TestCandidateAwareClient extends ClientYamlTestClient { ClientYamlSuiteRestSpec restSpec, RestClient restClient, List hosts, - Version esVersion, - Predicate clusterFeaturesPredicate, - String os, CheckedSupplier clientBuilderWithSniffedNodes ) { - super(restSpec, restClient, hosts, esVersion, clusterFeaturesPredicate, os, clientBuilderWithSniffedNodes); + super(restSpec, restClient, hosts, clientBuilderWithSniffedNodes); } public void setTestCandidate(ClientYamlTestCandidate testCandidate) { diff --git a/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/RcsCcsCommonYamlTestSuiteIT.java b/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/RcsCcsCommonYamlTestSuiteIT.java index 51d499db61932..a331d6f54cb4a 100644 --- a/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/RcsCcsCommonYamlTestSuiteIT.java +++ b/qa/ccs-common-rest/src/yamlRestTest/java/org/elasticsearch/test/rest/yaml/RcsCcsCommonYamlTestSuiteIT.java @@ -15,7 +15,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.tests.util.TimeUnits; -import org.elasticsearch.Version; import org.elasticsearch.client.Request; import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; @@ -26,13 +25,13 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.IOUtils; -import org.elasticsearch.core.Tuple; import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.cluster.FeatureFlag; import org.elasticsearch.test.cluster.local.LocalClusterConfigProvider; import org.elasticsearch.test.cluster.util.resource.Resource; import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.test.rest.ObjectPath; +import org.elasticsearch.test.rest.TestFeatureService; import org.elasticsearch.test.rest.yaml.CcsCommonYamlTestSuiteIT.TestCandidateAwareClient; import org.junit.AfterClass; import org.junit.Before; @@ -45,7 +44,11 @@ import java.util.ArrayList; import java.util.List; import java.util.Map; +import java.util.Optional; +import java.util.Set; import java.util.concurrent.atomic.AtomicReference; 
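Both suites reconcile cluster capabilities the same way before building the execution context: feature flags are intersected (a feature only counts as available if every participating cluster reports it), while node versions and OS names are unioned. A condensed sketch of that reconciliation, with the local/search service names abbreviated for illustration:

    // Features: conservative AND across clusters.
    TestFeatureService combined = new TestFeatureService() {
        @Override
        public boolean clusterHasFeature(String featureId) {
            return localClusterFeatures.clusterHasFeature(featureId) && searchClusterFeatures.clusterHasFeature(featureId);
        }
    };
    // Versions and OS names: union, so skip conditions see every distinct value present on either side.
    Set<String> combinedVersions = Stream.concat(localVersions.stream(), searchVersions.stream()).collect(Collectors.toSet());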
+import java.util.stream.Collectors; +import java.util.stream.Stream; import static java.util.Collections.unmodifiableList; import static org.elasticsearch.test.rest.yaml.CcsCommonYamlTestSuiteIT.CCS_APIS; @@ -221,19 +224,7 @@ public void initSearchClient() throws IOException { clusterHosts.toArray(new HttpHost[clusterHosts.size()]) ); - Tuple versionVersionTuple = readVersionsFromCatNodes(adminSearchClient); - final Version esVersion = versionVersionTuple.v1(); - final String os = readOsFromNodesInfo(adminSearchClient); - - searchYamlTestClient = new TestCandidateAwareClient( - getRestSpec(), - searchClient, - hosts, - esVersion, - ESRestTestCase::clusterHasFeature, - os, - this::getClientBuilderWithSniffedHosts - ); + searchYamlTestClient = new TestCandidateAwareClient(getRestSpec(), searchClient, hosts, this::getClientBuilderWithSniffedHosts); configureRemoteCluster(); // check that we have an established CCS connection @@ -282,18 +273,56 @@ public static Iterable parameters() throws Exception { @Override protected ClientYamlTestExecutionContext createRestTestExecutionContext( ClientYamlTestCandidate clientYamlTestCandidate, - ClientYamlTestClient clientYamlTestClient + ClientYamlTestClient clientYamlTestClient, + final Set nodesVersions, + final TestFeatureService testFeatureService, + final Set osSet ) { - // depending on the API called, we either return the client running against the "write" or the "search" cluster here - return new ClientYamlTestExecutionContext(clientYamlTestCandidate, clientYamlTestClient, randomizeContentType()) { - protected ClientYamlTestClient clientYamlTestClient(String apiName) { - if (CCS_APIS.contains(apiName)) { - return searchYamlTestClient; - } else { - return super.clientYamlTestClient(apiName); + try { + // Ensure the test specific initialization is run by calling it explicitly (@Before annotations on base-derived class may + // be called in a different order) + initSearchClient(); + // Reconcile and provide unified features, os, version(s), based on both clientYamlTestClient and searchYamlTestClient + var searchOs = readOsFromNodesInfo(adminSearchClient); + var searchNodeVersions = readVersionsFromNodesInfo(adminSearchClient); + var semanticNodeVersions = searchNodeVersions.stream() + .map(ESRestTestCase::parseLegacyVersion) + .flatMap(Optional::stream) + .collect(Collectors.toSet()); + final TestFeatureService searchTestFeatureService = createTestFeatureService( + getClusterStateFeatures(adminSearchClient), + semanticNodeVersions + ); + final TestFeatureService combinedTestFeatureService = new TestFeatureService() { + @Override + public boolean clusterHasFeature(String featureId) { + return testFeatureService.clusterHasFeature(featureId) && searchTestFeatureService.clusterHasFeature(featureId); } - } - }; + }; + final Set combinedOsSet = Stream.concat(osSet.stream(), Stream.of(searchOs)).collect(Collectors.toSet()); + final Set combinedNodeVersions = Stream.concat(nodesVersions.stream(), searchNodeVersions.stream()) + .collect(Collectors.toSet()); + + return new ClientYamlTestExecutionContext( + clientYamlTestCandidate, + clientYamlTestClient, + randomizeContentType(), + combinedNodeVersions, + combinedTestFeatureService, + combinedOsSet + ) { + // depending on the API called, we either return the client running against the "write" or the "search" cluster here + protected ClientYamlTestClient clientYamlTestClient(String apiName) { + if (CCS_APIS.contains(apiName)) { + return searchYamlTestClient; + } else { + return 
super.clientYamlTestClient(apiName); + } + } + }; + } catch (IOException e) { + throw new UncheckedIOException(e); + } } @AfterClass diff --git a/qa/ccs-rolling-upgrade-remote-cluster/build.gradle b/qa/ccs-rolling-upgrade-remote-cluster/build.gradle index ff28b1213079d..caa48db634f46 100644 --- a/qa/ccs-rolling-upgrade-remote-cluster/build.gradle +++ b/qa/ccs-rolling-upgrade-remote-cluster/build.gradle @@ -6,6 +6,7 @@ * Side Public License, v 1. */ +import org.elasticsearch.gradle.Version import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask @@ -32,6 +33,7 @@ BuildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> versions = [bwcVersion.toString(), project.version] setting 'cluster.remote.node.attr', 'gateway' setting 'xpack.security.enabled', 'false' + requiresFeature 'es.failure_store_feature_flag_enabled', new Version(8, 12, 0) } def remoteCluster = testClusters.register("${baseName}-remote") { numberOfNodes = 3 @@ -39,6 +41,7 @@ BuildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> firstNode.setting 'node.attr.gateway', 'true' lastNode.setting 'node.attr.gateway', 'true' setting 'xpack.security.enabled', 'false' + requiresFeature 'es.failure_store_feature_flag_enabled', new Version(8, 12, 0) } diff --git a/qa/ccs-unavailable-clusters/src/javaRestTest/java/org/elasticsearch/search/CrossClusterSearchUnavailableClusterIT.java b/qa/ccs-unavailable-clusters/src/javaRestTest/java/org/elasticsearch/search/CrossClusterSearchUnavailableClusterIT.java index b17b81b6ac188..45aed866dc086 100644 --- a/qa/ccs-unavailable-clusters/src/javaRestTest/java/org/elasticsearch/search/CrossClusterSearchUnavailableClusterIT.java +++ b/qa/ccs-unavailable-clusters/src/javaRestTest/java/org/elasticsearch/search/CrossClusterSearchUnavailableClusterIT.java @@ -37,7 +37,6 @@ import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.search.aggregations.InternalAggregations; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.test.rest.ObjectPath; import org.elasticsearch.test.transport.MockTransportService; @@ -92,18 +91,15 @@ private static MockTransportService startTransport( TransportSearchAction.TYPE.name(), EsExecutors.DIRECT_EXECUTOR_SERVICE, SearchRequest::new, - (request, channel, task) -> { - InternalSearchResponse response = new InternalSearchResponse( + (request, channel, task) -> channel.sendResponse( + new SearchResponse( new SearchHits(new SearchHit[0], new TotalHits(0, TotalHits.Relation.EQUAL_TO), Float.NaN), InternalAggregations.EMPTY, null, - null, false, null, - 1 - ); - SearchResponse searchResponse = new SearchResponse( - response, + null, + 1, null, 1, 1, @@ -111,9 +107,8 @@ private static MockTransportService startTransport( 100, ShardSearchFailure.EMPTY_ARRAY, SearchResponse.Clusters.EMPTY - ); - channel.sendResponse(searchResponse); - } + ) + ) ); newService.registerRequestHandler( ClusterStateAction.NAME, diff --git a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java index 47f7bb488d83d..16209a73826ca 100644 --- a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java +++ 
b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java @@ -8,6 +8,8 @@ package org.elasticsearch.upgrades; +import io.netty.handler.codec.http.HttpMethod; + import com.carrotsearch.randomizedtesting.annotations.Name; import org.apache.http.util.EntityUtils; @@ -42,8 +44,10 @@ import org.elasticsearch.test.rest.ObjectPath; import org.elasticsearch.test.rest.RestTestLegacyFeatures; import org.elasticsearch.transport.Compression; +import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xcontent.json.JsonXContent; import org.hamcrest.Matchers; import org.junit.Before; @@ -137,15 +141,12 @@ public void setIndex() { public void testSearch() throws Exception { int count; if (isRunningAgainstOldCluster()) { - XContentBuilder mappingsAndSettings = jsonBuilder(); - mappingsAndSettings.startObject(); - { + final var createIndex = newXContentRequest(HttpMethod.PUT, "/" + index, (mappingsAndSettings, params) -> { mappingsAndSettings.startObject("settings"); mappingsAndSettings.field("number_of_shards", 1); mappingsAndSettings.field("number_of_replicas", 0); mappingsAndSettings.endObject(); - } - { + mappingsAndSettings.startObject("mappings"); mappingsAndSettings.startObject("properties"); { @@ -166,11 +167,8 @@ public void testSearch() throws Exception { } mappingsAndSettings.endObject(); mappingsAndSettings.endObject(); - } - mappingsAndSettings.endObject(); - - Request createIndex = new Request("PUT", "/" + index); - createIndex.setJsonEntity(Strings.toString(mappingsAndSettings)); + return mappingsAndSettings; + }); client().performRequest(createIndex); count = randomIntBetween(2000, 3000); @@ -207,15 +205,12 @@ public void testSearch() throws Exception { public void testNewReplicas() throws Exception { if (isRunningAgainstOldCluster()) { - XContentBuilder mappingsAndSettings = jsonBuilder(); - mappingsAndSettings.startObject(); - { + final var createIndex = newXContentRequest(HttpMethod.PUT, "/" + index, (mappingsAndSettings, params) -> { mappingsAndSettings.startObject("settings"); mappingsAndSettings.field("number_of_shards", 1); mappingsAndSettings.field("number_of_replicas", 0); mappingsAndSettings.endObject(); - } - { + mappingsAndSettings.startObject("mappings"); mappingsAndSettings.startObject("properties"); { @@ -225,11 +220,8 @@ public void testNewReplicas() throws Exception { } mappingsAndSettings.endObject(); mappingsAndSettings.endObject(); - } - mappingsAndSettings.endObject(); - - Request createIndex = new Request("PUT", "/" + index); - createIndex.setJsonEntity(Strings.toString(mappingsAndSettings)); + return mappingsAndSettings; + }); client().performRequest(createIndex); int numDocs = randomIntBetween(2000, 3000); @@ -247,8 +239,11 @@ public void testNewReplicas() throws Exception { final int numReplicas = 1; final long startTime = System.currentTimeMillis(); logger.debug("--> creating [{}] replicas for index [{}]", numReplicas, index); - Request setNumberOfReplicas = new Request("PUT", "/" + index + "/_settings"); - setNumberOfReplicas.setJsonEntity("{ \"index\": { \"number_of_replicas\" : " + numReplicas + " }}"); + Request setNumberOfReplicas = newXContentRequest( + HttpMethod.PUT, + "/" + index + "/_settings", + (builder, params) -> builder.startObject("index").field("number_of_replicas", numReplicas).endObject() + ); client().performRequest(setNumberOfReplicas); 
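This file systematically swaps hand-assembled JSON strings for the newXContentRequest helper, which takes an HTTP method, a path, and a ToXContent lambda that writes the body through an XContentBuilder; the body can then be rendered in content types other than hard-coded JSON (the stored-binary assertion later in this file handles SMILE/CBOR/YAML responses for exactly that reason). A representative call, mirroring the replica-count update above and assuming the helper's signature as used in this file:

    Request setReplicas = newXContentRequest(
        HttpMethod.PUT,
        "/" + index + "/_settings",
        (builder, params) -> builder.startObject("index").field("number_of_replicas", 1).endObject()
    );
    client().performRequest(setReplicas);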
ensureGreenLongWait(index); @@ -282,30 +277,26 @@ public void testSearchTimeSeriesMode() throws Exception { numDocs = countOfIndexedRandomDocuments(); } assertCountAll(numDocs); - Request request = new Request("GET", "/" + index + "/_search"); - XContentBuilder body = jsonBuilder().startObject(); - body.field("size", 0); - body.startObject("aggs").startObject("check").startObject("scripted_metric"); - { + Request request = newXContentRequest(HttpMethod.GET, "/" + index + "/_search", (body, params) -> { + body.field("size", 0); + body.startObject("aggs").startObject("check").startObject("scripted_metric"); body.field("init_script", "state.timeSeries = new HashSet()"); body.field("map_script", "state.timeSeries.add(doc['dim'].value)"); body.field("combine_script", "return state.timeSeries"); - StringBuilder reduceScript = new StringBuilder(); - reduceScript.append("Set timeSeries = new TreeSet();"); - reduceScript.append("for (s in states) {"); - reduceScript.append(" for (ts in s) {"); - reduceScript.append(" boolean newTs = timeSeries.add(ts);"); - reduceScript.append(" if (false == newTs) {"); - reduceScript.append(" throw new IllegalArgumentException(ts + ' appeared in two shards');"); - reduceScript.append(" }"); - reduceScript.append(" }"); - reduceScript.append("}"); - reduceScript.append("return timeSeries;"); - body.field("reduce_script", reduceScript.toString()); - } - body.endObject().endObject().endObject(); - body.endObject(); - request.setJsonEntity(Strings.toString(body)); + body.field("reduce_script", """ + Set timeSeries = new TreeSet(); + for (s in states) { + for (ts in s) { + boolean newTs = timeSeries.add(ts); + if (false == newTs) { + throw new IllegalArgumentException(ts + ' appeared in two shards'); + } + } + } + return timeSeries;"""); + body.endObject().endObject().endObject(); + return body; + }); Map response = entityAsMap(client().performRequest(request)); assertMap( response, @@ -326,8 +317,11 @@ public void testNewReplicasTimeSeriesMode() throws Exception { final int numReplicas = 1; final long startTime = System.currentTimeMillis(); logger.debug("--> creating [{}] replicas for index [{}]", numReplicas, index); - Request setNumberOfReplicas = new Request("PUT", "/" + index + "/_settings"); - setNumberOfReplicas.setJsonEntity("{ \"index\": { \"number_of_replicas\" : " + numReplicas + " }}"); + Request setNumberOfReplicas = newXContentRequest( + HttpMethod.PUT, + "/" + index + "/_settings", + (builder, params) -> builder.startObject("index").field("number_of_replicas", numReplicas).endObject() + ); client().performRequest(setNumberOfReplicas); ensureGreenLongWait(index); @@ -350,9 +344,7 @@ public void testNewReplicasTimeSeriesMode() throws Exception { } private int createTimeSeriesModeIndex(int replicas) throws IOException { - XContentBuilder mappingsAndSettings = jsonBuilder(); - mappingsAndSettings.startObject(); - { + final var createIndex = newXContentRequest(HttpMethod.PUT, "/" + index, (mappingsAndSettings, params) -> { mappingsAndSettings.startObject("settings"); mappingsAndSettings.field("number_of_shards", 1); mappingsAndSettings.field("number_of_replicas", replicas); @@ -361,8 +353,7 @@ private int createTimeSeriesModeIndex(int replicas) throws IOException { mappingsAndSettings.field("time_series.start_time", 1L); mappingsAndSettings.field("time_series.end_time", DateUtils.MAX_MILLIS_BEFORE_9999 - 1); mappingsAndSettings.endObject(); - } - { + mappingsAndSettings.startObject("mappings"); mappingsAndSettings.startObject("properties"); { @@ -371,11 
+362,8 @@ private int createTimeSeriesModeIndex(int replicas) throws IOException { } mappingsAndSettings.endObject(); mappingsAndSettings.endObject(); - } - mappingsAndSettings.endObject(); - - Request createIndex = new Request("PUT", "/" + index); - createIndex.setJsonEntity(Strings.toString(mappingsAndSettings)); + return mappingsAndSettings; + }); client().performRequest(createIndex); int numDocs = randomIntBetween(2000, 3000); @@ -398,19 +386,15 @@ private int createTimeSeriesModeIndex(int replicas) throws IOException { public void testClusterState() throws Exception { if (isRunningAgainstOldCluster()) { - XContentBuilder mappingsAndSettings = jsonBuilder(); - mappingsAndSettings.startObject(); - mappingsAndSettings.field("index_patterns", index); - mappingsAndSettings.field("order", "1000"); - { + final Request createTemplate = newXContentRequest(HttpMethod.PUT, "/_template/template_1", (mappingsAndSettings, params) -> { + mappingsAndSettings.field("index_patterns", index); + mappingsAndSettings.field("order", "1000"); mappingsAndSettings.startObject("settings"); mappingsAndSettings.field("number_of_shards", 1); mappingsAndSettings.field("number_of_replicas", 0); mappingsAndSettings.endObject(); - } - mappingsAndSettings.endObject(); - Request createTemplate = new Request("PUT", "/_template/template_1"); - createTemplate.setJsonEntity(Strings.toString(mappingsAndSettings)); + return mappingsAndSettings; + }); createTemplate.setOptions(expectWarnings(RestPutIndexTemplateAction.DEPRECATION_WARNING)); client().performRequest(createTemplate); client().performRequest(new Request("PUT", "/" + index)); @@ -455,9 +439,7 @@ public void testShrink() throws IOException { String shrunkenIndex = index + "_shrunk"; int numDocs; if (isRunningAgainstOldCluster()) { - XContentBuilder mappingsAndSettings = jsonBuilder(); - mappingsAndSettings.startObject(); - { + final var createIndex = newXContentRequest(HttpMethod.PUT, "/" + index, (mappingsAndSettings, params) -> { mappingsAndSettings.startObject("mappings"); { mappingsAndSettings.startObject("properties"); @@ -477,11 +459,8 @@ public void testShrink() throws IOException { mappingsAndSettings.field("index.number_of_shards", 5); } mappingsAndSettings.endObject(); - } - mappingsAndSettings.endObject(); - - Request createIndex = new Request("PUT", "/" + index); - createIndex.setJsonEntity(Strings.toString(mappingsAndSettings)); + return mappingsAndSettings; + }); client().performRequest(createIndex); numDocs = randomIntBetween(512, 1024); @@ -495,14 +474,21 @@ public void testShrink() throws IOException { ensureGreen(index); // wait for source index to be available on both nodes before starting shrink - Request updateSettingsRequest = new Request("PUT", "/" + index + "/_settings"); - updateSettingsRequest.setJsonEntity("{\"settings\": {\"index.blocks.write\": true}}"); - client().performRequest(updateSettingsRequest); - - Request shrinkIndexRequest = new Request("PUT", "/" + index + "/_shrink/" + shrunkenIndex); + client().performRequest( + newXContentRequest( + HttpMethod.PUT, + "/" + index + "/_settings", + (builder, params) -> builder.startObject("settings").field("index.blocks.write", true).endObject() + ) + ); - shrinkIndexRequest.setJsonEntity("{\"settings\": {\"index.number_of_shards\": 1}}"); - client().performRequest(shrinkIndexRequest); + client().performRequest( + newXContentRequest( + HttpMethod.PUT, + "/" + index + "/_shrink/" + shrunkenIndex, + (builder, params) -> builder.startObject("settings").field("index.number_of_shards", 
1).endObject() + ) + ); refreshAllIndices(); } else { @@ -532,9 +518,7 @@ public void testShrinkAfterUpgrade() throws IOException { String shrunkenIndex = index + "_shrunk"; int numDocs; if (isRunningAgainstOldCluster()) { - XContentBuilder mappingsAndSettings = jsonBuilder(); - mappingsAndSettings.startObject(); - { + final var createIndex = newXContentRequest(HttpMethod.PUT, "/" + index, (mappingsAndSettings, params) -> { mappingsAndSettings.startObject("mappings"); { mappingsAndSettings.startObject("properties"); @@ -552,11 +536,8 @@ public void testShrinkAfterUpgrade() throws IOException { mappingsAndSettings.startObject("settings"); mappingsAndSettings.field("index.number_of_shards", 5); mappingsAndSettings.endObject(); - } - mappingsAndSettings.endObject(); - - Request createIndex = new Request("PUT", "/" + index); - createIndex.setJsonEntity(Strings.toString(mappingsAndSettings)); + return mappingsAndSettings; + }); client().performRequest(createIndex); numDocs = randomIntBetween(512, 1024); @@ -570,13 +551,21 @@ public void testShrinkAfterUpgrade() throws IOException { } else { ensureGreen(index); // wait for source index to be available on both nodes before starting shrink - Request updateSettingsRequest = new Request("PUT", "/" + index + "/_settings"); - updateSettingsRequest.setJsonEntity("{\"settings\": {\"index.blocks.write\": true}}"); - client().performRequest(updateSettingsRequest); + client().performRequest( + newXContentRequest( + HttpMethod.PUT, + "/" + index + "/_settings", + (builder, params) -> builder.startObject("settings").field("index.blocks.write", true).endObject() + ) + ); - Request shrinkIndexRequest = new Request("PUT", "/" + index + "/_shrink/" + shrunkenIndex); - shrinkIndexRequest.setJsonEntity("{\"settings\": {\"index.number_of_shards\": 1}}"); - client().performRequest(shrinkIndexRequest); + client().performRequest( + newXContentRequest( + HttpMethod.PUT, + "/" + index + "/_shrink/" + shrunkenIndex, + (builder, params) -> builder.startObject("settings").field("index.number_of_shards", 1).endObject() + ) + ); numDocs = countOfIndexedRandomDocuments(); } @@ -618,14 +607,13 @@ public void testShrinkAfterUpgrade() throws IOException { */ public void testRollover() throws IOException { if (isRunningAgainstOldCluster()) { - Request createIndex = new Request("PUT", "/" + index + "-000001"); - createIndex.setJsonEntity(Strings.format(""" - { - "aliases": { - "%s_write": {} - } - }""", index)); - client().performRequest(createIndex); + client().performRequest( + newXContentRequest( + HttpMethod.PUT, + "/" + index + "-000001", + (builder, params) -> builder.startObject("aliases").startObject(index + "_write").endObject().endObject() + ) + ); } int bulkCount = 10; @@ -641,10 +629,13 @@ public void testRollover() throws IOException { assertThat(EntityUtils.toString(client().performRequest(bulkRequest).getEntity()), containsString("\"errors\":false")); if (isRunningAgainstOldCluster()) { - Request rolloverRequest = new Request("POST", "/" + index + "_write/_rollover"); - rolloverRequest.setJsonEntity(""" - { "conditions": { "max_docs": 5 }}"""); - client().performRequest(rolloverRequest); + client().performRequest( + newXContentRequest( + HttpMethod.POST, + "/" + index + "_write/_rollover", + (builder, params) -> builder.startObject("conditions").field("max_docs", 5).endObject() + ) + ); assertThat( EntityUtils.toString(client().performRequest(new Request("GET", "/_cat/indices?v")).getEntity()), @@ -677,30 +668,53 @@ void assertBasicSearchWorks(int count) throws 
IOException { logger.info("--> testing basic search with sort"); { - Request searchRequest = new Request("GET", "/" + index + "/_search"); - searchRequest.setJsonEntity(""" - { "sort": [{ "int" : "asc" }]}"""); - Map response = entityAsMap(client().performRequest(searchRequest)); + Map response = entityAsMap( + client().performRequest( + newXContentRequest( + HttpMethod.GET, + "/" + index + "/_search", + (builder, params) -> builder.startArray("sort").startObject().field("int", "asc").endObject().endArray() + ) + ) + ); assertNoFailures(response); assertTotalHits(count, response); } logger.info("--> testing exists filter"); { - Request searchRequest = new Request("GET", "/" + index + "/_search"); - searchRequest.setJsonEntity(""" - { "query": { "exists" : {"field": "string"} }}"""); - Map response = entityAsMap(client().performRequest(searchRequest)); + Map response = entityAsMap( + client().performRequest( + newXContentRequest( + HttpMethod.GET, + "/" + index + "/_search", + (builder, params) -> builder.startObject("query") + .startObject("exists") + .field("field", "string") + .endObject() + .endObject() + ) + ) + ); assertNoFailures(response); assertTotalHits(count, response); } logger.info("--> testing field with dots in the name"); { - Request searchRequest = new Request("GET", "/" + index + "/_search"); - searchRequest.setJsonEntity(""" - { "query": { "exists" : {"field": "field.with.dots"} }}"""); - Map response = entityAsMap(client().performRequest(searchRequest)); + Map response = entityAsMap( + client().performRequest( + newXContentRequest( + HttpMethod.GET, + "/" + index + "/_search", + (builder, params) -> builder.startObject("query") + .startObject("exists") + .field("field", "field.with.dots") + .endObject() + .endObject() + ) + ) + ); assertNoFailures(response); assertTotalHits(count, response); } @@ -719,14 +733,23 @@ void assertAllSearchWorks(int count) throws IOException { assertNotNull(stringValue); String id = (String) bestHit.get("_id"); - Request explainRequest = new Request("GET", "/" + index + "/_explain/" + id); - explainRequest.setJsonEntity("{ \"query\": { \"match_all\" : {} }}"); - String explanation = toStr(client().performRequest(explainRequest)); + String explanation = toStr( + client().performRequest( + newXContentRequest( + HttpMethod.GET, + "/" + index + "/_explain/" + id, + (builder, params) -> builder.startObject("query").startObject("match_all").endObject().endObject() + ) + ) + ); assertFalse("Could not find payload boost in explanation\n" + explanation, explanation.contains("payloadBoost")); // Make sure the query can run on the whole index - Request searchRequest = new Request("GET", "/" + index + "/_search"); - searchRequest.setEntity(explainRequest.getEntity()); + Request searchRequest = newXContentRequest( + HttpMethod.GET, + "/" + index + "/_search", + (builder, params) -> builder.startObject("query").startObject("match_all").endObject().endObject() + ); searchRequest.addParameter("explain", "true"); Map matchAllResponse = entityAsMap(client().performRequest(searchRequest)); assertNoFailures(matchAllResponse); @@ -735,19 +758,22 @@ void assertAllSearchWorks(int count) throws IOException { void assertBasicAggregationWorks() throws IOException { // histogram on a long - Request longHistogramRequest = new Request("GET", "/" + index + "/_search"); - longHistogramRequest.setJsonEntity(""" - { - "aggs": { - "histo": { - "histogram": { - "field": "int", - "interval": 10 - } - } - } - }"""); - Map longHistogram = 
entityAsMap(client().performRequest(longHistogramRequest)); + Map longHistogram = entityAsMap( + client().performRequest( + newXContentRequest( + HttpMethod.GET, + "/" + index + "/_search", + (builder, params) -> builder.startObject("aggs") + .startObject("histo") + .startObject("histogram") + .field("field", "int") + .field("interval", 10) + .endObject() + .endObject() + .endObject() + ) + ) + ); assertNoFailures(longHistogram); List histoBuckets = (List) XContentMapValues.extractValue("aggregations.histo.buckets", longHistogram); int histoCount = 0; @@ -758,18 +784,21 @@ void assertBasicAggregationWorks() throws IOException { assertTotalHits(histoCount, longHistogram); // terms on a boolean - Request boolTermsRequest = new Request("GET", "/" + index + "/_search"); - boolTermsRequest.setJsonEntity(""" - { - "aggs": { - "bool_terms": { - "terms": { - "field": "bool" - } - } - } - }"""); - Map boolTerms = entityAsMap(client().performRequest(boolTermsRequest)); + Map boolTerms = entityAsMap( + client().performRequest( + newXContentRequest( + HttpMethod.GET, + "/" + index + "/_search", + (builder, params) -> builder.startObject("aggs") + .startObject("bool_terms") + .startObject("terms") + .field("field", "bool") + .endObject() + .endObject() + .endObject() + ) + ) + ); List termsBuckets = (List) XContentMapValues.extractValue("aggregations.bool_terms.buckets", boolTerms); int termsCount = 0; for (Object entry : termsBuckets) { @@ -780,22 +809,33 @@ void assertBasicAggregationWorks() throws IOException { } void assertRealtimeGetWorks() throws IOException { - Request disableAutoRefresh = new Request("PUT", "/" + index + "/_settings"); - disableAutoRefresh.setJsonEntity(""" - { "index": { "refresh_interval" : -1 }}"""); - client().performRequest(disableAutoRefresh); - - Request searchRequest = new Request("GET", "/" + index + "/_search"); - searchRequest.setJsonEntity(""" - { "query": { "match_all" : {} }}"""); - Map searchResponse = entityAsMap(client().performRequest(searchRequest)); + client().performRequest( + newXContentRequest( + HttpMethod.PUT, + "/" + index + "/_settings", + (builder, params) -> builder.startObject("index").field("refresh_interval", -1).endObject() + ) + ); + + Map searchResponse = entityAsMap( + client().performRequest( + newXContentRequest( + HttpMethod.GET, + "/" + index + "/_search", + (builder, params) -> builder.startObject("query").startObject("match_all").endObject().endObject() + ) + ) + ); Map hit = (Map) ((List) (XContentMapValues.extractValue("hits.hits", searchResponse))).get(0); String docId = (String) hit.get("_id"); - Request updateRequest = new Request("POST", "/" + index + "/_update/" + docId); - updateRequest.setJsonEntity(""" - { "doc" : { "foo": "bar"}}"""); - client().performRequest(updateRequest); + client().performRequest( + newXContentRequest( + HttpMethod.POST, + "/" + index + "/_update/" + docId, + (builder, params) -> builder.startObject("doc").field("foo", "bar").endObject() + ) + ); Request getRequest = new Request("GET", "/" + index + "/_doc/" + docId); @@ -803,23 +843,29 @@ void assertRealtimeGetWorks() throws IOException { Map source = (Map) getRsp.get("_source"); assertTrue("doc does not contain 'foo' key: " + source, source.containsKey("foo")); - Request enableAutoRefresh = new Request("PUT", "/" + index + "/_settings"); - enableAutoRefresh.setJsonEntity(""" - { "index": { "refresh_interval" : "1s" }}"""); - client().performRequest(enableAutoRefresh); + client().performRequest( + newXContentRequest( + HttpMethod.PUT, + "/" + index + 
"/_settings", + (builder, params) -> builder.startObject("index").field("refresh_interval", "1s").endObject() + ) + ); } void assertStoredBinaryFields(int count) throws Exception { - Request request = new Request("GET", "/" + index + "/_search"); - request.setJsonEntity(""" - { - "query": { - "match_all": {} - }, - "size": 100, - "stored_fields": "binary" - }"""); - Map rsp = entityAsMap(client().performRequest(request)); + final var restResponse = client().performRequest( + newXContentRequest( + HttpMethod.GET, + "/" + index + "/_search", + (builder, params) -> builder.startObject("query") + .startObject("match_all") + .endObject() + .endObject() + .field("size", 100) + .field("stored_fields", "binary") + ) + ); + Map rsp = entityAsMap(restResponse); assertTotalHits(count, rsp); List hits = (List) XContentMapValues.extractValue("hits.hits", rsp); @@ -828,9 +874,11 @@ void assertStoredBinaryFields(int count) throws Exception { Map hitRsp = (Map) hit; List values = (List) XContentMapValues.extractValue("fields.binary", hitRsp); assertEquals(1, values.size()); - String value = (String) values.get(0); - byte[] binaryValue = Base64.getDecoder().decode(value); - assertEquals("Unexpected string length [" + value + "]", 16, binaryValue.length); + byte[] binaryValue = switch (XContentType.fromMediaType(restResponse.getEntity().getContentType().getValue())) { + case JSON, VND_JSON -> Base64.getDecoder().decode((String) values.get(0)); + case SMILE, CBOR, YAML, VND_SMILE, VND_CBOR, VND_YAML -> (byte[]) values.get(0); + }; + assertEquals("Unexpected binary length [" + Base64.getEncoder().encodeToString(binaryValue) + "]", 16, binaryValue.length); } } @@ -969,76 +1017,80 @@ public void testSnapshotRestore() throws IOException { assertTotalHits(count, countResponse); // Stick a routing attribute into to cluster settings so we can see it after the restore - Request addRoutingSettings = new Request("PUT", "/_cluster/settings"); - addRoutingSettings.setJsonEntity(Strings.format(""" - {"persistent": {"cluster.routing.allocation.exclude.test_attr": "%s"}} - """, getOldClusterVersion())); - client().performRequest(addRoutingSettings); + client().performRequest( + newXContentRequest( + HttpMethod.PUT, + "/_cluster/settings", + (builder, params) -> builder.startObject("persistent") + .field("cluster.routing.allocation.exclude.test_attr", getOldClusterVersion()) + .endObject() + ) + ); // Stick a template into the cluster so we can see it after the restore - XContentBuilder templateBuilder = JsonXContent.contentBuilder().startObject(); - templateBuilder.field("index_patterns", "evil_*"); // Don't confuse other tests by applying the template - templateBuilder.startObject("settings"); - { - templateBuilder.field("number_of_shards", 1); - } - templateBuilder.endObject(); - templateBuilder.startObject("mappings"); - { + Request createTemplateRequest = newXContentRequest(HttpMethod.PUT, "/_template/test_template", (templateBuilder, params) -> { + templateBuilder.field("index_patterns", "evil_*"); // Don't confuse other tests by applying the template + templateBuilder.startObject("settings"); + { + templateBuilder.field("number_of_shards", 1); + } + templateBuilder.endObject(); + templateBuilder.startObject("mappings"); { - templateBuilder.startObject("_source"); { - templateBuilder.field("enabled", true); + templateBuilder.startObject("_source"); + { + templateBuilder.field("enabled", true); + } + templateBuilder.endObject(); } - templateBuilder.endObject(); } - } - templateBuilder.endObject(); - 
templateBuilder.startObject("aliases"); - { - templateBuilder.startObject("alias1").endObject(); - templateBuilder.startObject("alias2"); + templateBuilder.endObject(); + templateBuilder.startObject("aliases"); { - templateBuilder.startObject("filter"); + templateBuilder.startObject("alias1").endObject(); + templateBuilder.startObject("alias2"); { - templateBuilder.startObject("term"); + templateBuilder.startObject("filter"); { - templateBuilder.field("version", isRunningAgainstOldCluster() ? getOldClusterVersion() : Build.current().version()); + templateBuilder.startObject("term"); + { + templateBuilder.field( + "version", + isRunningAgainstOldCluster() ? getOldClusterVersion() : Build.current().version() + ); + } + templateBuilder.endObject(); } templateBuilder.endObject(); } templateBuilder.endObject(); } templateBuilder.endObject(); - } - templateBuilder.endObject().endObject(); - Request createTemplateRequest = new Request("PUT", "/_template/test_template"); - createTemplateRequest.setJsonEntity(Strings.toString(templateBuilder)); + return templateBuilder; + }); createTemplateRequest.setOptions(expectWarnings(RestPutIndexTemplateAction.DEPRECATION_WARNING)); - client().performRequest(createTemplateRequest); if (isRunningAgainstOldCluster()) { // Create the repo - XContentBuilder repoConfig = JsonXContent.contentBuilder().startObject(); - { + client().performRequest(newXContentRequest(HttpMethod.PUT, "/_snapshot/repo", (repoConfig, params) -> { repoConfig.field("type", "fs"); repoConfig.startObject("settings"); { repoConfig.field("compress", randomBoolean()); repoConfig.field("location", repoDirectory.getRoot().getPath()); } - repoConfig.endObject(); - } - repoConfig.endObject(); - Request createRepoRequest = new Request("PUT", "/_snapshot/repo"); - createRepoRequest.setJsonEntity(Strings.toString(repoConfig)); - client().performRequest(createRepoRequest); + return repoConfig.endObject(); + })); } - Request createSnapshot = new Request("PUT", "/_snapshot/repo/" + (isRunningAgainstOldCluster() ? "old_snap" : "new_snap")); + Request createSnapshot = newXContentRequest( + HttpMethod.PUT, + "/_snapshot/repo/" + (isRunningAgainstOldCluster() ? 
"old_snap" : "new_snap"), + (builder, params) -> builder.field("indices", index) + ); createSnapshot.addParameter("wait_for_completion", "true"); - createSnapshot.setJsonEntity("{\"indices\": \"" + index + "\"}"); client().performRequest(createSnapshot); checkSnapshot("old_snap", count, getOldClusterVersion(), getOldClusterIndexVersion()); @@ -1049,18 +1101,13 @@ public void testSnapshotRestore() throws IOException { public void testHistoryUUIDIsAdded() throws Exception { if (isRunningAgainstOldCluster()) { - XContentBuilder mappingsAndSettings = jsonBuilder(); - mappingsAndSettings.startObject(); - { + client().performRequest(newXContentRequest(HttpMethod.PUT, '/' + index, (mappingsAndSettings, params) -> { mappingsAndSettings.startObject("settings"); mappingsAndSettings.field("number_of_shards", 1); mappingsAndSettings.field("number_of_replicas", 1); mappingsAndSettings.endObject(); - } - mappingsAndSettings.endObject(); - Request createIndex = new Request("PUT", "/" + index); - createIndex.setJsonEntity(Strings.toString(mappingsAndSettings)); - client().performRequest(createIndex); + return mappingsAndSettings; + })); } else { ensureGreenLongWait(index); @@ -1092,9 +1139,7 @@ public void testHistoryUUIDIsAdded() throws Exception { public void testSoftDeletes() throws Exception { if (isRunningAgainstOldCluster()) { - XContentBuilder mappingsAndSettings = jsonBuilder(); - mappingsAndSettings.startObject(); - { + client().performRequest(newXContentRequest(HttpMethod.PUT, "/" + index, (mappingsAndSettings, params) -> { mappingsAndSettings.startObject("settings"); mappingsAndSettings.field("number_of_shards", 1); mappingsAndSettings.field("number_of_replicas", 1); @@ -1102,17 +1147,13 @@ public void testSoftDeletes() throws Exception { mappingsAndSettings.field("soft_deletes.enabled", true); } mappingsAndSettings.endObject(); - } - mappingsAndSettings.endObject(); - Request createIndex = new Request("PUT", "/" + index); - createIndex.setJsonEntity(Strings.toString(mappingsAndSettings)); - client().performRequest(createIndex); + return mappingsAndSettings; + })); int numDocs = between(10, 100); for (int i = 0; i < numDocs; i++) { - String doc = Strings.toString(JsonXContent.contentBuilder().startObject().field("field", "v1").endObject()); - Request request = new Request("POST", "/" + index + "/_doc/" + i); - request.setJsonEntity(doc); - client().performRequest(request); + client().performRequest( + newXContentRequest(HttpMethod.POST, "/" + index + "/_doc/" + i, (builder, params) -> builder.field("field", "v1")) + ); refreshAllIndices(); } client().performRequest(new Request("POST", "/" + index + "/_flush")); @@ -1120,10 +1161,9 @@ public void testSoftDeletes() throws Exception { assertTotalHits(liveDocs, entityAsMap(client().performRequest(new Request("GET", "/" + index + "/_search")))); for (int i = 0; i < numDocs; i++) { if (randomBoolean()) { - String doc = Strings.toString(JsonXContent.contentBuilder().startObject().field("field", "v2").endObject()); - Request request = new Request("POST", "/" + index + "/_doc/" + i); - request.setJsonEntity(doc); - client().performRequest(request); + client().performRequest( + newXContentRequest(HttpMethod.POST, "/" + index + "/_doc/" + i, (builder, params) -> builder.field("field", "v2")) + ); } else if (randomBoolean()) { client().performRequest(new Request("DELETE", "/" + index + "/_doc/" + i)); liveDocs--; @@ -1151,9 +1191,15 @@ public void testClosedIndices() throws Exception { if (randomBoolean()) { numDocs = between(1, 100); for (int i = 0; i < 
numDocs; i++) { - final Request request = new Request("POST", "/" + index + "/_doc/" + i); - request.setJsonEntity(Strings.toString(JsonXContent.contentBuilder().startObject().field("field", "v1").endObject())); - assertOK(client().performRequest(request)); + assertOK( + client().performRequest( + newXContentRequest( + HttpMethod.POST, + "/" + index + "/_doc/" + i, + (builder, params) -> builder.field("field", "v1") + ) + ) + ); if (rarely()) { refreshAllIndices(); } @@ -1252,22 +1298,29 @@ private void checkSnapshot(String snapshotName, int count, String tookOnVersion, ); // Remove the routing setting and template so we can test restoring them. - Request clearRoutingFromSettings = new Request("PUT", "/_cluster/settings"); - clearRoutingFromSettings.setJsonEntity(""" - {"persistent":{"cluster.routing.allocation.exclude.test_attr": null}}"""); - client().performRequest(clearRoutingFromSettings); + client().performRequest( + newXContentRequest( + HttpMethod.PUT, + "/_cluster/settings", + (builder, params) -> builder.startObject("persistent").nullField("cluster.routing.allocation.exclude.test_attr").endObject() + ) + ); + client().performRequest(new Request("DELETE", "/_template/test_template")); // Restore - XContentBuilder restoreCommand = JsonXContent.contentBuilder().startObject(); - restoreCommand.field("include_global_state", true); - restoreCommand.field("indices", index); - restoreCommand.field("rename_pattern", index); - restoreCommand.field("rename_replacement", "restored_" + index); - restoreCommand.endObject(); - Request restoreRequest = new Request("POST", "/_snapshot/repo/" + snapshotName + "/_restore"); + Request restoreRequest = newXContentRequest( + HttpMethod.POST, + "/_snapshot/repo/" + snapshotName + "/_restore", + (restoreCommand, params) -> { + restoreCommand.field("include_global_state", true); + restoreCommand.field("indices", index); + restoreCommand.field("rename_pattern", index); + restoreCommand.field("rename_replacement", "restored_" + index); + return restoreCommand; + } + ); restoreRequest.addParameter("wait_for_completion", "true"); - restoreRequest.setJsonEntity(Strings.toString(restoreCommand)); client().performRequest(restoreRequest); // Make sure search finds all documents @@ -1361,9 +1414,8 @@ private void indexRandomDocuments( } private void indexDocument(String id) throws IOException { - final Request indexRequest = new Request("POST", "/" + index + "/" + "_doc/" + id); - indexRequest.setJsonEntity(Strings.toString(JsonXContent.contentBuilder().startObject().field("f", "v").endObject())); - assertOK(client().performRequest(indexRequest)); + final var req = newXContentRequest(HttpMethod.POST, "/" + index + "/" + "_doc/" + id, (builder, params) -> builder.field("f", "v")); + assertOK(client().performRequest(req)); } private int countOfIndexedRandomDocuments() throws IOException { @@ -1371,13 +1423,9 @@ private int countOfIndexedRandomDocuments() throws IOException { } private void saveInfoDocument(String id, String value) throws IOException { - XContentBuilder infoDoc = JsonXContent.contentBuilder().startObject(); - infoDoc.field("value", value); - infoDoc.endObject(); // Only create the first version so we know how many documents are created when the index is first created - Request request = new Request("PUT", "/info/_doc/" + id); + Request request = newXContentRequest(HttpMethod.PUT, "/info/_doc/" + id, (builder, params) -> builder.field("value", value)); request.addParameter("op_type", "create"); - request.setJsonEntity(Strings.toString(infoDoc)); 
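// ---------------------------------------------------------------------------
// Aside: every hunk in this test class swaps hand-rolled XContentBuilder and
// setJsonEntity plumbing for the newXContentRequest(method, endpoint, body)
// helper. Below is a minimal sketch of what such a helper can look like. It is
// an illustration of the pattern, assuming the body lambda is a ToXContent
// that fills in the fields of a single top-level JSON object; it is not
// necessarily the exact ESRestTestCase implementation.
//
// import java.io.IOException;
// import io.netty.handler.codec.http.HttpMethod;
// import org.elasticsearch.client.Request;
// import org.elasticsearch.common.Strings;
// import org.elasticsearch.xcontent.ToXContent;
// import org.elasticsearch.xcontent.XContentBuilder;
// import org.elasticsearch.xcontent.json.JsonXContent;
public static Request newXContentRequest(HttpMethod method, String endpoint, ToXContent body) throws IOException {
    try (XContentBuilder builder = JsonXContent.contentBuilder()) {
        // Wrap the caller-supplied fields in one top-level object, matching the
        // (builder, params) -> builder.field(...) lambdas used throughout this diff.
        builder.startObject();
        body.toXContent(builder, ToXContent.EMPTY_PARAMS);
        builder.endObject();
        Request request = new Request(method.name(), endpoint);
        request.setJsonEntity(Strings.toString(builder));
        return request;
    }
}
// ---------------------------------------------------------------------------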
client().performRequest(request); } @@ -1422,19 +1470,13 @@ protected void ensureGreenLongWait(String indexName) throws IOException { public void testPeerRecoveryRetentionLeases() throws Exception { if (isRunningAgainstOldCluster()) { - XContentBuilder settings = jsonBuilder(); - settings.startObject(); - { + client().performRequest(newXContentRequest(HttpMethod.PUT, "/" + index, (settings, params) -> { settings.startObject("settings"); settings.field("number_of_shards", between(1, 5)); settings.field("number_of_replicas", between(0, 1)); settings.endObject(); - } - settings.endObject(); - - Request createIndex = new Request("PUT", "/" + index); - createIndex.setJsonEntity(Strings.toString(settings)); - client().performRequest(createIndex); + return settings; + })); } ensureGreen(index); ensurePeerRecoveryRetentionLeasesRenewedAndSynced(index); @@ -1536,18 +1578,19 @@ public void testResize() throws Exception { flush(index, randomBoolean()); } } - Request updateSettingsRequest = new Request("PUT", "/" + index + "/_settings"); - updateSettingsRequest.setJsonEntity("{\"settings\": {\"index.blocks.write\": true}}"); - client().performRequest(updateSettingsRequest); + final ToXContent settings0 = (builder, params) -> builder.startObject("settings").field("index.blocks.write", true).endObject(); + client().performRequest(newXContentRequest(HttpMethod.PUT, "/" + index + "/_settings", settings0)); { final String target = index + "_shrunken"; - Request shrinkRequest = new Request("PUT", "/" + index + "/_shrink/" + target); Settings.Builder settings = Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1); if (randomBoolean()) { settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true); } - shrinkRequest.setJsonEntity("{\"settings\":" + Strings.toString(settings.build()) + "}"); - client().performRequest(shrinkRequest); + client().performRequest(newXContentRequest(HttpMethod.PUT, "/" + index + "/_shrink/" + target, (builder, params) -> { + builder.startObject("settings"); + settings.build().toXContent(builder, params); + return builder.endObject(); + })); ensureGreenLongWait(target); assertNumHits(target, numDocs + moreDocs, 1); } @@ -1557,9 +1600,11 @@ public void testResize() throws Exception { if (randomBoolean()) { settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true); } - Request splitRequest = new Request("PUT", "/" + index + "/_split/" + target); - splitRequest.setJsonEntity("{\"settings\":" + Strings.toString(settings.build()) + "}"); - client().performRequest(splitRequest); + client().performRequest(newXContentRequest(HttpMethod.PUT, "/" + index + "/_split/" + target, (builder, params) -> { + builder.startObject("settings"); + settings.build().toXContent(builder, params); + return builder.endObject(); + })); ensureGreenLongWait(target); assertNumHits(target, numDocs + moreDocs, 6); } @@ -1584,9 +1629,13 @@ public void testSystemIndexMetadataIsUpgraded() throws Exception { + "access to system indices will be prevented by default"; if (isRunningAgainstOldCluster()) { // create index - Request createTestIndex = new Request("PUT", "/test_index_old"); - createTestIndex.setJsonEntity("{\"settings\": {\"index.number_of_replicas\": 0}}"); - client().performRequest(createTestIndex); + client().performRequest( + newXContentRequest( + HttpMethod.PUT, + "/test_index_old", + (builder, params) -> builder.startObject("settings").field("index.number_of_replicas", 0).endObject() + ) + ); Request bulk = new Request("POST", "/_bulk"); bulk.addParameter("refresh", 
"true"); @@ -1597,16 +1646,16 @@ public void testSystemIndexMetadataIsUpgraded() throws Exception { client().performRequest(bulk); // start a async reindex job - Request reindex = new Request("POST", "/_reindex"); - reindex.setJsonEntity(""" - { - "source":{ - "index":"test_index_old" - }, - "dest":{ - "index":"test_index_reindex" - } - }"""); + Request reindex = newXContentRequest( + HttpMethod.POST, + "/_reindex", + (builder, params) -> builder.startObject("source") + .field("index", "test_index_old") + .endObject() + .startObject("dest") + .field("index", "test_index_reindex") + .endObject() + ); reindex.addParameter("wait_for_completion", "false"); Map response = entityAsMap(client().performRequest(reindex)); String taskId = (String) response.get("task"); @@ -1640,14 +1689,18 @@ public void testSystemIndexMetadataIsUpgraded() throws Exception { // upgraded properly. If we're already on 8.x, skip this part of the test. if (clusterHasFeature(RestTestLegacyFeatures.SYSTEM_INDICES_REST_ACCESS_ENFORCED) == false) { // Create an alias to make sure it gets upgraded properly - Request putAliasRequest = new Request("POST", "/_aliases"); - putAliasRequest.setJsonEntity(""" - { - "actions": [ - {"add": {"index": ".tasks", "alias": "test-system-alias"}}, - {"add": {"index": "test_index_reindex", "alias": "test-system-alias"}} - ] - }"""); + Request putAliasRequest = newXContentRequest(HttpMethod.POST, "/_aliases", (builder, params) -> { + builder.startArray("actions"); + for (var index : List.of(".tasks", "test_index_reindex")) { + builder.startObject() + .startObject("add") + .field("index", index) + .field("alias", "test-system-alias") + .endObject() + .endObject(); + } + return builder.endArray(); + }); putAliasRequest.setOptions(expectVersionSpecificWarnings(v -> { v.current(systemIndexWarning); v.compatible(systemIndexWarning); @@ -1711,41 +1764,37 @@ public void testEnableSoftDeletesOnRestore() throws Exception { i -> jsonBuilder().startObject().field("field", "value").endObject() ); // create repo - XContentBuilder repoConfig = JsonXContent.contentBuilder().startObject(); - { + client().performRequest(newXContentRequest(HttpMethod.PUT, "/_snapshot/repo", (repoConfig, params) -> { repoConfig.field("type", "fs"); repoConfig.startObject("settings"); - { - repoConfig.field("compress", randomBoolean()); - repoConfig.field("location", repoDirectory.getRoot().getPath()); - } + repoConfig.field("compress", randomBoolean()); + repoConfig.field("location", repoDirectory.getRoot().getPath()); repoConfig.endObject(); - } - repoConfig.endObject(); - Request createRepoRequest = new Request("PUT", "/_snapshot/repo"); - createRepoRequest.setJsonEntity(Strings.toString(repoConfig)); - client().performRequest(createRepoRequest); + return repoConfig; + })); // create snapshot - Request createSnapshot = new Request("PUT", "/_snapshot/repo/" + snapshot); + Request createSnapshot = newXContentRequest( + HttpMethod.PUT, + "/_snapshot/repo/" + snapshot, + (builder, params) -> builder.field("indices", index) + ); createSnapshot.addParameter("wait_for_completion", "true"); - createSnapshot.setJsonEntity("{\"indices\": \"" + index + "\"}"); client().performRequest(createSnapshot); } else { String restoredIndex = "restored-" + index; // Restore - XContentBuilder restoreCommand = JsonXContent.contentBuilder().startObject(); - restoreCommand.field("indices", index); - restoreCommand.field("rename_pattern", index); - restoreCommand.field("rename_replacement", restoredIndex); - 
restoreCommand.startObject("index_settings"); - { - restoreCommand.field("index.soft_deletes.enabled", true); - } - restoreCommand.endObject(); - restoreCommand.endObject(); - Request restoreRequest = new Request("POST", "/_snapshot/repo/" + snapshot + "/_restore"); + Request restoreRequest = newXContentRequest( + HttpMethod.POST, + "/_snapshot/repo/" + snapshot + "/_restore", + (restoreCommand, params) -> { + restoreCommand.field("indices", index); + restoreCommand.field("rename_pattern", index); + restoreCommand.field("rename_replacement", restoredIndex); + restoreCommand.startObject("index_settings").field("index.soft_deletes.enabled", true).endObject(); + return restoreCommand; + } + ); restoreRequest.addParameter("wait_for_completion", "true"); - restoreRequest.setJsonEntity(Strings.toString(restoreCommand)); client().performRequest(restoreRequest); ensureGreen(restoredIndex); int numDocs = countOfIndexedRandomDocuments(); @@ -1768,40 +1817,36 @@ public void testForbidDisableSoftDeletesOnRestore() throws Exception { i -> jsonBuilder().startObject().field("field", "value").endObject() ); // create repo - XContentBuilder repoConfig = JsonXContent.contentBuilder().startObject(); - { + client().performRequest(newXContentRequest(HttpMethod.PUT, "/_snapshot/repo", (repoConfig, params) -> { repoConfig.field("type", "fs"); repoConfig.startObject("settings"); - { - repoConfig.field("compress", randomBoolean()); - repoConfig.field("location", repoDirectory.getRoot().getPath()); - } + repoConfig.field("compress", randomBoolean()); + repoConfig.field("location", repoDirectory.getRoot().getPath()); repoConfig.endObject(); - } - repoConfig.endObject(); - Request createRepoRequest = new Request("PUT", "/_snapshot/repo"); - createRepoRequest.setJsonEntity(Strings.toString(repoConfig)); - client().performRequest(createRepoRequest); + return repoConfig; + })); // create snapshot - Request createSnapshot = new Request("PUT", "/_snapshot/repo/" + snapshot); + Request createSnapshot = newXContentRequest( + HttpMethod.PUT, + "/_snapshot/repo/" + snapshot, + (builder, params) -> builder.field("indices", index) + ); createSnapshot.addParameter("wait_for_completion", "true"); - createSnapshot.setJsonEntity("{\"indices\": \"" + index + "\"}"); client().performRequest(createSnapshot); } else { // Restore - XContentBuilder restoreCommand = JsonXContent.contentBuilder().startObject(); - restoreCommand.field("indices", index); - restoreCommand.field("rename_pattern", index); - restoreCommand.field("rename_replacement", "restored-" + index); - restoreCommand.startObject("index_settings"); - { - restoreCommand.field("index.soft_deletes.enabled", false); - } - restoreCommand.endObject(); - restoreCommand.endObject(); - Request restoreRequest = new Request("POST", "/_snapshot/repo/" + snapshot + "/_restore"); + Request restoreRequest = newXContentRequest( + HttpMethod.POST, + "/_snapshot/repo/" + snapshot + "/_restore", + (restoreCommand, params) -> { + restoreCommand.field("indices", index); + restoreCommand.field("rename_pattern", index); + restoreCommand.field("rename_replacement", "restored-" + index); + restoreCommand.startObject("index_settings").field("index.soft_deletes.enabled", false).endObject(); + return restoreCommand; + } + ); restoreRequest.addParameter("wait_for_completion", "true"); - restoreRequest.setJsonEntity(Strings.toString(restoreCommand)); final ResponseException error = expectThrows(ResponseException.class, () -> client().performRequest(restoreRequest)); assertThat(error.getMessage(), 
containsString("cannot disable setting [index.soft_deletes.enabled] on restore")); } @@ -1818,21 +1863,16 @@ public void testTransportCompressionSetting() throws IOException { .orElse(false); assumeTrue("the old transport.compress setting existed before 7.14", originalClusterCompressSettingIsBoolean); if (isRunningAgainstOldCluster()) { - final Request putSettingsRequest = new Request("PUT", "/_cluster/settings"); - try (XContentBuilder builder = jsonBuilder()) { - builder.startObject(); - { - builder.startObject("persistent"); - { - builder.field("cluster.remote.foo.seeds", Collections.singletonList("localhost:9200")); - builder.field("cluster.remote.foo.transport.compress", "true"); - } - builder.endObject(); - } - builder.endObject(); - putSettingsRequest.setJsonEntity(Strings.toString(builder)); - } - client().performRequest(putSettingsRequest); + client().performRequest( + newXContentRequest( + HttpMethod.PUT, + "/_cluster/settings", + (builder, params) -> builder.startObject("persistent") + .field("cluster.remote.foo.seeds", Collections.singletonList("localhost:9200")) + .field("cluster.remote.foo.transport.compress", "true") + .endObject() + ) + ); } else { final Request getSettingsRequest = new Request("GET", "/_cluster/settings"); final Response getSettingsResponse = client().performRequest(getSettingsRequest); diff --git a/qa/multi-cluster-search/src/test/java/org/elasticsearch/search/MultiClusterSearchYamlTestSuiteIT.java b/qa/multi-cluster-search/src/test/java/org/elasticsearch/search/MultiClusterSearchYamlTestSuiteIT.java index 0f92a19098026..0ebd36ec50f1a 100644 --- a/qa/multi-cluster-search/src/test/java/org/elasticsearch/search/MultiClusterSearchYamlTestSuiteIT.java +++ b/qa/multi-cluster-search/src/test/java/org/elasticsearch/search/MultiClusterSearchYamlTestSuiteIT.java @@ -13,47 +13,61 @@ import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite; import org.apache.lucene.tests.util.TimeUnits; -import org.elasticsearch.Version; +import org.elasticsearch.test.rest.TestFeatureService; import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; import org.elasticsearch.test.rest.yaml.ClientYamlTestClient; import org.elasticsearch.test.rest.yaml.ClientYamlTestExecutionContext; import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; import org.junit.BeforeClass; +import java.util.Collections; +import java.util.HashSet; +import java.util.Set; + @TimeoutSuite(millis = 5 * TimeUnits.MINUTE) // to account for slow as hell VMs public class MultiClusterSearchYamlTestSuiteIT extends ESClientYamlSuiteTestCase { - private static Version remoteEsVersion = null; + private static String remoteEsVersion = null; @BeforeClass - public static void determineRemoteClusterMinimumVersion() { + public static void readRemoteClusterVersion() { String remoteClusterVersion = System.getProperty("tests.rest.remote_cluster_version"); if (remoteClusterVersion != null) { - remoteEsVersion = Version.fromString(remoteClusterVersion); + remoteEsVersion = remoteClusterVersion; } } + @Override protected ClientYamlTestExecutionContext createRestTestExecutionContext( ClientYamlTestCandidate clientYamlTestCandidate, - ClientYamlTestClient clientYamlTestClient + ClientYamlTestClient clientYamlTestClient, + final Set<String> nodesVersions, + final TestFeatureService testFeatureService, + final Set<String> osSet ) { - return new ClientYamlTestExecutionContext(clientYamlTestCandidate, clientYamlTestClient, randomizeContentType()) { + /* + * Since the esVersion is used to skip tests in 
ESClientYamlSuiteTestCase, we also take into account the + * remote cluster version here. This is used to skip tests if some feature isn't available on the remote cluster yet. + */ + final Set<String> commonVersions; + if (remoteEsVersion == null || nodesVersions.contains(remoteEsVersion)) { + commonVersions = nodesVersions; + } else { + var versionsCopy = new HashSet<>(nodesVersions); + versionsCopy.add(remoteEsVersion); + commonVersions = Collections.unmodifiableSet(versionsCopy); + } - /** - * Since the esVersion is used to skip tests in ESClientYamlSuiteTestCase, we also take into account the - * remote cluster version here and return it if it is lower than the local client version. This is used to - * skip tests if some feature isn't available on the remote cluster yet. - */ - @Override - public Version esVersion() { - Version clientEsVersion = clientYamlTestClient.getEsVersion(); - if (remoteEsVersion == null) { - return clientEsVersion; - } else { - return remoteEsVersion.before(clientEsVersion) ? remoteEsVersion : clientEsVersion; - } - } - }; + // TODO: same for os and features. Better to do that once this test(s) have been migrated to the new ElasticsearchCluster-based + // framework. See CcsCommonYamlTestSuiteIT for example. + return new ClientYamlTestExecutionContext( + clientYamlTestCandidate, + clientYamlTestClient, + randomizeContentType(), + commonVersions, + testFeatureService, + osSet + ); } @Override diff --git a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/DanglingIndicesRestIT.java b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/DanglingIndicesRestIT.java index 70df4aaeaf5de..f7f46671e2354 100644 --- a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/DanglingIndicesRestIT.java +++ b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/DanglingIndicesRestIT.java @@ -8,6 +8,8 @@ package org.elasticsearch.http; +import io.netty.handler.codec.http.HttpMethod; + import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.RestClient; @@ -18,6 +20,7 @@ import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.InternalTestCluster; import org.elasticsearch.test.XContentTestUtils; +import org.elasticsearch.test.rest.ESRestTestCase; import java.io.IOException; import java.util.ArrayList; @@ -213,22 +216,12 @@ private Map<String, String> createIndices(String... 
indices) throws IOException { assert indices.length > 0; for (String index : indices) { - String indexSettings = """ - { - "settings": { - "index": { - "number_of_shards": 1, - "number_of_replicas": 2, - "routing": { - "allocation": { - "total_shards_per_node": 1 - } - } - } - } - }"""; - Request request = new Request("PUT", "/" + index); - request.setJsonEntity(indexSettings); + final var request = ESRestTestCase.newXContentRequest(HttpMethod.PUT, "/" + index, (builder, params) -> { + builder.startObject("settings").startObject("index"); + builder.field("number_of_shards", 1).field("number_of_replicas", 2); + builder.startObject("routing").startObject("allocation").field("total_shards_per_node", 1).endObject().endObject(); + return builder.endObject().endObject(); + }); assertOK(getRestClient().performRequest(request)); } ensureGreen(indices); diff --git a/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/java/org/elasticsearch/ingest/IngestDocumentMustacheIT.java b/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/java/org/elasticsearch/ingest/IngestDocumentMustacheIT.java index 724f5c2d51be6..f5a1839001e5c 100644 --- a/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/java/org/elasticsearch/ingest/IngestDocumentMustacheIT.java +++ b/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/java/org/elasticsearch/ingest/IngestDocumentMustacheIT.java @@ -23,10 +23,13 @@ public void testAccessMetadataViaTemplate() { Map<String, Object> document = new HashMap<>(); document.put("foo", "bar"); IngestDocument ingestDocument = new IngestDocument("index", "id", 1, null, null, document); - ingestDocument.setFieldValue(compile("field1"), ValueSource.wrap("1 {{foo}}", scriptService)); + ingestDocument.setFieldValue(ingestDocument.renderTemplate(compile("field1")), ValueSource.wrap("1 {{foo}}", scriptService)); assertThat(ingestDocument.getFieldValue("field1", String.class), equalTo("1 bar")); - ingestDocument.setFieldValue(compile("field1"), ValueSource.wrap("2 {{_source.foo}}", scriptService)); + ingestDocument.setFieldValue( + ingestDocument.renderTemplate(compile("field1")), + ValueSource.wrap("2 {{_source.foo}}", scriptService) + ); assertThat(ingestDocument.getFieldValue("field1", String.class), equalTo("2 bar")); } @@ -38,11 +41,14 @@ public void testAccessMapMetadataViaTemplate() { innerObject.put("qux", Collections.singletonMap("fubar", "hello qux and fubar")); document.put("foo", innerObject); IngestDocument ingestDocument = new IngestDocument("index", "id", 1, null, null, document); - ingestDocument.setFieldValue(compile("field1"), ValueSource.wrap("1 {{foo.bar}} {{foo.baz}} {{foo.qux.fubar}}", scriptService)); + ingestDocument.setFieldValue( + ingestDocument.renderTemplate(compile("field1")), + ValueSource.wrap("1 {{foo.bar}} {{foo.baz}} {{foo.qux.fubar}}", scriptService) + ); assertThat(ingestDocument.getFieldValue("field1", String.class), equalTo("1 hello bar hello baz hello qux and fubar")); ingestDocument.setFieldValue( - compile("field1"), + ingestDocument.renderTemplate(compile("field1")), ValueSource.wrap("2 {{_source.foo.bar}} {{_source.foo.baz}} {{_source.foo.qux.fubar}}", scriptService) ); assertThat(ingestDocument.getFieldValue("field1", String.class), equalTo("2 hello bar hello baz hello qux and fubar")); @@ -58,7 +64,10 @@ public void testAccessListMetadataViaTemplate() { list.add(null); document.put("list2", list); IngestDocument ingestDocument = new IngestDocument("index", "id", 1, null, null, document); - ingestDocument.setFieldValue(compile("field1"), 
ValueSource.wrap("1 {{list1.0}} {{list2.0}}", scriptService)); + ingestDocument.setFieldValue( + ingestDocument.renderTemplate(compile("field1")), + ValueSource.wrap("1 {{list1.0}} {{list2.0}}", scriptService) + ); assertThat(ingestDocument.getFieldValue("field1", String.class), equalTo("1 foo {field=value}")); } @@ -69,7 +78,7 @@ public void testAccessIngestMetadataViaTemplate() { document.put("_ingest", ingestMap); IngestDocument ingestDocument = new IngestDocument("index", "id", 1, null, null, document); ingestDocument.setFieldValue( - compile("ingest_timestamp"), + ingestDocument.renderTemplate(compile("ingest_timestamp")), ValueSource.wrap("{{_ingest.timestamp}} and {{_source._ingest.timestamp}}", scriptService) ); assertThat( diff --git a/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/java/org/elasticsearch/ingest/ValueSourceMustacheIT.java b/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/java/org/elasticsearch/ingest/ValueSourceMustacheIT.java index c93ef30731960..df4c5827cebc1 100644 --- a/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/java/org/elasticsearch/ingest/ValueSourceMustacheIT.java +++ b/qa/smoke-test-ingest-with-all-dependencies/src/yamlRestTest/java/org/elasticsearch/ingest/ValueSourceMustacheIT.java @@ -57,9 +57,9 @@ public void testValueSourceWithTemplates() { public void testAccessSourceViaTemplate() { IngestDocument ingestDocument = new IngestDocument("marvel", "id", 1, null, null, new HashMap<>()); assertThat(ingestDocument.hasField("marvel"), is(false)); - ingestDocument.setFieldValue(compile("{{_index}}"), ValueSource.wrap("{{_index}}", scriptService)); + ingestDocument.setFieldValue(ingestDocument.renderTemplate(compile("{{_index}}")), ValueSource.wrap("{{_index}}", scriptService)); assertThat(ingestDocument.getFieldValue("marvel", String.class), equalTo("marvel")); - ingestDocument.removeField(compile("{{marvel}}")); + ingestDocument.removeField(ingestDocument.renderTemplate(compile("{{marvel}}"))); assertThat(ingestDocument.hasField("index"), is(false)); } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/connector.check_in.json b/rest-api-spec/src/main/resources/rest-api-spec/api/connector.check_in.json index e95621d30fc16..36535109df8e7 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/connector.check_in.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/connector.check_in.json @@ -1,7 +1,7 @@ { "connector.check_in": { "documentation": { - "url": "https://www.elastic.co/guide/en/enterprise-search/current/connectors.html", + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/check-in-connector-api.html", "description": "Updates the last_seen timestamp in the connector document." }, "stability": "experimental", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/connector.delete.json b/rest-api-spec/src/main/resources/rest-api-spec/api/connector.delete.json index dcb3a4f83c287..88c4e85dac2ef 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/connector.delete.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/connector.delete.json @@ -1,7 +1,7 @@ { "connector.delete": { "documentation": { - "url": "https://www.elastic.co/guide/en/enterprise-search/current/connectors.html", + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-connector-api.html", "description": "Deletes a connector." 
}, "stability": "experimental", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/connector.get.json b/rest-api-spec/src/main/resources/rest-api-spec/api/connector.get.json index bcddef8cb5cb9..2645df28c5d1e 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/connector.get.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/connector.get.json @@ -1,7 +1,7 @@ { "connector.get": { "documentation": { - "url": "https://www.elastic.co/guide/en/enterprise-search/current/connectors.html", + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/get-connector-api.html", "description": "Returns the details about a connector." }, "stability": "experimental", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/connector.last_sync.json b/rest-api-spec/src/main/resources/rest-api-spec/api/connector.last_sync.json index 7bc1504253070..f6d93555b72ed 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/connector.last_sync.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/connector.last_sync.json @@ -1,7 +1,7 @@ { "connector.last_sync": { "documentation": { - "url": "https://www.elastic.co/guide/en/enterprise-search/current/connectors.html", + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-last-sync-api.html", "description": "Updates the stats of last sync in the connector document." }, "stability": "experimental", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/connector.list.json b/rest-api-spec/src/main/resources/rest-api-spec/api/connector.list.json index 852a5fbd85998..bc8f12a933b1e 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/connector.list.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/connector.list.json @@ -1,7 +1,7 @@ { "connector.list": { "documentation": { - "url": "https://www.elastic.co/guide/en/enterprise-search/current/connectors.html", + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/list-connector-api.html", "description": "Lists all connectors." }, "stability": "experimental", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/connector.post.json b/rest-api-spec/src/main/resources/rest-api-spec/api/connector.post.json index aadb59e99af7a..edc865012876e 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/connector.post.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/connector.post.json @@ -1,7 +1,7 @@ { "connector.post": { "documentation": { - "url": "https://www.elastic.co/guide/en/enterprise-search/current/connectors.html", + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/create-connector-api.html", "description": "Creates a connector." }, "stability": "experimental", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/connector.put.json b/rest-api-spec/src/main/resources/rest-api-spec/api/connector.put.json index 0ab5c18671040..af733de6aa06c 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/connector.put.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/connector.put.json @@ -1,7 +1,7 @@ { "connector.put": { "documentation": { - "url": "https://www.elastic.co/guide/en/enterprise-search/current/connectors.html", + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/create-connector-api.html", "description": "Creates or updates a connector." 
}, "stability": "experimental", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/connector.update_configuration.json b/rest-api-spec/src/main/resources/rest-api-spec/api/connector.update_configuration.json index a82f9e0f29225..1ececd7ea95f7 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/connector.update_configuration.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/connector.update_configuration.json @@ -1,7 +1,7 @@ { "connector.update_configuration": { "documentation": { - "url": "https://www.elastic.co/guide/en/enterprise-search/current/connectors.html", + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-configuration-api.html", "description": "Updates the connector configuration." }, "stability": "experimental", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/connector.update_error.json b/rest-api-spec/src/main/resources/rest-api-spec/api/connector.update_error.json index 51d5a1b25973b..150f71ad033ac 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/connector.update_error.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/connector.update_error.json @@ -1,7 +1,7 @@ { "connector.update_error": { "documentation": { - "url": "https://www.elastic.co/guide/en/enterprise-search/current/connectors.html", + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-error-api.html", "description": "Updates the error field in the connector document." }, "stability": "experimental", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/connector.update_filtering.json b/rest-api-spec/src/main/resources/rest-api-spec/api/connector.update_filtering.json index b9815fc111c06..c2a9bf0720746 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/connector.update_filtering.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/connector.update_filtering.json @@ -1,7 +1,7 @@ { "connector.update_filtering": { "documentation": { - "url": "https://www.elastic.co/guide/en/enterprise-search/current/connectors.html", + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-filtering-api.html", "description": "Updates the filtering field in the connector document." }, "stability": "experimental", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/connector.update_name.json b/rest-api-spec/src/main/resources/rest-api-spec/api/connector.update_name.json index dabac5599932b..a7ca1a9730ab9 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/connector.update_name.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/connector.update_name.json @@ -1,7 +1,7 @@ { "connector.update_name": { "documentation": { - "url": "https://www.elastic.co/guide/en/enterprise-search/current/connectors.html", + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-name-description-api.html", "description": "Updates the name and/or description fields in the connector document." 
}, "stability": "experimental", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/connector.update_pipeline.json b/rest-api-spec/src/main/resources/rest-api-spec/api/connector.update_pipeline.json index 25687e41a48de..b7ab6abcf088d 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/connector.update_pipeline.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/connector.update_pipeline.json @@ -1,7 +1,7 @@ { "connector.update_pipeline": { "documentation": { - "url": "https://www.elastic.co/guide/en/enterprise-search/current/connectors.html", + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-pipeline-api.html", "description": "Updates the pipeline field in the connector document." }, "stability": "experimental", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/connector.update_scheduling.json b/rest-api-spec/src/main/resources/rest-api-spec/api/connector.update_scheduling.json index 8d934b8025145..98cee5c257b90 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/connector.update_scheduling.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/connector.update_scheduling.json @@ -1,7 +1,7 @@ { "connector.update_scheduling": { "documentation": { - "url": "https://www.elastic.co/guide/en/enterprise-search/current/connectors.html", + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/update-connector-scheduling-api.html", "description": "Updates the scheduling field in the connector document." }, "stability": "experimental", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/connector_sync_job.cancel.json b/rest-api-spec/src/main/resources/rest-api-spec/api/connector_sync_job.cancel.json index dbea6935f8a87..1e8cf154cf652 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/connector_sync_job.cancel.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/connector_sync_job.cancel.json @@ -1,7 +1,7 @@ { "connector_sync_job.cancel": { "documentation": { - "url": "https://www.elastic.co/guide/en/enterprise-search/current/connectors.html", + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/cancel-connector-sync-job-api.html", "description": "Cancels a connector sync job." }, "stability": "experimental", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/connector_sync_job.check_in.json b/rest-api-spec/src/main/resources/rest-api-spec/api/connector_sync_job.check_in.json index 8193d92395255..a6c96f506b115 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/connector_sync_job.check_in.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/connector_sync_job.check_in.json @@ -1,7 +1,7 @@ { "connector_sync_job.check_in": { "documentation": { - "url": "https://www.elastic.co/guide/en/enterprise-search/current/connectors.html", + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/check-in-connector-sync-job-api.html", "description": "Checks in a connector sync job (refreshes 'last_seen')." 
}, "stability": "experimental", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/connector_sync_job.delete.json b/rest-api-spec/src/main/resources/rest-api-spec/api/connector_sync_job.delete.json index ba9b5095a5275..11894a48db576 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/connector_sync_job.delete.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/connector_sync_job.delete.json @@ -1,7 +1,7 @@ { "connector_sync_job.delete": { "documentation": { - "url": "https://www.elastic.co/guide/en/enterprise-search/current/connectors.html", + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/delete-connector-sync-job-api.html", "description": "Deletes a connector sync job." }, "stability": "experimental", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/connector_sync_job.error.json b/rest-api-spec/src/main/resources/rest-api-spec/api/connector_sync_job.error.json index 394e6e2fcb38f..c6fbd15559e2d 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/connector_sync_job.error.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/connector_sync_job.error.json @@ -1,7 +1,7 @@ { "connector_sync_job.error": { "documentation": { - "url": "https://www.elastic.co/guide/en/enterprise-search/current/connectors.html", + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/set-connector-sync-job-error-api.html", "description": "Sets an error for a connector sync job." }, "stability": "experimental", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/connector_sync_job.get.json b/rest-api-spec/src/main/resources/rest-api-spec/api/connector_sync_job.get.json index d0f14b0001bd8..6dd29069badc4 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/connector_sync_job.get.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/connector_sync_job.get.json @@ -1,7 +1,7 @@ { "connector_sync_job.get": { "documentation": { - "url": "https://www.elastic.co/guide/en/enterprise-search/current/connectors.html", + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/get-connector-sync-job-api.html", "description": "Returns the details about a connector sync job." }, "stability": "experimental", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/connector_sync_job.list.json b/rest-api-spec/src/main/resources/rest-api-spec/api/connector_sync_job.list.json index 86995477f060a..7b816cae1cd00 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/connector_sync_job.list.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/connector_sync_job.list.json @@ -1,7 +1,7 @@ { "connector_sync_job.list": { "documentation": { - "url": "https://www.elastic.co/guide/en/enterprise-search/current/connectors.html", + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/list-connector-sync-jobs-api.html", "description": "Lists all connector sync jobs." 
}, "stability": "experimental", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/connector_sync_job.post.json b/rest-api-spec/src/main/resources/rest-api-spec/api/connector_sync_job.post.json index 1db58c31dfa38..8050b34014d2c 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/connector_sync_job.post.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/connector_sync_job.post.json @@ -1,7 +1,7 @@ { "connector_sync_job.post": { "documentation": { - "url": "https://www.elastic.co/guide/en/enterprise-search/current/connectors.html", + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/create-connector-sync-job-api.html", "description": "Creates a connector sync job." }, "stability": "experimental", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/connector_sync_job.update_stats.json b/rest-api-spec/src/main/resources/rest-api-spec/api/connector_sync_job.update_stats.json index 825e5d8939e2d..d5f18df0a74da 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/connector_sync_job.update_stats.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/connector_sync_job.update_stats.json @@ -1,7 +1,7 @@ { "connector_sync_job.update_stats": { "documentation": { - "url": "https://www.elastic.co/guide/en/enterprise-search/current/connectors.html", + "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/set-connector-sync-job-stats-api.html", "description": "Updates the stats fields in the connector sync job document." }, "stability": "experimental", diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/profiling.flamegraph.json b/rest-api-spec/src/main/resources/rest-api-spec/api/profiling.flamegraph.json new file mode 100644 index 0000000000000..f9cec6663b417 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/profiling.flamegraph.json @@ -0,0 +1,27 @@ +{ + "profiling.flamegraph":{ + "documentation":{ + "url":"https://www.elastic.co/guide/en/observability/current/universal-profiling.html", + "description":"Extracts a UI-optimized structure to render flamegraphs from Universal Profiling." + }, + "stability":"stable", + "visibility":"private", + "headers":{ + "accept": ["application/json"] + }, + "url":{ + "paths":[ + { + "path":"/_profiling/flamegraph", + "methods":[ + "POST" + ] + } + ] + }, + "body":{ + "description":"The filter conditions for the flamegraph", + "required":true + } + } +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/profiling.stacktraces.json b/rest-api-spec/src/main/resources/rest-api-spec/api/profiling.stacktraces.json new file mode 100644 index 0000000000000..547e2d628bd20 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/profiling.stacktraces.json @@ -0,0 +1,27 @@ +{ + "profiling.stacktraces":{ + "documentation":{ + "url":"https://www.elastic.co/guide/en/observability/current/universal-profiling.html", + "description":"Extracts raw stacktrace information from Universal Profiling." 
+ },\n + "stability":"stable", + "visibility":"private", + "headers":{ + "accept": ["application/json"] + }, + "url":{ + "paths":[ + { + "path":"/_profiling/stacktraces", + "methods":[ + "POST" + ] + } + ] + }, + "body":{ + "description":"The filter conditions for stacktraces", + "required":true + } + } +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/security.get_api_key.json b/rest-api-spec/src/main/resources/rest-api-spec/api/security.get_api_key.json index bf782e96a0499..452ad7cef607c 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/security.get_api_key.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/security.get_api_key.json @@ -45,6 +45,11 @@ "type":"boolean", "default":false, "description": "flag to show the limited-by role descriptors of API Keys" + }, + "active_only":{ + "type":"boolean", + "default":false, + "description": "flag to limit response to only active (not invalidated or expired) API keys" } } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/ReloadSecureSettingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/ReloadSecureSettingsIT.java index 4aa3598608fb6..94463cfa33271 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/ReloadSecureSettingsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/ReloadSecureSettingsIT.java @@ -10,7 +10,9 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.cluster.node.reload.NodesReloadSecureSettingsRequest; import org.elasticsearch.action.admin.cluster.node.reload.NodesReloadSecureSettingsResponse; +import org.elasticsearch.action.admin.cluster.node.reload.TransportNodesReloadSecureSettingsAction; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.KeyStoreWrapper; @@ -58,6 +60,20 @@ public static void disableInFips() { ); } + private static void executeReloadSecureSettings( + String[] nodeIds, + SecureString password, + ActionListener<NodesReloadSecureSettingsResponse> listener + ) { + final var request = new NodesReloadSecureSettingsRequest(nodeIds); + request.setSecureStorePassword(password); + client().execute(TransportNodesReloadSecureSettingsAction.TYPE, request, listener); + } + + private static SecureString emptyPassword() { + return randomBoolean() ? new SecureString(new char[0]) : null; + } + public void testMissingKeystoreFile() throws Exception { final PluginsService pluginsService = internalCluster().getInstance(PluginsService.class); final MockReloadablePlugin mockReloadablePlugin = pluginsService.filterPlugins(MockReloadablePlugin.class).findFirst().get(); @@ -67,36 +83,32 @@ public void testMissingKeystoreFile() throws Exception { Files.deleteIfExists(KeyStoreWrapper.keystorePath(environment.configFile())); final int initialReloadCount = mockReloadablePlugin.getReloadCount(); final CountDownLatch latch = new CountDownLatch(1); - final SecureString emptyPassword = randomBoolean() ? 
new SecureString(new char[0]) : null; - clusterAdmin().prepareReloadSecureSettings() - .setSecureStorePassword(emptyPassword) - .setNodesIds(Strings.EMPTY_ARRAY) - .execute(new ActionListener() { - @Override - public void onResponse(NodesReloadSecureSettingsResponse nodesReloadResponse) { - try { - assertThat(nodesReloadResponse, notNullValue()); - final Map nodesMap = nodesReloadResponse.getNodesMap(); - assertThat(nodesMap.size(), equalTo(cluster().size())); - for (final NodesReloadSecureSettingsResponse.NodeResponse nodeResponse : nodesReloadResponse.getNodes()) { - assertThat(nodeResponse.reloadException(), notNullValue()); - assertThat(nodeResponse.reloadException(), instanceOf(IllegalStateException.class)); - assertThat(nodeResponse.reloadException().getMessage(), containsString("Keystore is missing")); - } - } catch (final AssertionError e) { - reloadSettingsError.set(e); - } finally { - latch.countDown(); + executeReloadSecureSettings(Strings.EMPTY_ARRAY, emptyPassword(), new ActionListener<>() { + @Override + public void onResponse(NodesReloadSecureSettingsResponse nodesReloadResponse) { + try { + assertThat(nodesReloadResponse, notNullValue()); + final Map nodesMap = nodesReloadResponse.getNodesMap(); + assertThat(nodesMap.size(), equalTo(cluster().size())); + for (final NodesReloadSecureSettingsResponse.NodeResponse nodeResponse : nodesReloadResponse.getNodes()) { + assertThat(nodeResponse.reloadException(), notNullValue()); + assertThat(nodeResponse.reloadException(), instanceOf(IllegalStateException.class)); + assertThat(nodeResponse.reloadException().getMessage(), containsString("Keystore is missing")); } - } - - @Override - public void onFailure(Exception e) { - reloadSettingsError.set(new AssertionError("Nodes request failed", e)); + } catch (final AssertionError e) { + reloadSettingsError.set(e); + } finally { latch.countDown(); } - }); - latch.await(); + } + + @Override + public void onFailure(Exception e) { + reloadSettingsError.set(new AssertionError("Nodes request failed", e)); + latch.countDown(); + } + }); + safeAwait(latch); if (reloadSettingsError.get() != null) { throw reloadSettingsError.get(); } @@ -118,34 +130,30 @@ public void testInvalidKeystoreFile() throws Exception { Files.copy(keystore, KeyStoreWrapper.keystorePath(environment.configFile()), StandardCopyOption.REPLACE_EXISTING); } final CountDownLatch latch = new CountDownLatch(1); - final SecureString emptyPassword = randomBoolean() ? 
new SecureString(new char[0]) : null; - clusterAdmin().prepareReloadSecureSettings() - .setSecureStorePassword(emptyPassword) - .setNodesIds(Strings.EMPTY_ARRAY) - .execute(new ActionListener() { - @Override - public void onResponse(NodesReloadSecureSettingsResponse nodesReloadResponse) { - try { - assertThat(nodesReloadResponse, notNullValue()); - final Map nodesMap = nodesReloadResponse.getNodesMap(); - assertThat(nodesMap.size(), equalTo(cluster().size())); - for (final NodesReloadSecureSettingsResponse.NodeResponse nodeResponse : nodesReloadResponse.getNodes()) { - assertThat(nodeResponse.reloadException(), notNullValue()); - } - } catch (final AssertionError e) { - reloadSettingsError.set(e); - } finally { - latch.countDown(); + executeReloadSecureSettings(Strings.EMPTY_ARRAY, emptyPassword(), new ActionListener<>() { + @Override + public void onResponse(NodesReloadSecureSettingsResponse nodesReloadResponse) { + try { + assertThat(nodesReloadResponse, notNullValue()); + final Map nodesMap = nodesReloadResponse.getNodesMap(); + assertThat(nodesMap.size(), equalTo(cluster().size())); + for (final NodesReloadSecureSettingsResponse.NodeResponse nodeResponse : nodesReloadResponse.getNodes()) { + assertThat(nodeResponse.reloadException(), notNullValue()); } - } - - @Override - public void onFailure(Exception e) { - reloadSettingsError.set(new AssertionError("Nodes request failed", e)); + } catch (final AssertionError e) { + reloadSettingsError.set(e); + } finally { latch.countDown(); } - }); - latch.await(); + } + + @Override + public void onFailure(Exception e) { + reloadSettingsError.set(new AssertionError("Nodes request failed", e)); + latch.countDown(); + } + }); + safeAwait(latch); if (reloadSettingsError.get() != null) { throw reloadSettingsError.get(); } @@ -162,31 +170,27 @@ public void testReloadAllNodesWithPasswordWithoutTLSFails() throws Exception { final char[] password = randomAlphaOfLength(12).toCharArray(); writeEmptyKeystore(environment, password); final CountDownLatch latch = new CountDownLatch(1); - clusterAdmin().prepareReloadSecureSettings() - // No filter should try to hit all nodes - .setNodesIds(Strings.EMPTY_ARRAY) - .setSecureStorePassword(new SecureString(password)) - .execute(new ActionListener() { - @Override - public void onResponse(NodesReloadSecureSettingsResponse nodesReloadResponse) { - reloadSettingsError.set(new AssertionError("Nodes request succeeded when it should have failed", null)); - latch.countDown(); - } + executeReloadSecureSettings(Strings.EMPTY_ARRAY, new SecureString(password), new ActionListener<>() { + @Override + public void onResponse(NodesReloadSecureSettingsResponse nodesReloadResponse) { + reloadSettingsError.set(new AssertionError("Nodes request succeeded when it should have failed", null)); + latch.countDown(); + } - @Override - public void onFailure(Exception e) { - try { - assertThat(e, instanceOf(ElasticsearchException.class)); - assertThat( - e.getMessage(), - containsString("Secure settings cannot be updated cluster wide when TLS for the transport layer is not enabled") - ); - } finally { - latch.countDown(); - } + @Override + public void onFailure(Exception e) { + try { + assertThat(e, instanceOf(ElasticsearchException.class)); + assertThat( + e.getMessage(), + containsString("Secure settings cannot be updated cluster wide when TLS for the transport layer is not enabled") + ); + } finally { + latch.countDown(); } - }); - latch.await(); + } + }); + safeAwait(latch); if (reloadSettingsError.get() != null) { throw 
reloadSettingsError.get(); } @@ -200,33 +204,30 @@ public void testReloadLocalNodeWithPasswordWithoutTLSSucceeds() throws Exception final char[] password = randomAlphaOfLength(12).toCharArray(); writeEmptyKeystore(environment, password); final CountDownLatch latch = new CountDownLatch(1); - clusterAdmin().prepareReloadSecureSettings() - .setNodesIds("_local") - .setSecureStorePassword(new SecureString(password)) - .execute(new ActionListener<NodesReloadSecureSettingsResponse>() { - @Override - public void onResponse(NodesReloadSecureSettingsResponse nodesReloadResponse) { - try { - assertThat(nodesReloadResponse, notNullValue()); - final Map<String, NodesReloadSecureSettingsResponse.NodeResponse> nodesMap = nodesReloadResponse.getNodesMap(); - assertThat(nodesMap.size(), equalTo(1)); - assertThat(nodesReloadResponse.getNodes().size(), equalTo(1)); - final NodesReloadSecureSettingsResponse.NodeResponse nodeResponse = nodesReloadResponse.getNodes().get(0); - assertThat(nodeResponse.reloadException(), nullValue()); - } catch (final AssertionError e) { - reloadSettingsError.set(e); - } finally { - latch.countDown(); - } - } - - @Override - public void onFailure(Exception e) { - reloadSettingsError.set(new AssertionError("Nodes request failed", e)); + executeReloadSecureSettings(new String[] { "_local" }, new SecureString(password), new ActionListener<>() { + @Override + public void onResponse(NodesReloadSecureSettingsResponse nodesReloadResponse) { + try { + assertThat(nodesReloadResponse, notNullValue()); + final Map<String, NodesReloadSecureSettingsResponse.NodeResponse> nodesMap = nodesReloadResponse.getNodesMap(); + assertThat(nodesMap.size(), equalTo(1)); + assertThat(nodesReloadResponse.getNodes().size(), equalTo(1)); + final NodesReloadSecureSettingsResponse.NodeResponse nodeResponse = nodesReloadResponse.getNodes().get(0); + assertThat(nodeResponse.reloadException(), nullValue()); + } catch (final AssertionError e) { + reloadSettingsError.set(e); + } finally { latch.countDown(); } - }); - latch.await(); + } + + @Override + public void onFailure(Exception e) { + reloadSettingsError.set(new AssertionError("Nodes request failed", e)); + latch.countDown(); + } + }); + safeAwait(latch); if (reloadSettingsError.get() != null) { throw reloadSettingsError.get(); } @@ -241,10 +242,10 @@ public void testWrongKeystorePassword() throws Exception { // "some" keystore should be present in this case writeEmptyKeystore(environment, new char[0]); final CountDownLatch latch = new CountDownLatch(1); - clusterAdmin().prepareReloadSecureSettings() - .setNodesIds("_local") - .setSecureStorePassword(new SecureString(new char[] { 'W', 'r', 'o', 'n', 'g' })) - .execute(new ActionListener<NodesReloadSecureSettingsResponse>() { + executeReloadSecureSettings( + new String[] { "_local" }, + new SecureString(new char[] { 'W', 'r', 'o', 'n', 'g' }), + new ActionListener<>() { @Override public void onResponse(NodesReloadSecureSettingsResponse nodesReloadResponse) { try { @@ -267,8 +268,9 @@ public void onFailure(Exception e) { reloadSettingsError.set(new AssertionError("Nodes request failed", e)); latch.countDown(); } - }); - latch.await(); + } + ); + safeAwait(latch); if (reloadSettingsError.get() != null) { throw reloadSettingsError.get(); } @@ -297,35 +299,31 @@ public void testMisbehavingPlugin() throws Exception { Settings.builder().put(environment.settings()).setSecureSettings(secureSettings).build() ).toString(); final CountDownLatch latch = new CountDownLatch(1); - final SecureString emptyPassword = randomBoolean() ?
new SecureString(new char[0]) : null; - clusterAdmin().prepareReloadSecureSettings() - .setSecureStorePassword(emptyPassword) - .setNodesIds(Strings.EMPTY_ARRAY) - .execute(new ActionListener<NodesReloadSecureSettingsResponse>() { - @Override - public void onResponse(NodesReloadSecureSettingsResponse nodesReloadResponse) { - try { - assertThat(nodesReloadResponse, notNullValue()); - final Map<String, NodesReloadSecureSettingsResponse.NodeResponse> nodesMap = nodesReloadResponse.getNodesMap(); - assertThat(nodesMap.size(), equalTo(cluster().size())); - for (final NodesReloadSecureSettingsResponse.NodeResponse nodeResponse : nodesReloadResponse.getNodes()) { - assertThat(nodeResponse.reloadException(), notNullValue()); - assertThat(nodeResponse.reloadException().getMessage(), containsString("If shouldThrow I throw")); - } - } catch (final AssertionError e) { - reloadSettingsError.set(e); - } finally { - latch.countDown(); + executeReloadSecureSettings(Strings.EMPTY_ARRAY, emptyPassword(), new ActionListener<>() { + @Override + public void onResponse(NodesReloadSecureSettingsResponse nodesReloadResponse) { + try { + assertThat(nodesReloadResponse, notNullValue()); + final Map<String, NodesReloadSecureSettingsResponse.NodeResponse> nodesMap = nodesReloadResponse.getNodesMap(); + assertThat(nodesMap.size(), equalTo(cluster().size())); + for (final NodesReloadSecureSettingsResponse.NodeResponse nodeResponse : nodesReloadResponse.getNodes()) { + assertThat(nodeResponse.reloadException(), notNullValue()); + assertThat(nodeResponse.reloadException().getMessage(), containsString("If shouldThrow I throw")); } - } - - @Override - public void onFailure(Exception e) { - reloadSettingsError.set(new AssertionError("Nodes request failed", e)); + } catch (final AssertionError e) { + reloadSettingsError.set(e); + } finally { latch.countDown(); } - }); - latch.await(); + } + + @Override + public void onFailure(Exception e) { + reloadSettingsError.set(new AssertionError("Nodes request failed", e)); + latch.countDown(); + } + }); + safeAwait(latch); if (reloadSettingsError.get() != null) { throw reloadSettingsError.get(); } @@ -364,10 +362,7 @@ public void testInvalidKeyInSettings() throws Exception { } PlainActionFuture<NodesReloadSecureSettingsResponse> actionFuture = new PlainActionFuture<>(); - clusterAdmin().prepareReloadSecureSettings() - .setSecureStorePassword(new SecureString(new char[0])) - .setNodesIds(Strings.EMPTY_ARRAY) - .execute(actionFuture); + executeReloadSecureSettings(Strings.EMPTY_ARRAY, new SecureString(new char[0]), actionFuture); actionFuture.get().getNodes().forEach(nodeResponse -> assertThat(nodeResponse.reloadException(), nullValue())); @@ -378,10 +373,7 @@ public void testInvalidKeyInSettings() throws Exception { } actionFuture = new PlainActionFuture<>(); - clusterAdmin().prepareReloadSecureSettings() - .setSecureStorePassword(new SecureString(new char[0])) - .setNodesIds(Strings.EMPTY_ARRAY) - .execute(actionFuture); + executeReloadSecureSettings(Strings.EMPTY_ARRAY, new SecureString(new char[0]), actionFuture); actionFuture.get() .getNodes() @@ -404,33 +396,30 @@ private void successfulReloadCall() throws InterruptedException { final AtomicReference<AssertionError> reloadSettingsError = new AtomicReference<>(); final CountDownLatch latch = new CountDownLatch(1); final SecureString emptyPassword = randomBoolean() ?
new SecureString(new char[0]) : null; - clusterAdmin().prepareReloadSecureSettings() - .setSecureStorePassword(emptyPassword) - .setNodesIds(Strings.EMPTY_ARRAY) - .execute(new ActionListener<NodesReloadSecureSettingsResponse>() { - @Override - public void onResponse(NodesReloadSecureSettingsResponse nodesReloadResponse) { - try { - assertThat(nodesReloadResponse, notNullValue()); - final Map<String, NodesReloadSecureSettingsResponse.NodeResponse> nodesMap = nodesReloadResponse.getNodesMap(); - assertThat(nodesMap.size(), equalTo(cluster().size())); - for (final NodesReloadSecureSettingsResponse.NodeResponse nodeResponse : nodesReloadResponse.getNodes()) { - assertThat(nodeResponse.reloadException(), nullValue()); - } - } catch (final AssertionError e) { - reloadSettingsError.set(e); - } finally { - latch.countDown(); + executeReloadSecureSettings(Strings.EMPTY_ARRAY, new SecureString(new char[0]), new ActionListener<>() { + @Override + public void onResponse(NodesReloadSecureSettingsResponse nodesReloadResponse) { + try { + assertThat(nodesReloadResponse, notNullValue()); + final Map<String, NodesReloadSecureSettingsResponse.NodeResponse> nodesMap = nodesReloadResponse.getNodesMap(); + assertThat(nodesMap.size(), equalTo(cluster().size())); + for (final NodesReloadSecureSettingsResponse.NodeResponse nodeResponse : nodesReloadResponse.getNodes()) { + assertThat(nodeResponse.reloadException(), nullValue()); } - } - - @Override - public void onFailure(Exception e) { - reloadSettingsError.set(new AssertionError("Nodes request failed", e)); + } catch (final AssertionError e) { + reloadSettingsError.set(e); + } finally { latch.countDown(); } - }); - latch.await(); + } + + @Override + public void onFailure(Exception e) { + reloadSettingsError.set(new AssertionError("Nodes request failed", e)); + latch.countDown(); + } + }); + safeAwait(latch); if (reloadSettingsError.get() != null) { throw reloadSettingsError.get(); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksIT.java index 07c6ba4945eaa..b20f658a01510 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksIT.java @@ -272,7 +272,7 @@ public void testFailedToStartChildTaskAfterCancelled() throws Exception { TestRequest subRequest = generateTestRequest(nodes, 0, between(0, 1), false); beforeSendLatches.get(subRequest).countDown(); mainAction.startSubTask(taskId, subRequest, future); - TaskCancelledException te = expectThrows(TaskCancelledException.class, future::actionGet); + TaskCancelledException te = expectThrows(TaskCancelledException.class, future); assertThat(te.getMessage(), equalTo("parent task was cancelled [by user request]")); allowEntireRequest(rootRequest); waitForRootTask(rootTaskFuture, false); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java index 378c88002279a..9ce3b0c40b207 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java @@ -414,7 +414,7 @@ public void testSearchTaskHeaderLimit() { headers.put("Custom-Task-Header", randomAlphaOfLengthBetween(maxSize, maxSize + 100)); IllegalArgumentException ex
= expectThrows( IllegalArgumentException.class, - () -> client().filterWithHeader(headers).admin().cluster().prepareListTasks().get() + client().filterWithHeader(headers).admin().cluster().prepareListTasks() ); assertThat(ex.getMessage(), startsWith("Request exceeded the maximum size of task headers ")); } @@ -516,7 +516,7 @@ public void testTasksCancellation() throws Exception { CancelTasksResponse cancelTasksResponse = clusterAdmin().prepareCancelTasks().setActions(TEST_TASK_ACTION.name()).get(); assertEquals(1, cancelTasksResponse.getTasks().size()); - expectThrows(TaskCancelledException.class, future::actionGet); + expectThrows(TaskCancelledException.class, future); logger.info("--> checking that test tasks are not running"); assertEquals(0, clusterAdmin().prepareListTasks().setActions(TEST_TASK_ACTION.name() + "*").get().getTasks().size()); @@ -650,7 +650,7 @@ public void testGetTaskWaitForTimeout() throws Exception { waitForTimeoutTestCase(id -> { Exception e = expectThrows( Exception.class, - () -> clusterAdmin().prepareGetTask(id).setWaitForCompletion(true).setTimeout(timeValueMillis(100)).get() + clusterAdmin().prepareGetTask(id).setWaitForCompletion(true).setTimeout(timeValueMillis(100)) ); return singleton(e); }); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/remote/RemoteInfoIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/remote/RemoteInfoIT.java index cdbc19611eb24..395f8e5c67642 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/remote/RemoteInfoIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/remote/RemoteInfoIT.java @@ -38,9 +38,7 @@ public void testRemoteClusterClientRole() { final String nodeWithoutRemoteClientRole = localCluster.startNode(NodeRoles.onlyRoles(Set.of(DiscoveryNodeRole.DATA_ROLE))); final IllegalArgumentException error = expectThrows( IllegalArgumentException.class, - () -> localCluster.client(nodeWithoutRemoteClientRole) - .execute(TransportRemoteInfoAction.TYPE, new RemoteInfoRequest()) - .actionGet() + localCluster.client(nodeWithoutRemoteClientRole).execute(TransportRemoteInfoAction.TYPE, new RemoteInfoRequest()) ); assertThat( error.getMessage(), diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIT.java index 93fc17a9a02eb..1fda9c67a0beb 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIT.java @@ -207,7 +207,7 @@ public void testValuesSmokeScreen() throws IOException, ExecutionException, Inte ClusterStatsResponse response = clusterAdmin().prepareClusterStats().get(); String msg = response.toString(); assertThat(msg, response.getTimestamp(), greaterThan(946681200000L)); // 1 Jan 2000 - assertThat(msg, response.indicesStats.getStore().getSizeInBytes(), greaterThan(0L)); + assertThat(msg, response.indicesStats.getStore().sizeInBytes(), greaterThan(0L)); assertThat(msg, response.nodesStats.getFs().getTotal().getBytes(), greaterThan(0L)); assertThat(msg, response.nodesStats.getJvm().getVersions().size(), greaterThan(0)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/tasks/ListTasksIT.java 
b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/tasks/ListTasksIT.java index cb508334f835e..955ec4a0bbc99 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/tasks/ListTasksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/tasks/ListTasksIT.java @@ -56,7 +56,7 @@ public void testListTasksValidation() { ActionRequestValidationException ex = expectThrows( ActionRequestValidationException.class, - () -> clusterAdmin().prepareListTasks().setDescriptions("*").get() + clusterAdmin().prepareListTasks().setDescriptions("*") ); assertThat(ex.getMessage(), containsString("matching on descriptions is not available when [detailed] is false")); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/alias/ValidateIndicesAliasesRequestIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/alias/ValidateIndicesAliasesRequestIT.java index 63239ec2419f9..7f6bb0239b730 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/alias/ValidateIndicesAliasesRequestIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/alias/ValidateIndicesAliasesRequestIT.java @@ -96,7 +96,7 @@ public void testNotAllowed() { final String origin = randomFrom("", "not-allowed"); final IndicesAliasesRequest request = new IndicesAliasesRequest().origin(origin); request.addAliasAction(IndicesAliasesRequest.AliasActions.add().index("index").alias("alias")); - final Exception e = expectThrows(IllegalStateException.class, () -> client().admin().indices().aliases(request).actionGet()); + final Exception e = expectThrows(IllegalStateException.class, client().admin().indices().aliases(request)); assertThat(e, hasToString(containsString("origin [" + origin + "] not allowed for index [index]"))); } @@ -113,7 +113,7 @@ public void testSomeAllowed() { final IndicesAliasesRequest request = new IndicesAliasesRequest().origin(origin); request.addAliasAction(IndicesAliasesRequest.AliasActions.add().index("foo").alias("alias")); request.addAliasAction(IndicesAliasesRequest.AliasActions.add().index("bar").alias("alias")); - final Exception e = expectThrows(IllegalStateException.class, () -> client().admin().indices().aliases(request).actionGet()); + final Exception e = expectThrows(IllegalStateException.class, client().admin().indices().aliases(request)); final String index = "foo_allowed".equals(origin) ? 
"bar" : "foo"; assertThat(e, hasToString(containsString("origin [" + origin + "] not allowed for index [" + index + "]"))); assertTrue(client().admin().indices().getAliases(new GetAliasesRequest("alias")).actionGet().getAliases().isEmpty()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/AutoCreateSystemIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/AutoCreateSystemIndexIT.java index 800df11513ae3..40657987d8db9 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/AutoCreateSystemIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/AutoCreateSystemIndexIT.java @@ -157,13 +157,8 @@ public void testSystemIndicesAutoCreatedAsHidden() throws Exception { public void testSystemIndicesAutoCreateRejectedWhenNotHidden() { CreateIndexRequest request = new CreateIndexRequest(UnmanagedSystemIndexTestPlugin.SYSTEM_INDEX_NAME); request.settings(Settings.builder().put(SETTING_INDEX_HIDDEN, false).build()); - ExecutionException exception = expectThrows( - ExecutionException.class, - () -> client().execute(AutoCreateAction.INSTANCE, request).get() - ); - assertThat( - exception.getCause().getMessage(), + expectThrows(IllegalStateException.class, client().execute(AutoCreateAction.INSTANCE, request)).getMessage(), containsString("Cannot auto-create system index [.unmanaged-system-idx] with [index.hidden] set to 'false'") ); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java index 1cf0bbee8a3e3..c4cd400344de6 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/CreateIndexIT.java @@ -9,6 +9,7 @@ package org.elasticsearch.action.admin.indices.create; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.UnavailableShardsException; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.action.admin.indices.alias.Alias; @@ -42,7 +43,6 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertRequestBuilderThrows; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -118,8 +118,7 @@ public void testEmptyNestedMappings() throws Exception { public void testMappingParamAndNestedMismatch() throws Exception { MapperParsingException e = expectThrows( MapperParsingException.class, - () -> prepareCreate("test").setMapping(XContentFactory.jsonBuilder().startObject().startObject("type2").endObject().endObject()) - .get() + prepareCreate("test").setMapping(XContentFactory.jsonBuilder().startObject().startObject("type2").endObject().endObject()) ); assertThat(e.getMessage(), startsWith("Failed to parse mapping: Root mapping definition has unsupported parameters")); } @@ -298,10 +297,10 @@ public void 
testFailureToCreateIndexCleansUpIndicesService() { .build(); assertAcked(indicesAdmin().prepareCreate("test-idx-1").setSettings(settings).addAlias(new Alias("alias1").writeIndex(true)).get()); - assertRequestBuilderThrows( - indicesAdmin().prepareCreate("test-idx-2").setSettings(settings).addAlias(new Alias("alias1").writeIndex(true)), - IllegalStateException.class - ); + ActionRequestBuilder<?, ?> builder = indicesAdmin().prepareCreate("test-idx-2") + .setSettings(settings) + .addAlias(new Alias("alias1").writeIndex(true)); + expectThrows(IllegalStateException.class, builder); IndicesService indicesService = internalCluster().getInstance(IndicesService.class, internalCluster().getMasterName()); for (IndexService indexService : indicesService) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java index 0647869aabf04..06af1f0867594 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java @@ -468,9 +468,7 @@ public void testCreateShrinkWithIndexSort() throws Exception { // check that index sort cannot be set on the target index IllegalArgumentException exc = expectThrows( IllegalArgumentException.class, - () -> indicesAdmin().prepareResizeIndex("source", "target") - .setSettings(indexSettings(2, 0).put("index.sort.field", "foo").build()) - .get() + indicesAdmin().prepareResizeIndex("source", "target").setSettings(indexSettings(2, 0).put("index.sort.field", "foo").build()) ); assertThat(exc.getMessage(), containsString("can't override index sort when resizing an index")); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java index 3f2549828c115..b5e191909e3a8 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java @@ -491,10 +491,9 @@ public void testCreateSplitWithIndexSort() throws Exception { // check that index sort cannot be set on the target index IllegalArgumentException exc = expectThrows( IllegalArgumentException.class, - () -> indicesAdmin().prepareResizeIndex("source", "target") + indicesAdmin().prepareResizeIndex("source", "target") .setResizeType(ResizeType.SPLIT) .setSettings(indexSettings(4, 0).put("index.sort.field", "foo").build()) - .get() ); assertThat(exc.getMessage(), containsString("can't override index sort when resizing an index")); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexBlocksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexBlocksIT.java index ebffcff3a7152..0dd91b98e1acb 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexBlocksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexBlocksIT.java @@ -71,7 +71,7 @@ public void testClusterBlockMessageHasIndexName() { ensureGreen("test"); updateIndexSettings(Settings.builder().put(IndexMetadata.SETTING_READ_ONLY_ALLOW_DELETE, true), "test"); IndexRequestBuilder
indexRequestBuilder = prepareIndex("test").setId("1").setSource("foo", "bar"); - ClusterBlockException e = expectThrows(ClusterBlockException.class, indexRequestBuilder::get); + ClusterBlockException e = expectThrows(ClusterBlockException.class, indexRequestBuilder); indexRequestBuilder.request().decRef(); assertEquals( "index [test] blocked by: [TOO_MANY_REQUESTS/12/disk usage exceeded flood-stage watermark, " diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/diskusage/IndexDiskUsageAnalyzerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/diskusage/IndexDiskUsageAnalyzerIT.java index d53b7241d12c5..eef0341ee8f78 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/diskusage/IndexDiskUsageAnalyzerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/diskusage/IndexDiskUsageAnalyzerIT.java @@ -67,12 +67,12 @@ public static class EngineTestPlugin extends Plugin implements EnginePlugin { public Optional<EngineFactory> getEngineFactory(IndexSettings indexSettings) { return Optional.of(config -> new InternalEngine(config) { @Override - public void flush(boolean force, boolean waitIfOngoing, ActionListener<FlushResult> listener) throws EngineException { + protected void flushHoldingLock(boolean force, boolean waitIfOngoing, ActionListener<FlushResult> listener) { final ShardId shardId = config.getShardId(); if (failOnFlushShards.contains(shardId)) { listener.onFailure(new EngineException(shardId, "simulated IO")); } else { - super.flush(force, waitIfOngoing, listener); + super.flushHoldingLock(force, waitIfOngoing, listener); } } }); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/mapping/put/ValidateMappingRequestPluginIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/mapping/put/ValidateMappingRequestPluginIT.java index 9297cf9a60282..e94b0a6e0fb76 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/mapping/put/ValidateMappingRequestPluginIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/mapping/put/ValidateMappingRequestPluginIT.java @@ -57,7 +57,7 @@ public void testValidateMappingRequest() { { String origin = randomFrom("", "3", "4", "5"); PutMappingRequest request = new PutMappingRequest().indices("index_1").source("t1", "type=keyword").origin(origin); - Exception e = expectThrows(IllegalStateException.class, () -> indicesAdmin().putMapping(request).actionGet()); + Exception e = expectThrows(IllegalStateException.class, indicesAdmin().putMapping(request)); assertThat(e.getMessage(), equalTo("not allowed: index[index_1] origin[" + origin + "]")); } { @@ -70,7 +70,7 @@ public void testValidateMappingRequest() { { String origin = randomFrom("", "1", "4", "5"); PutMappingRequest request = new PutMappingRequest().indices("index_2").source("t2", "type=keyword").origin(origin); - Exception e = expectThrows(IllegalStateException.class, () -> indicesAdmin().putMapping(request).actionGet()); + Exception e = expectThrows(IllegalStateException.class, indicesAdmin().putMapping(request)); assertThat(e.getMessage(), equalTo("not allowed: index[index_2] origin[" + origin + "]")); } { @@ -83,7 +83,7 @@ public void testValidateMappingRequest() { { String origin = randomFrom("", "1", "3", "4"); PutMappingRequest request = new PutMappingRequest().indices("*").source("t3", "type=keyword").origin(origin); - Exception e = expectThrows(IllegalStateException.class,
() -> indicesAdmin().putMapping(request).actionGet()); + Exception e = expectThrows(IllegalStateException.class, indicesAdmin().putMapping(request)); assertThat(e.getMessage(), containsString("not allowed:")); } { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java index 7ae7fc5c4a180..94b4bcb62885a 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java @@ -155,7 +155,7 @@ public void testRolloverWithNoWriteIndex() { } IllegalArgumentException exception = expectThrows( IllegalArgumentException.class, - () -> indicesAdmin().prepareRolloverIndex("alias").dryRun(randomBoolean()).get() + indicesAdmin().prepareRolloverIndex("alias").dryRun(randomBoolean()) ); assertThat(exception.getMessage(), equalTo("rollover target [alias] does not point to a write index")); } @@ -590,7 +590,7 @@ public void testRejectIfAliasFoundInTemplate() throws Exception { ensureYellow("logs-write"); final IllegalArgumentException error = expectThrows( IllegalArgumentException.class, - () -> indicesAdmin().prepareRolloverIndex("logs-write").get() + indicesAdmin().prepareRolloverIndex("logs-write") ); assertThat( error.getMessage(), @@ -777,7 +777,7 @@ public void testMultiThreadedRollover() throws Exception { }); // We should *NOT* have a third index, it should have rolled over *exactly* once - expectThrows(Exception.class, () -> indicesAdmin().prepareGetIndex().addIndices(writeIndexPrefix + "000003").get()); + expectThrows(Exception.class, indicesAdmin().prepareGetIndex().addIndices(writeIndexPrefix + "000003")); } public void testRolloverConcurrently() throws Exception { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestIT.java index 615dc4636dec7..8c2c4a33cf769 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestIT.java @@ -31,6 +31,7 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.concurrent.TimeUnit; import java.util.function.Predicate; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; @@ -48,9 +49,13 @@ protected Collection<Class<? extends Plugin>> nodePlugins() { return Arrays.asList(MockFSIndexStore.TestPlugin.class); } + private static IndicesShardStoresResponse execute(IndicesShardStoresRequest request) { + return client().execute(TransportIndicesShardStoresAction.TYPE, request).actionGet(10, TimeUnit.SECONDS); + } + public void testEmpty() { ensureGreen(); - IndicesShardStoresResponse rsp = indicesAdmin().prepareShardStores().get(); + IndicesShardStoresResponse rsp = execute(new IndicesShardStoresRequest()); assertThat(rsp.getStoreStatuses().size(), equalTo(0)); } @@ -62,11 +67,11 @@ public void testBasic() throws Exception { ensureGreen(index); // no unallocated shards - IndicesShardStoresResponse response = indicesAdmin().prepareShardStores(index).get(); + IndicesShardStoresResponse response = execute(new IndicesShardStoresRequest(index));
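Aside (not part of the patch): the `execute` helper added above trades the `indicesAdmin().prepareShardStores(...)` convenience wrapper for a direct transport-action call with a bounded wait, so a hung action fails the test within ten seconds instead of stalling the whole suite. A minimal sketch of the same idea generalized to any transport action; the name `executeWithTimeout` and the surrounding ESIntegTestCase-style class are assumptions for illustration only:

    // Sketch: run any transport action from a test with an explicit deadline.
    // ActionType, ActionRequest and ActionResponse are the standard ES client types.
    private static <Req extends ActionRequest, Resp extends ActionResponse> Resp executeWithTimeout(ActionType<Resp> action, Req request) {
        // actionGet with a timeout turns a wedged action into a prompt, diagnosable failure.
        return client().execute(action, request).actionGet(10, TimeUnit.SECONDS);
    }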
assertThat(response.getStoreStatuses().size(), equalTo(0)); // all shards - response = indicesAdmin().shardStores(new IndicesShardStoresRequest(index).shardStatuses("all")).get(); + response = execute(new IndicesShardStoresRequest(index).shardStatuses("all")); assertThat(response.getStoreStatuses().containsKey(index), equalTo(true)); Map<Integer, List<IndicesShardStoresResponse.StoreStatus>> shardStores = response.getStoreStatuses().get(index); assertThat(shardStores.size(), equalTo(2)); @@ -88,7 +93,7 @@ public void testBasic() throws Exception { assertNoTimeout(clusterAdmin().prepareHealth().setWaitForNodes("" + (num - 1))); ClusterState clusterState = clusterAdmin().prepareState().get().getState(); List<ShardRouting> unassignedShards = clusterState.routingTable().index(index).shardsWithState(ShardRoutingState.UNASSIGNED); - response = indicesAdmin().shardStores(new IndicesShardStoresRequest(index)).get(); + response = execute(new IndicesShardStoresRequest(index)); assertThat(response.getStoreStatuses().containsKey(index), equalTo(true)); Map<Integer, List<IndicesShardStoresResponse.StoreStatus>> shardStoresStatuses = response.getStoreStatuses().get(index); assertThat(shardStoresStatuses.size(), equalTo(unassignedShards.size())); @@ -108,14 +113,17 @@ public void testIndices() throws Exception { String index1 = "test1"; String index2 = "test2"; internalCluster().ensureAtLeastNumDataNodes(2); - assertAcked(prepareCreate(index1).setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, "2"))); - assertAcked(prepareCreate(index2).setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, "2"))); + for (final var index : List.of(index1, index2)) { + final var settings = Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 2); + if (randomBoolean()) { + settings.put(IndexMetadata.SETTING_INDEX_HIDDEN, randomBoolean()); + } + assertAcked(prepareCreate(index).setSettings(settings)); + } indexRandomData(index1); indexRandomData(index2); ensureGreen(); - IndicesShardStoresResponse response = indicesAdmin().shardStores( - new IndicesShardStoresRequest(new String[] {}).shardStatuses("all") - ).get(); + IndicesShardStoresResponse response = execute(new IndicesShardStoresRequest(new String[] {}).shardStatuses("all")); Map<String, Map<Integer, List<IndicesShardStoresResponse.StoreStatus>>> shardStatuses = response.getStoreStatuses(); assertThat(shardStatuses.containsKey(index1), equalTo(true)); assertThat(shardStatuses.containsKey(index2), equalTo(true)); @@ -123,7 +131,7 @@ assertThat(shardStatuses.get(index1).size(), equalTo(2)); assertThat(shardStatuses.get(index2).size(), equalTo(2)); // ensure index filtering works - response = indicesAdmin().shardStores(new IndicesShardStoresRequest(index1).shardStatuses("all")).get(); + response = execute(new IndicesShardStoresRequest(index1).shardStatuses("all")); shardStatuses = response.getStoreStatuses(); assertThat(shardStatuses.containsKey(index1), equalTo(true)); assertThat(shardStatuses.containsKey(index2), equalTo(false)); @@ -170,7 +178,7 @@ public void testCorruptedShards() throws Exception { } assertBusy(() -> { // IndicesClusterStateService#failAndRemoveShard() called asynchronously but we need it to have completed here.
- IndicesShardStoresResponse rsp = indicesAdmin().prepareShardStores(index).setShardStatuses("all").get(); + IndicesShardStoresResponse rsp = execute(new IndicesShardStoresRequest(index).shardStatuses("all")); Map<Integer, List<IndicesShardStoresResponse.StoreStatus>> shardStatuses = rsp.getStoreStatuses().get(index); assertNotNull(shardStatuses); assertThat(shardStatuses.size(), greaterThan(0)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkIntegrationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkIntegrationIT.java index 944e0f4f6191f..1ab14678e9b61 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/bulk/BulkIntegrationIT.java @@ -133,7 +133,7 @@ public void testBulkWithGlobalDefaults() throws Exception { String bulkAction = copyToStringFromClasspath("/org/elasticsearch/action/bulk/simple-bulk-missing-index-type.json"); try (BulkRequestBuilder bulkBuilder = client().prepareBulk()) { bulkBuilder.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, XContentType.JSON); - ActionRequestValidationException ex = expectThrows(ActionRequestValidationException.class, bulkBuilder::get); + ActionRequestValidationException ex = expectThrows(ActionRequestValidationException.class, bulkBuilder); assertThat(ex.validationErrors(), containsInAnyOrder("index is missing", "index is missing", "index is missing")); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/search/PointInTimeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/search/PointInTimeIT.java index c1ee153a1fa9b..e573bae146e09 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/search/PointInTimeIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/search/PointInTimeIT.java @@ -84,7 +84,7 @@ public void testBasic() { } refresh("test"); String pitId = openPointInTime(new String[] { "test" }, TimeValue.timeValueMinutes(2)); - assertResponse(prepareSearch().setPreference(null).setPointInTime(new PointInTimeBuilder(pitId)), resp1 -> { + assertResponse(prepareSearch().setPointInTime(new PointInTimeBuilder(pitId)), resp1 -> { assertThat(resp1.pointInTimeId(), equalTo(pitId)); assertHitCount(resp1, numDocs); }); @@ -100,13 +100,13 @@ public void testBasic() { if (randomBoolean()) { final int delDocCount = deletedDocs; assertNoFailuresAndResponse( - prepareSearch("test").setPreference(null).setQuery(new MatchAllQueryBuilder()), + prepareSearch("test").setQuery(new MatchAllQueryBuilder()), resp2 -> assertHitCount(resp2, numDocs - delDocCount) ); } try { assertNoFailuresAndResponse( - prepareSearch().setPreference(null).setQuery(new MatchAllQueryBuilder()).setPointInTime(new PointInTimeBuilder(pitId)), + prepareSearch().setQuery(new MatchAllQueryBuilder()).setPointInTime(new PointInTimeBuilder(pitId)), resp3 -> { assertHitCount(resp3, numDocs); assertThat(resp3.pointInTimeId(), equalTo(pitId)); @@ -132,7 +132,7 @@ public void testMultipleIndices() { String pitId = openPointInTime(new String[] { "*" }, TimeValue.timeValueMinutes(2)); try { int moreDocs = randomIntBetween(10, 50); - assertNoFailuresAndResponse(prepareSearch().setPreference(null).setPointInTime(new PointInTimeBuilder(pitId)), resp -> { + assertNoFailuresAndResponse(prepareSearch().setPointInTime(new PointInTimeBuilder(pitId)), resp -> { assertHitCount(resp, numDocs); assertNotNull(resp.pointInTimeId()); assertThat(resp.pointInTimeId(),
equalTo(pitId)); @@ -144,7 +144,7 @@ public void testMultipleIndices() { refresh(); }); assertNoFailuresAndResponse(prepareSearch(), resp -> assertHitCount(resp, numDocs + moreDocs)); - assertNoFailuresAndResponse(prepareSearch().setPreference(null).setPointInTime(new PointInTimeBuilder(pitId)), resp -> { + assertNoFailuresAndResponse(prepareSearch().setPointInTime(new PointInTimeBuilder(pitId)), resp -> { assertHitCount(resp, numDocs); assertNotNull(resp.pointInTimeId()); assertThat(resp.pointInTimeId(), equalTo(pitId)); @@ -213,7 +213,7 @@ public void testRelocation() throws Exception { refresh(); String pitId = openPointInTime(new String[] { "test" }, TimeValue.timeValueMinutes(2)); try { - assertNoFailuresAndResponse(prepareSearch().setPreference(null).setPointInTime(new PointInTimeBuilder(pitId)), resp -> { + assertNoFailuresAndResponse(prepareSearch().setPointInTime(new PointInTimeBuilder(pitId)), resp -> { assertHitCount(resp, numDocs); assertThat(resp.pointInTimeId(), equalTo(pitId)); }); @@ -233,7 +233,7 @@ public void testRelocation() throws Exception { } refresh(); } - assertNoFailuresAndResponse(prepareSearch().setPreference(null).setPointInTime(new PointInTimeBuilder(pitId)), resp -> { + assertNoFailuresAndResponse(prepareSearch().setPointInTime(new PointInTimeBuilder(pitId)), resp -> { assertHitCount(resp, numDocs); assertThat(resp.pointInTimeId(), equalTo(pitId)); }); @@ -246,7 +246,7 @@ public void testRelocation() throws Exception { .collect(Collectors.toSet()); assertThat(assignedNodes, everyItem(not(in(excludedNodes)))); }, 30, TimeUnit.SECONDS); - assertNoFailuresAndResponse(prepareSearch().setPreference(null).setPointInTime(new PointInTimeBuilder(pitId)), resp -> { + assertNoFailuresAndResponse(prepareSearch().setPointInTime(new PointInTimeBuilder(pitId)), resp -> { assertHitCount(resp, numDocs); assertThat(resp.pointInTimeId(), equalTo(pitId)); }); @@ -264,7 +264,7 @@ public void testPointInTimeNotFound() throws Exception { } refresh(); String pit = openPointInTime(new String[] { "index" }, TimeValue.timeValueSeconds(5)); - assertNoFailuresAndResponse(prepareSearch().setPreference(null).setPointInTime(new PointInTimeBuilder(pit)), resp1 -> { + assertNoFailuresAndResponse(prepareSearch().setPointInTime(new PointInTimeBuilder(pit)), resp1 -> { assertHitCount(resp1, index1); if (rarely()) { try { @@ -281,7 +281,7 @@ public void testPointInTimeNotFound() throws Exception { }); SearchPhaseExecutionException e = expectThrows( SearchPhaseExecutionException.class, - () -> prepareSearch().setPreference(null).setPointInTime(new PointInTimeBuilder(pit)).get() + prepareSearch().setPointInTime(new PointInTimeBuilder(pit)) ); for (ShardSearchFailure failure : e.shardFailures()) { assertThat(ExceptionsHelper.unwrapCause(failure.getCause()), instanceOf(SearchContextMissingException.class)); @@ -307,7 +307,7 @@ public void testIndexNotFound() { String pit = openPointInTime(new String[] { "index-*" }, TimeValue.timeValueMinutes(2)); try { assertNoFailuresAndResponse( - prepareSearch().setPreference(null).setPointInTime(new PointInTimeBuilder(pit)), + prepareSearch().setPointInTime(new PointInTimeBuilder(pit)), resp -> assertHitCount(resp, index1 + index2) ); indicesAdmin().prepareDelete("index-1").get(); @@ -316,21 +316,15 @@ public void testIndexNotFound() { } // Allow partial search result - assertResponse( - prepareSearch().setPreference(null).setAllowPartialSearchResults(true).setPointInTime(new PointInTimeBuilder(pit)), - resp -> { - assertFailures(resp); - assertHitCount(resp, 
index2); - } - ); + assertResponse(prepareSearch().setAllowPartialSearchResults(true).setPointInTime(new PointInTimeBuilder(pit)), resp -> { + assertFailures(resp); + assertHitCount(resp, index2); + }); // Do not allow partial search result expectThrows( ElasticsearchException.class, - () -> prepareSearch().setPreference(null) - .setAllowPartialSearchResults(false) - .setPointInTime(new PointInTimeBuilder(pit)) - .get() + prepareSearch().setAllowPartialSearchResults(false).setPointInTime(new PointInTimeBuilder(pit)) ); } finally { closePointInTime(pit); @@ -366,7 +360,6 @@ public void testCanMatch() throws Exception { assertResponse( prepareSearch().setQuery(new RangeQueryBuilder("created_date").gte("2020-01-02").lte("2020-01-03")) .setSearchType(SearchType.QUERY_THEN_FETCH) - .setPreference(null) .setPreFilterShardSize(randomIntBetween(2, 3)) .setMaxConcurrentShardRequests(randomIntBetween(1, 2)) .setPointInTime(new PointInTimeBuilder(pitId)), @@ -423,20 +416,17 @@ public void testPartialResults() throws Exception { refresh(); String pitId = openPointInTime(new String[] { "test-*" }, TimeValue.timeValueMinutes(2)); try { - assertNoFailuresAndResponse(prepareSearch().setPreference(null).setPointInTime(new PointInTimeBuilder(pitId)), resp -> { + assertNoFailuresAndResponse(prepareSearch().setPointInTime(new PointInTimeBuilder(pitId)), resp -> { assertHitCount(resp, numDocs1 + numDocs2); assertThat(resp.pointInTimeId(), equalTo(pitId)); }); internalCluster().restartNode(assignedNodeForIndex1); - assertResponse( - prepareSearch().setPreference(null).setAllowPartialSearchResults(true).setPointInTime(new PointInTimeBuilder(pitId)), - resp -> { - assertFailures(resp); - assertThat(resp.pointInTimeId(), equalTo(pitId)); - assertHitCount(resp, numDocs2); - } - ); + assertResponse(prepareSearch().setAllowPartialSearchResults(true).setPointInTime(new PointInTimeBuilder(pitId)), resp -> { + assertFailures(resp); + assertThat(resp.pointInTimeId(), equalTo(pitId)); + assertHitCount(resp, numDocs2); + }); } finally { closePointInTime(pitId); } @@ -487,10 +477,7 @@ public void testPITTiebreak() throws Exception { } public void testCloseInvalidPointInTime() { - expectThrows( - Exception.class, - () -> client().execute(TransportClosePointInTimeAction.TYPE, new ClosePointInTimeRequest("")).actionGet() - ); + expectThrows(Exception.class, client().execute(TransportClosePointInTimeAction.TYPE, new ClosePointInTimeRequest(""))); List<TaskInfo> tasks = clusterAdmin().prepareListTasks().setActions(TransportClosePointInTimeAction.TYPE.name()).get().getTasks(); assertThat(tasks, empty()); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/search/TransportSearchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/search/TransportSearchIT.java index 71927df290867..cc222de8a0d38 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/search/TransportSearchIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/search/TransportSearchIT.java @@ -325,8 +325,7 @@ public void testWaitForRefreshIndexValidation() throws Exception { IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> prepareSearch("testFailedAlias").setWaitForCheckpoints(Collections.singletonMap("testFailedAlias", validCheckpoints)) - .get() + prepareSearch("testFailedAlias").setWaitForCheckpoints(Collections.singletonMap("testFailedAlias", validCheckpoints)) ); assertThat( e.getMessage(), @@ -338,7 +337,7 @@ public void testWaitForRefreshIndexValidation()
throws Exception { IllegalArgumentException e2 = expectThrows( IllegalArgumentException.class, - () -> prepareSearch("test1").setWaitForCheckpoints(Collections.singletonMap("test1", new long[2])).get() + prepareSearch("test1").setWaitForCheckpoints(Collections.singletonMap("test1", new long[2])) ); assertThat( e2.getMessage(), @@ -352,7 +351,7 @@ public void testWaitForRefreshIndexValidation() throws Exception { IllegalArgumentException e3 = expectThrows( IllegalArgumentException.class, - () -> prepareSearch("testAlias").setWaitForCheckpoints(Collections.singletonMap("testAlias", new long[2])).get() + prepareSearch("testAlias").setWaitForCheckpoints(Collections.singletonMap("testAlias", new long[2])) ); assertThat( e3.getMessage(), @@ -366,7 +365,7 @@ public void testWaitForRefreshIndexValidation() throws Exception { IllegalArgumentException e4 = expectThrows( IllegalArgumentException.class, - () -> prepareSearch("testAlias").setWaitForCheckpoints(Collections.singletonMap("test2", validCheckpoints)).get() + prepareSearch("testAlias").setWaitForCheckpoints(Collections.singletonMap("test2", validCheckpoints)) ); assertThat( e4.getMessage(), @@ -389,7 +388,7 @@ public void testShardCountLimit() throws Exception { updateClusterSettings(Settings.builder().put(TransportSearchAction.SHARD_COUNT_LIMIT_SETTING.getKey(), numPrimaries1 - 1)); - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> prepareSearch("test1").get()); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, prepareSearch("test1")); assertThat( e.getMessage(), containsString("Trying to query " + numPrimaries1 + " shards, which is over the limit of " + (numPrimaries1 - 1)) @@ -400,7 +399,7 @@ // no exception prepareSearch("test1").get().decRef(); - e = expectThrows(IllegalArgumentException.class, () -> prepareSearch("test1", "test2").get()); + e = expectThrows(IllegalArgumentException.class, prepareSearch("test1", "test2")); assertThat( e.getMessage(), containsString( @@ -490,7 +489,7 @@ public void onFailure(Exception e) { assertBusy(() -> { Exception exc = expectThrows( Exception.class, - () -> client.prepareSearch("test").addAggregation(new TestAggregationBuilder("test")).get().decRef() + client.prepareSearch("test").addAggregation(new TestAggregationBuilder("test")) ); assertThat(exc.getCause().getMessage(), containsString("<reduce_aggs>")); }); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/termvectors/GetTermVectorsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/termvectors/GetTermVectorsIT.java index 9e4a13d8d3b4f..8b442c0d6dfcf 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/termvectors/GetTermVectorsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/termvectors/GetTermVectorsIT.java @@ -43,7 +43,6 @@ import java.util.concurrent.ExecutionException; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertRequestBuilderThrows; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -423,7 +422,7 @@ public void testDuelESLucene() throws Exception { for (TestConfig test : testConfigs) { TermVectorsRequestBuilder request = getRequestForConfig(test); if (test.expectedException != null) { - assertRequestBuilderThrows(request,
test.expectedException); + expectThrows(test.expectedException, request); continue; } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/aliases/IndexAliasesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/aliases/IndexAliasesIT.java index 0af4c24d7661c..2182d7d775359 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/aliases/IndexAliasesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/aliases/IndexAliasesIT.java @@ -20,7 +20,6 @@ import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; -import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.AliasMetadata; import org.elasticsearch.cluster.metadata.IndexAbstraction; @@ -91,10 +90,7 @@ public void testAliases() throws Exception { { logger.info("--> indexing against [alias1], should fail now"); IndexRequest indexRequest = new IndexRequest("alias1").id("1").source(source("2", "test"), XContentType.JSON); - IllegalArgumentException exception = expectThrows( - IllegalArgumentException.class, - () -> client().index(indexRequest).actionGet() - ); + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> client().index(indexRequest)); assertThat( exception.getMessage(), equalTo( @@ -129,7 +125,7 @@ public void testAliases() throws Exception { { logger.info("--> indexing against [alias1], should fail now"); IndexRequest indexRequest = new IndexRequest("alias1").id("1").source(source("2", "test"), XContentType.JSON); - Exception exception = expectThrows(IllegalArgumentException.class, () -> client().index(indexRequest).actionGet()); + Exception exception = expectThrows(IllegalArgumentException.class, () -> client().index(indexRequest)); assertThat( exception.getMessage(), equalTo( @@ -143,10 +139,7 @@ public void testAliases() throws Exception { { logger.info("--> deleting against [alias1], should fail now"); - Exception exception = expectThrows( - IllegalArgumentException.class, - () -> client().delete(new DeleteRequest("alias1").id("1")).actionGet() - ); + Exception exception = expectThrows(IllegalArgumentException.class, () -> client().delete(new DeleteRequest("alias1").id("1"))); assertThat( exception.getMessage(), equalTo( @@ -199,17 +192,11 @@ public void testFailedFilter() throws Exception { createIndex("test"); // invalid filter, invalid json - Exception e = expectThrows( - IllegalArgumentException.class, - () -> indicesAdmin().prepareAliases().addAlias("test", "alias1", "abcde").get() - ); + Exception e = expectThrows(IllegalArgumentException.class, indicesAdmin().prepareAliases().addAlias("test", "alias1", "abcde")); assertThat(e.getMessage(), equalTo("failed to parse filter for alias [alias1]")); // valid json , invalid filter - e = expectThrows( - IllegalArgumentException.class, - () -> indicesAdmin().prepareAliases().addAlias("test", "alias1", "{ \"test\": {} }").get() - ); + e = expectThrows(IllegalArgumentException.class, indicesAdmin().prepareAliases().addAlias("test", "alias1", "{ \"test\": {} }")); assertThat(e.getMessage(), equalTo("failed to parse filter for alias [alias1]")); } @@ -240,7 +227,7 @@ public void testEmptyFilter() throws Exception { logger.info("--> aliasing index [test] with [alias1] and empty filter"); IllegalArgumentException iae = expectThrows( IllegalArgumentException.class, - () -> 
indicesAdmin().prepareAliases().addAlias("test", "alias1", "{}").get() + indicesAdmin().prepareAliases().addAlias("test", "alias1", "{}") ); assertEquals("failed to parse filter for alias [alias1]", iae.getMessage()); } @@ -684,7 +671,7 @@ public void testDeleteAliases() throws Exception { assertFalse(indicesAdmin().prepareGetAliases("foo").setIndices("bar_bar").get().getAliases().isEmpty()); IllegalArgumentException iae = expectThrows( IllegalArgumentException.class, - () -> indicesAdmin().prepareAliases().addAliasAction(AliasActions.remove().index("foo").alias("foo")).get() + indicesAdmin().prepareAliases().addAliasAction(AliasActions.remove().index("foo").alias("foo")) ); assertEquals( "The provided expression [foo] matches an alias, specify the corresponding concrete indices instead.", @@ -1097,7 +1084,7 @@ public void testAliasesCanBeAddedToIndicesOnly() throws Exception { IllegalArgumentException iae = expectThrows( IllegalArgumentException.class, - () -> indicesAdmin().prepareAliases().addAliasAction(AliasActions.add().index("week_20").alias("tmp")).get() + indicesAdmin().prepareAliases().addAliasAction(AliasActions.add().index("week_20").alias("tmp")) ); assertEquals( "The provided expression [week_20] matches an alias, specify the corresponding concrete indices instead.", @@ -1218,10 +1205,7 @@ public void testAliasActionRemoveIndex() throws InterruptedException, ExecutionE assertAcked(indicesAdmin().prepareAliases().addAlias("bar_bar", "foo")); }); - IllegalArgumentException iae = expectThrows( - IllegalArgumentException.class, - () -> indicesAdmin().prepareAliases().removeIndex("foo").get() - ); + IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, indicesAdmin().prepareAliases().removeIndex("foo")); assertEquals( "The provided expression [foo] matches an alias, specify the corresponding concrete indices instead.", iae.getMessage() @@ -1258,11 +1242,10 @@ public void testHiddenAliasesMustBeConsistent() { assertAcked(indicesAdmin().prepareAliases().addAliasAction(AliasActions.add().index(index1).alias(alias))); - IllegalStateException ex = expectThrows(IllegalStateException.class, () -> { - AcknowledgedResponse res = indicesAdmin().prepareAliases() - .addAliasAction(AliasActions.add().index(index2).alias(alias).isHidden(true)) - .get(); - }); + IllegalStateException ex = expectThrows( + IllegalStateException.class, + indicesAdmin().prepareAliases().addAliasAction(AliasActions.add().index(index2).alias(alias).isHidden(true)) + ); logger.error("exception: {}", ex.getMessage()); assertThat(ex.getMessage(), containsString("has is_hidden set to true on indices")); @@ -1270,18 +1253,18 @@ public void testHiddenAliasesMustBeConsistent() { assertAcked(indicesAdmin().prepareAliases().addAliasAction(AliasActions.add().index(index1).alias(alias).isHidden(false))); expectThrows( IllegalStateException.class, - () -> indicesAdmin().prepareAliases().addAliasAction(AliasActions.add().index(index2).alias(alias).isHidden(true)).get() + indicesAdmin().prepareAliases().addAliasAction(AliasActions.add().index(index2).alias(alias).isHidden(true)) ); assertAcked(indicesAdmin().prepareAliases().addAliasAction(AliasActions.remove().index(index1).alias(alias))); assertAcked(indicesAdmin().prepareAliases().addAliasAction(AliasActions.add().index(index1).alias(alias).isHidden(true))); expectThrows( IllegalStateException.class, - () -> indicesAdmin().prepareAliases().addAliasAction(AliasActions.add().index(index2).alias(alias).isHidden(false)).get() + 
indicesAdmin().prepareAliases().addAliasAction(AliasActions.add().index(index2).alias(alias).isHidden(false)) ); expectThrows( IllegalStateException.class, - () -> indicesAdmin().prepareAliases().addAliasAction(AliasActions.add().index(index2).alias(alias)).get() + indicesAdmin().prepareAliases().addAliasAction(AliasActions.add().index(index2).alias(alias)) ); // Both visible @@ -1367,7 +1350,7 @@ public void testCreateIndexAndAliasWithSameNameFails() { final String indexName = "index-name"; final IllegalArgumentException iae = expectThrows( IllegalArgumentException.class, - () -> indicesAdmin().prepareCreate(indexName).addAlias(new Alias(indexName)).get() + indicesAdmin().prepareCreate(indexName).addAlias(new Alias(indexName)) ); assertEquals("alias name [" + indexName + "] self-conflicts with index name", iae.getMessage()); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/blocks/SimpleBlocksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/blocks/SimpleBlocksIT.java index ab1c406feb383..5d4f0b3671ca5 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/blocks/SimpleBlocksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/blocks/SimpleBlocksIT.java @@ -12,7 +12,6 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; -import org.elasticsearch.action.admin.indices.readonly.AddIndexBlockRequestBuilder; import org.elasticsearch.action.admin.indices.readonly.AddIndexBlockResponse; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.support.ActiveShardCount; @@ -196,10 +195,7 @@ public void testAddBlocksWhileExistingBlocks() { } public void testAddBlockToMissingIndex() { - IndexNotFoundException e = expectThrows( - IndexNotFoundException.class, - () -> indicesAdmin().prepareAddBlock(randomAddableBlock(), "test").get() - ); + IndexNotFoundException e = expectThrows(IndexNotFoundException.class, indicesAdmin().prepareAddBlock(randomAddableBlock(), "test")); assertThat(e.getMessage(), is("no such index [test]")); } @@ -207,7 +203,7 @@ public void testAddBlockToOneMissingIndex() { createIndex("test1"); final IndexNotFoundException e = expectThrows( IndexNotFoundException.class, - () -> indicesAdmin().prepareAddBlock(randomAddableBlock(), "test1", "test2").get() + indicesAdmin().prepareAddBlock(randomAddableBlock(), "test1", "test2") ); assertThat(e.getMessage(), is("no such index [test2]")); } @@ -226,7 +222,7 @@ public void testCloseOneMissingIndexIgnoreMissing() throws Exception { public void testAddBlockNoIndex() { final ActionRequestValidationException e = expectThrows( ActionRequestValidationException.class, - () -> indicesAdmin().prepareAddBlock(randomAddableBlock()).get() + indicesAdmin().prepareAddBlock(randomAddableBlock()) ); assertThat(e.getMessage(), containsString("index is missing")); } @@ -237,8 +233,10 @@ public void testAddBlockNullIndex() { public void testCannotAddReadOnlyAllowDeleteBlock() { createIndex("test1"); - final AddIndexBlockRequestBuilder request = indicesAdmin().prepareAddBlock(APIBlock.READ_ONLY_ALLOW_DELETE, "test1"); - final ActionRequestValidationException e = expectThrows(ActionRequestValidationException.class, request::get); + final ActionRequestValidationException e = expectThrows( + ActionRequestValidationException.class, + indicesAdmin().prepareAddBlock(APIBlock.READ_ONLY_ALLOW_DELETE, "test1") + ); 
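Aside (not part of the patch): nearly every hunk in this diff swaps `expectThrows(SomeException.class, () -> builder.get())` for `expectThrows(SomeException.class, builder)`. That overload lives in the ESTestCase infrastructure; a sketch of roughly what it must do, with the exact body and the `decRef()` of an unexpected response being assumptions about the real helper:

    // Sketch of a builder-accepting expectThrows overload (the real one is in
    // org.elasticsearch.test.ESTestCase). Call sites pass the builder itself and
    // drop the "() -> ... .get()" lambda noise.
    public static <E extends Throwable> E expectThrows(Class<E> expectedType, ActionRequestBuilder<?, ?> builder) {
        // Execute the request; if no exception surfaces, release the unexpected
        // (ref-counted) response before the assertion fails.
        return expectThrows(expectedType, () -> builder.get().decRef());
    }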
assertThat(e.getMessage(), containsString("read_only_allow_delete block is for internal use only")); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterHealthIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterHealthIT.java index 214e3f73144d9..7a8accf8cc7ce 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterHealthIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterHealthIT.java @@ -46,8 +46,8 @@ public void testSimpleLocalHealth() { .prepareHealth() .setLocal(true) .setWaitForEvents(Priority.LANGUID) - .setTimeout("30s") - .get("10s"); + .setTimeout(TimeValue.timeValueSeconds(30)) + .get(TimeValue.timeValueSeconds(10)); logger.info("--> got cluster health on [{}]", node); assertFalse("timed out on " + node, health.isTimedOut()); assertThat("health status on " + node, health.getStatus(), equalTo(ClusterHealthStatus.GREEN)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/NoMasterNodeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/NoMasterNodeIT.java index d37bcb842b5e2..811e99fc236ae 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/NoMasterNodeIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/NoMasterNodeIT.java @@ -360,14 +360,11 @@ public void testNoMasterActionsMetadataWriteMasterBlock() throws Exception { GetResponse getResponse = client(randomFrom(nodesWithShards)).prepareGet("test1", "1").get(); assertExists(getResponse); - expectThrows(Exception.class, () -> client(partitionedNode).prepareGet("test1", "1").get()); + expectThrows(Exception.class, client(partitionedNode).prepareGet("test1", "1")); assertHitCount(client(randomFrom(nodesWithShards)).prepareSearch("test1").setAllowPartialSearchResults(true).setSize(0), 1L); - expectThrows( - Exception.class, - () -> client(partitionedNode).prepareSearch("test1").setAllowPartialSearchResults(true).setSize(0).get() - ); + expectThrows(Exception.class, client(partitionedNode).prepareSearch("test1").setAllowPartialSearchResults(true).setSize(0)); TimeValue timeout = TimeValue.timeValueMillis(200); { @@ -377,6 +374,12 @@ public void testNoMasterActionsMetadataWriteMasterBlock() throws Exception { updateRequestBuilder.get(); updateRequestBuilder.request().decRef(); } + { + UpdateRequestBuilder updateRequestBuilder = client(partitionedNode).prepareUpdate("test1", "1") + .setDoc(Requests.INDEX_CONTENT_TYPE, "field", "value2") + .setTimeout(timeout); + expectThrows(Exception.class, updateRequestBuilder); + } expectThrows(Exception.class, () -> { UpdateRequestBuilder updateRequestBuilder = client(partitionedNode).prepareUpdate("test1", "1") @@ -398,44 +401,34 @@ public void testNoMasterActionsMetadataWriteMasterBlock() throws Exception { indexRequestBuilder.request().decRef(); } - // dynamic mapping updates fail - expectThrows(MasterNotDiscoveredException.class, () -> { + { + // dynamic mapping updates fail IndexRequestBuilder indexRequestBuilder = client(randomFrom(nodesWithShards)).prepareIndex("test1") .setId("1") .setSource(XContentFactory.jsonBuilder().startObject().field("new_field", "value").endObject()) .setTimeout(timeout); - try { - indexRequestBuilder.get(); - } finally { - indexRequestBuilder.request().decRef(); - } - }); + expectThrows(MasterNotDiscoveredException.class, indexRequestBuilder); + indexRequestBuilder.request().decRef(); + } - // dynamic index creation fails - 
expectThrows(MasterNotDiscoveredException.class, () -> { + { + // dynamic index creation fails IndexRequestBuilder indexRequestBuilder = client(randomFrom(nodesWithShards)).prepareIndex("test2") .setId("1") .setSource(XContentFactory.jsonBuilder().startObject().endObject()) .setTimeout(timeout); - try { - indexRequestBuilder.get(); - } finally { - indexRequestBuilder.request().decRef(); - } - }); + expectThrows(MasterNotDiscoveredException.class, indexRequestBuilder); + indexRequestBuilder.request().decRef(); + } - expectThrows(Exception.class, () -> { + { IndexRequestBuilder indexRequestBuilder = client(partitionedNode).prepareIndex("test1") .setId("1") .setSource(XContentFactory.jsonBuilder().startObject().endObject()) .setTimeout(timeout); - try { - indexRequestBuilder.get(); - } finally { - indexRequestBuilder.request().decRef(); - } - }); - + expectThrows(Exception.class, indexRequestBuilder); + indexRequestBuilder.request().decRef(); + } internalCluster().clearDisruptionScheme(true); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/AwarenessAllocationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/AwarenessAllocationIT.java index c4f06cc90fdf3..e6ea4823e86f0 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/AwarenessAllocationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/AwarenessAllocationIT.java @@ -253,7 +253,7 @@ public void testForceAwarenessSettingValidation() { final IllegalArgumentException illegalArgumentException = expectThrows( IllegalArgumentException.class, - () -> clusterAdmin().prepareUpdateSettings().setPersistentSettings(Settings.builder().put(prefix + "nonsense", "foo")).get() + clusterAdmin().prepareUpdateSettings().setPersistentSettings(Settings.builder().put(prefix + "nonsense", "foo")) ); assertThat(illegalArgumentException.getMessage(), containsString("[cluster.routing.allocation.awareness.force.]")); assertThat(illegalArgumentException.getCause(), instanceOf(SettingsException.class)); @@ -262,9 +262,7 @@ public void testForceAwarenessSettingValidation() { assertThat( expectThrows( IllegalArgumentException.class, - () -> clusterAdmin().prepareUpdateSettings() - .setPersistentSettings(Settings.builder().put(prefix + "attr.not_values", "foo")) - .get() + clusterAdmin().prepareUpdateSettings().setPersistentSettings(Settings.builder().put(prefix + "attr.not_values", "foo")) ).getMessage(), containsString("[cluster.routing.allocation.awareness.force.attr.not_values]") ); @@ -272,9 +270,7 @@ public void testForceAwarenessSettingValidation() { assertThat( expectThrows( IllegalArgumentException.class, - () -> clusterAdmin().prepareUpdateSettings() - .setPersistentSettings(Settings.builder().put(prefix + "attr.values.junk", "foo")) - .get() + clusterAdmin().prepareUpdateSettings().setPersistentSettings(Settings.builder().put(prefix + "attr.values.junk", "foo")) ).getMessage(), containsString("[cluster.routing.allocation.awareness.force.attr.values.junk]") ); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/FilteringAllocationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/FilteringAllocationIT.java index e5075361316ad..37be0a2b8dd1d 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/FilteringAllocationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/allocation/FilteringAllocationIT.java 
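Aside (not part of the patch): the `latch.await()` to `safeAwait(latch)` changes earlier in this diff replace an unbounded wait that forces `throws InterruptedException` clauses with a bounded, assertion-friendly helper. A sketch of what such a utility presumably looks like (the real one is in ESTestCase; the ten-second bound and messages are assumptions), assuming the usual java.util.concurrent and org.junit.Assert imports:

    // Sketch: wait for a latch with a deadline; convert interruption and timeout
    // into test failures so callers need no throws-clause and cannot hang forever.
    public static void safeAwait(CountDownLatch latch) {
        try {
            assertTrue("safeAwait: latch was not released within the timeout", latch.await(10, TimeUnit.SECONDS));
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new AssertionError("safeAwait: interrupted while waiting for latch", e);
        }
    }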
@@ -192,9 +192,8 @@ public void testInvalidIPFilterClusterSettings() { ); IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> clusterAdmin().prepareUpdateSettings() + clusterAdmin().prepareUpdateSettings() .setPersistentSettings(Settings.builder().put(filterSetting.getKey() + ipKey, "192.168.1.1.")) - .get() ); assertEquals("invalid IP address [192.168.1.1.] for [" + filterSetting.getKey() + ipKey + "]", e.getMessage()); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java index a893e28969257..003986daf9a1f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java @@ -10,7 +10,9 @@ import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteRequestBuilder; +import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresRequest; import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse; +import org.elasticsearch.action.admin.indices.shards.TransportIndicesShardStoresAction; import org.elasticsearch.action.admin.indices.stats.ShardStats; import org.elasticsearch.action.bulk.BulkRequestBuilder; import org.elasticsearch.action.bulk.BulkResponse; @@ -216,9 +218,7 @@ public void testFailedAllocationOfStalePrimaryToDataNodeWithNoData() throws Exce logger.info("--> force allocation of stale copy to node that does not have shard copy"); Throwable iae = expectThrows( IllegalArgumentException.class, - () -> clusterAdmin().prepareReroute() - .add(new AllocateStalePrimaryAllocationCommand("test", 0, dataNodeWithNoShardCopy, true)) - .get() + clusterAdmin().prepareReroute().add(new AllocateStalePrimaryAllocationCommand("test", 0, dataNodeWithNoShardCopy, true)) ); assertThat(iae.getMessage(), equalTo("No data for shard [0] of index [test] found on any node")); @@ -257,10 +257,10 @@ public void testForceStaleReplicaToBePromotedToPrimary() throws Exception { boolean useStaleReplica = randomBoolean(); // if true, use stale replica, otherwise a completely empty copy logger.info("--> explicitly promote old primary shard"); final String idxName = "test"; - Map<Integer, List<IndicesShardStoresResponse.StoreStatus>> storeStatuses = indicesAdmin().prepareShardStores(idxName) - .get() - .getStoreStatuses() - .get(idxName); + Map<Integer, List<IndicesShardStoresResponse.StoreStatus>> storeStatuses = client().execute( + TransportIndicesShardStoresAction.TYPE, + new IndicesShardStoresRequest(idxName) + ).get().getStoreStatuses().get(idxName); ClusterRerouteRequestBuilder rerouteBuilder = clusterAdmin().prepareReroute(); for (Map.Entry<Integer, List<IndicesShardStoresResponse.StoreStatus>> shardStoreStatuses : storeStatuses.entrySet()) { int shardId = shardStoreStatuses.getKey(); @@ -342,7 +342,7 @@ public void testForceStaleReplicaToBePromotedToPrimaryOnWrongNode() throws Excep final int shardId = 0; final List<String> nodeNames = new ArrayList<>(Arrays.asList(internalCluster().getNodeNames())); nodeNames.remove(master); - indicesAdmin().prepareShardStores(idxName) + client().execute(TransportIndicesShardStoresAction.TYPE, new IndicesShardStoresRequest(idxName)) .get() .getStoreStatuses() .get(idxName) @@ -352,9 +352,7 @@ public void testForceStaleReplicaToBePromotedToPrimaryOnWrongNode() throws Excep final String nodeWithoutData = nodeNames.get(0); Throwable iae = expectThrows( IllegalArgumentException.class, - () -> clusterAdmin().prepareReroute() - .add(new 
AllocateStalePrimaryAllocationCommand(idxName, shardId, nodeWithoutData, true)) - .get() + clusterAdmin().prepareReroute().add(new AllocateStalePrimaryAllocationCommand(idxName, shardId, nodeWithoutData, true)) ); assertThat( iae.getMessage(), @@ -372,9 +370,7 @@ public void testForceStaleReplicaToBePromotedForGreenIndex() { final int shardId = 0; IllegalArgumentException iae = expectThrows( IllegalArgumentException.class, - () -> clusterAdmin().prepareReroute() - .add(new AllocateStalePrimaryAllocationCommand(idxName, shardId, nodeWithoutData, true)) - .get() + clusterAdmin().prepareReroute().add(new AllocateStalePrimaryAllocationCommand(idxName, shardId, nodeWithoutData, true)) ); assertThat(iae.getMessage(), equalTo("[allocate_stale_primary] primary [" + idxName + "][" + shardId + "] is already assigned")); } @@ -385,7 +381,7 @@ public void testForceStaleReplicaToBePromotedForMissingIndex() { final String idxName = "test"; IndexNotFoundException ex = expectThrows( IndexNotFoundException.class, - () -> clusterAdmin().prepareReroute().add(new AllocateStalePrimaryAllocationCommand(idxName, 0, dataNode, true)).get() + clusterAdmin().prepareReroute().add(new AllocateStalePrimaryAllocationCommand(idxName, 0, dataNode, true)) ); assertThat(ex.getIndex().getName(), equalTo(idxName)); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/settings/ClusterSettingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/settings/ClusterSettingsIT.java index 9818b0a89bc8e..a142d594fe06e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/settings/ClusterSettingsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/settings/ClusterSettingsIT.java @@ -369,21 +369,23 @@ private void testRemoveArchiveSettingsWithBlocks(boolean readOnly, boolean readO assertTrue(state.getMetadata().persistentSettings().getAsBoolean("archived.this.is.unknown", false)); // cannot remove read only block due to archived settings - final IllegalArgumentException e1 = expectThrows(IllegalArgumentException.class, () -> { + { Settings.Builder builder = Settings.builder(); clearOrSetFalse(builder, readOnly, Metadata.SETTING_READ_ONLY_SETTING); clearOrSetFalse(builder, readOnlyAllowDelete, Metadata.SETTING_READ_ONLY_ALLOW_DELETE_SETTING); - clusterAdmin().prepareUpdateSettings().setPersistentSettings(builder).setTransientSettings(builder).get(); - }); - assertTrue(e1.getMessage().contains("unknown setting [archived.this.is.unknown]")); + final IllegalArgumentException e1 = expectThrows( + IllegalArgumentException.class, + clusterAdmin().prepareUpdateSettings().setPersistentSettings(builder).setTransientSettings(builder) + ); + assertTrue(e1.getMessage().contains("unknown setting [archived.this.is.unknown]")); + } // fail to clear archived settings with non-archived settings final ClusterBlockException e2 = expectThrows( ClusterBlockException.class, - () -> clusterAdmin().prepareUpdateSettings() + clusterAdmin().prepareUpdateSettings() .setPersistentSettings(Settings.builder().putNull("cluster.routing.allocation.enable")) .setTransientSettings(Settings.builder().putNull("archived.*")) - .get() ); if (readOnly) { assertTrue(e2.getMessage().contains("cluster read-only (api)")); @@ -395,7 +397,7 @@ private void testRemoveArchiveSettingsWithBlocks(boolean readOnly, boolean readO // fail to clear archived settings due to cluster read only block final ClusterBlockException e3 = expectThrows( ClusterBlockException.class, - () -> 
clusterAdmin().prepareUpdateSettings().setPersistentSettings(Settings.builder().putNull("archived.*")).get() + clusterAdmin().prepareUpdateSettings().setPersistentSettings(Settings.builder().putNull("archived.*")) ); if (readOnly) { assertTrue(e3.getMessage().contains("cluster read-only (api)")); @@ -404,8 +406,8 @@ private void testRemoveArchiveSettingsWithBlocks(boolean readOnly, boolean readO assertTrue(e3.getMessage().contains("cluster read-only / allow delete (api)")); } - // fail to clear archived settings with adding cluster block - final ClusterBlockException e4 = expectThrows(ClusterBlockException.class, () -> { + { + // fail to clear archived settings with adding cluster block Settings.Builder builder = Settings.builder().putNull("archived.*"); if (randomBoolean()) { builder.put(Metadata.SETTING_READ_ONLY_SETTING.getKey(), "true"); @@ -415,27 +417,33 @@ private void testRemoveArchiveSettingsWithBlocks(boolean readOnly, boolean readO } else { builder.put(Metadata.SETTING_READ_ONLY_ALLOW_DELETE_SETTING.getKey(), "true"); } - clusterAdmin().prepareUpdateSettings().setPersistentSettings(builder).get(); - }); - if (readOnly) { - assertTrue(e4.getMessage().contains("cluster read-only (api)")); - } - if (readOnlyAllowDelete) { - assertTrue(e4.getMessage().contains("cluster read-only / allow delete (api)")); + final ClusterBlockException e4 = expectThrows( + ClusterBlockException.class, + clusterAdmin().prepareUpdateSettings().setPersistentSettings(builder) + ); + if (readOnly) { + assertTrue(e4.getMessage().contains("cluster read-only (api)")); + } + if (readOnlyAllowDelete) { + assertTrue(e4.getMessage().contains("cluster read-only / allow delete (api)")); + } } - // fail to set archived settings to non-null value even with clearing blocks together - final ClusterBlockException e5 = expectThrows(ClusterBlockException.class, () -> { + { + // fail to set archived settings to non-null value even with clearing blocks together Settings.Builder builder = Settings.builder().put("archived.this.is.unknown", "false"); clearOrSetFalse(builder, readOnly, Metadata.SETTING_READ_ONLY_SETTING); clearOrSetFalse(builder, readOnlyAllowDelete, Metadata.SETTING_READ_ONLY_ALLOW_DELETE_SETTING); - clusterAdmin().prepareUpdateSettings().setPersistentSettings(builder).get(); - }); - if (readOnly) { - assertTrue(e5.getMessage().contains("cluster read-only (api)")); - } - if (readOnlyAllowDelete) { - assertTrue(e5.getMessage().contains("cluster read-only / allow delete (api)")); + final ClusterBlockException e5 = expectThrows( + ClusterBlockException.class, + clusterAdmin().prepareUpdateSettings().setPersistentSettings(builder) + ); + if (readOnly) { + assertTrue(e5.getMessage().contains("cluster read-only (api)")); + } + if (readOnlyAllowDelete) { + assertTrue(e5.getMessage().contains("cluster read-only / allow delete (api)")); + } } // we can clear read-only block with archived settings together @@ -536,7 +544,7 @@ private void testLoggerLevelUpdate(final BiConsumer - final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> throwBuilder.get()); + final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, throwBuilder); assertEquals("Unknown level constant [BOOM].", e.getMessage()); try { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/shards/ClusterShardLimitIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/shards/ClusterShardLimitIT.java index 2db8474993d31..2bc6856479ab7 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/shards/ClusterShardLimitIT.java +++ 
b/server/src/internalClusterTest/java/org/elasticsearch/cluster/shards/ClusterShardLimitIT.java @@ -123,10 +123,7 @@ public void testIndexCreationOverLimitFromTemplate() { .setSettings(indexSettings(counts.getFailingIndexShards(), counts.getFailingIndexReplicas())) ); - final IllegalArgumentException e = expectThrows( - IllegalArgumentException.class, - () -> indicesAdmin().prepareCreate("should-fail").get() - ); + final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, indicesAdmin().prepareCreate("should-fail")); verifyException(dataNodes, counts, e); ClusterState clusterState = clusterAdmin().prepareState().get().getState(); assertFalse(clusterState.getMetadata().hasIndex("should-fail")); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/discovery/ClusterDisruptionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/discovery/ClusterDisruptionIT.java index 6c32ac1eca8b4..d9c8c1777e7c7 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/discovery/ClusterDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/discovery/ClusterDisruptionIT.java @@ -109,7 +109,7 @@ static ConflictMode randomMode() { public void testAckedIndexing() throws Exception { final int seconds = (TEST_NIGHTLY && rarely()) == false ? 1 : 5; - final String timeout = seconds + "s"; + final TimeValue timeout = TimeValue.timeValueSeconds(seconds); final List<String> nodes = startCluster(rarely() ? 5 : 3); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/env/NodeRepurposeCommandIT.java b/server/src/internalClusterTest/java/org/elasticsearch/env/NodeRepurposeCommandIT.java index 060b5f00f411a..439372690ce1e 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/env/NodeRepurposeCommandIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/env/NodeRepurposeCommandIT.java @@ -81,7 +81,7 @@ public void testRepurpose() throws Exception { internalCluster().startCoordinatingOnlyNode(dataNodeDataPathSettings); assertTrue(indexExists(indexName)); - expectThrows(NoShardAvailableActionException.class, () -> client().prepareGet(indexName, "1").get()); + expectThrows(NoShardAvailableActionException.class, client().prepareGet(indexName, "1")); logger.info("--> Restarting and repurposing other node"); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/gateway/GatewayIndexStateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/gateway/GatewayIndexStateIT.java index 3c03f84a0f1f8..bdf3ed138b287 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/gateway/GatewayIndexStateIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/gateway/GatewayIndexStateIT.java @@ -416,7 +416,7 @@ public void testRecoverBrokenIndexMetadata() throws Exception { assertEquals(IndexMetadata.State.CLOSE, state.getMetadata().index(metadata.getIndex()).getState()); assertEquals("boolean", state.getMetadata().index(metadata.getIndex()).getSettings().get("archived.index.similarity.BM25.type")); // try to open it with the broken setting - fail again! 
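A second theme, visible in the ClusterHealthIT and ClusterDisruptionIT hunks above, is replacing stringly-typed timeouts with TimeValue. A small illustrative sketch (assuming the String-accepting overload is the one being phased out):

    // Before: the string form is only parsed, and validated, when the request is built.
    // .setTimeout("30s")
    // After: the typed factory cannot produce a malformed value and keeps the unit explicit.
    TimeValue timeout = TimeValue.timeValueSeconds(30);
    clusterAdmin().prepareHealth().setTimeout(timeout);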
- ElasticsearchException ex = expectThrows(ElasticsearchException.class, () -> indicesAdmin().prepareOpen("test").get()); + ElasticsearchException ex = expectThrows(ElasticsearchException.class, indicesAdmin().prepareOpen("test")); assertEquals(ex.getMessage(), "Failed to verify index " + metadata.getIndex()); assertNotNull(ex.getCause()); assertEquals(IllegalArgumentException.class, ex.getCause().getClass()); @@ -482,7 +482,7 @@ public void testRecoverMissingAnalyzer() throws Exception { indicesAdmin().prepareClose("test").get(); // try to open it with the broken setting - fail again! - ElasticsearchException ex = expectThrows(ElasticsearchException.class, () -> indicesAdmin().prepareOpen("test").get()); + ElasticsearchException ex = expectThrows(ElasticsearchException.class, indicesAdmin().prepareOpen("test")); assertEquals(ex.getMessage(), "Failed to verify index " + metadata.getIndex()); assertNotNull(ex.getCause()); assertEquals(MapperParsingException.class, ex.getCause().getClass()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/get/GetActionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/get/GetActionIT.java index a10e7598ec46e..56cd235931665 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/get/GetActionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/get/GetActionIT.java @@ -206,10 +206,7 @@ public void testGetWithAliasPointingToMultipleIndices() { DocWriteResponse indexResponse = index("index1", "id", Collections.singletonMap("foo", "bar")); assertThat(indexResponse.status().getStatus(), equalTo(RestStatus.CREATED.getStatus())); - IllegalArgumentException exception = expectThrows( - IllegalArgumentException.class, - () -> client().prepareGet("alias1", "_alias_id").get() - ); + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, client().prepareGet("alias1", "_alias_id")); assertThat(exception.getMessage(), endsWith("can't execute a single index op")); } @@ -549,13 +546,13 @@ public void testGetFieldsNonLeafField() throws Exception { IllegalArgumentException exc = expectThrows( IllegalArgumentException.class, - () -> client().prepareGet(indexOrAlias(), "1").setStoredFields("field1").get() + client().prepareGet(indexOrAlias(), "1").setStoredFields("field1") ); assertThat(exc.getMessage(), equalTo("field [field1] isn't a leaf field")); flush(); - exc = expectThrows(IllegalArgumentException.class, () -> client().prepareGet(indexOrAlias(), "1").setStoredFields("field1").get()); + exc = expectThrows(IllegalArgumentException.class, client().prepareGet(indexOrAlias(), "1").setStoredFields("field1")); assertThat(exc.getMessage(), equalTo("field [field1] isn't a leaf field")); } @@ -829,7 +826,7 @@ void indexSingleDocumentWithStringFieldsGeneratedFromText(boolean stored, boolea } public void testGetRemoteIndex() { - IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> client().prepareGet("cluster:index", "id").get()); + IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, client().prepareGet("cluster:index", "id")); assertEquals( "Cross-cluster calls are not supported in this context but remote indices were requested: [cluster:index]", iae.getMessage() diff --git a/server/src/internalClusterTest/java/org/elasticsearch/health/GetHealthActionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/health/GetHealthActionIT.java index a31c3a08b8a4f..798f0e9bfb09f 100644 --- 
a/server/src/internalClusterTest/java/org/elasticsearch/health/GetHealthActionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/health/GetHealthActionIT.java @@ -27,13 +27,11 @@ import java.util.List; import java.util.Map; import java.util.NoSuchElementException; -import java.util.concurrent.ExecutionException; import java.util.stream.Stream; import static org.elasticsearch.common.util.CollectionUtils.appendToCopy; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; -import static org.hamcrest.Matchers.instanceOf; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST) public class GetHealthActionIT extends ESIntegTestCase { @@ -172,16 +170,10 @@ public void testGetHealth() throws Exception { testIndicator(client, ilmIndicatorStatus, true); // Next, test that if we ask for a nonexistent indicator, we get an exception - { - ExecutionException exception = expectThrows( - ExecutionException.class, - () -> client.execute( - GetHealthAction.INSTANCE, - new GetHealthAction.Request(NONEXISTENT_INDICATOR_NAME, randomBoolean(), 1000) - ).get() - ); - assertThat(exception.getCause(), instanceOf(ResourceNotFoundException.class)); - } + expectThrows( + ResourceNotFoundException.class, + client.execute(GetHealthAction.INSTANCE, new GetHealthAction.Request(NONEXISTENT_INDICATOR_NAME, randomBoolean(), 1000)) + ); // Check health api stats { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/HiddenIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/HiddenIndexIT.java index ce560f17affe5..c95e638de8a20 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/HiddenIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/HiddenIndexIT.java @@ -123,10 +123,9 @@ public void testGlobalTemplatesDoNotApply() { public void testGlobalTemplateCannotMakeIndexHidden() { InvalidIndexTemplateException invalidIndexTemplateException = expectThrows( InvalidIndexTemplateException.class, - () -> indicesAdmin().preparePutTemplate("a_global_template") + indicesAdmin().preparePutTemplate("a_global_template") .setPatterns(List.of("*")) .setSettings(Settings.builder().put("index.hidden", randomBoolean()).build()) - .get() ); assertThat(invalidIndexTemplateException.getMessage(), containsString("global templates may not specify the setting index.hidden")); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/IndexSortIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/IndexSortIT.java index 2c9c8ef615c27..7858735f88538 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/IndexSortIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/IndexSortIT.java @@ -80,25 +80,22 @@ public void testIndexSort() { public void testInvalidIndexSort() { IllegalArgumentException exc = expectThrows( IllegalArgumentException.class, - () -> prepareCreate("test").setSettings(Settings.builder().put(indexSettings()).putList("index.sort.field", "invalid_field")) + prepareCreate("test").setSettings(Settings.builder().put(indexSettings()).putList("index.sort.field", "invalid_field")) .setMapping(TEST_MAPPING) - .get() ); assertThat(exc.getMessage(), containsString("unknown index sort field:[invalid_field]")); exc = expectThrows( IllegalArgumentException.class, - () -> prepareCreate("test").setSettings(Settings.builder().put(indexSettings()).putList("index.sort.field", "numeric")) + 
prepareCreate("test").setSettings(Settings.builder().put(indexSettings()).putList("index.sort.field", "numeric")) .setMapping(TEST_MAPPING) - .get() ); assertThat(exc.getMessage(), containsString("docvalues not found for index sort field:[numeric]")); exc = expectThrows( IllegalArgumentException.class, - () -> prepareCreate("test").setSettings(Settings.builder().put(indexSettings()).putList("index.sort.field", "keyword")) + prepareCreate("test").setSettings(Settings.builder().put(indexSettings()).putList("index.sort.field", "keyword")) .setMapping(TEST_MAPPING) - .get() ); assertThat(exc.getMessage(), containsString("docvalues not found for index sort field:[keyword]")); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/engine/MaxDocsLimitIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/engine/MaxDocsLimitIT.java index fa19774efe640..10d8f961a7eee 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/engine/MaxDocsLimitIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/engine/MaxDocsLimitIT.java @@ -103,10 +103,7 @@ public void testMaxDocsLimit() throws Exception { indexingResult = indexDocs(rejectedRequests, between(1, 8)); assertThat(indexingResult.numFailures, equalTo(rejectedRequests)); assertThat(indexingResult.numSuccess, equalTo(0)); - final IllegalArgumentException deleteError = expectThrows( - IllegalArgumentException.class, - () -> client().prepareDelete("test", "any-id").get() - ); + final IllegalArgumentException deleteError = expectThrows(IllegalArgumentException.class, client().prepareDelete("test", "any-id")); assertThat(deleteError.getMessage(), containsString("Number of documents in the index can't exceed [" + maxDocs.get() + "]")); indicesAdmin().prepareRefresh("test").get(); assertNoFailuresAndResponse( diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/DynamicMappingIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/DynamicMappingIT.java index bee5a240a5664..5373b420efde0 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/DynamicMappingIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/DynamicMappingIT.java @@ -24,7 +24,6 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.query.GeoBoundingBoxQueryBuilder; import org.elasticsearch.index.query.MatchQueryBuilder; import org.elasticsearch.plugins.Plugin; @@ -190,10 +189,10 @@ public void onFailure(Exception e) { }); masterBlockedLatch.await(); - final IndexRequestBuilder indexRequestBuilder = prepareIndex("index").setId("2").setSource("nested3", Map.of("foo", "bar")); try { assertThat( - expectThrows(IllegalArgumentException.class, () -> indexRequestBuilder.get(TimeValue.timeValueSeconds(10))).getMessage(), + expectThrows(IllegalArgumentException.class, prepareIndex("index").setId("2").setSource("nested3", Map.of("foo", "bar"))) + .getMessage(), Matchers.containsString("Limit of nested fields [2] has been exceeded") ); } finally { @@ -226,9 +225,8 @@ public void onFailure(Exception e) { }); masterBlockedLatch.await(); - final IndexRequestBuilder indexRequestBuilder = prepareIndex("index").setId("2").setSource("field2", "value2"); try { - Exception e = expectThrows(DocumentParsingException.class, () -> indexRequestBuilder.get(TimeValue.timeValueSeconds(10))); + 
Exception e = expectThrows(DocumentParsingException.class, prepareIndex("index").setId("2").setSource("field2", "value2")); assertThat(e.getMessage(), Matchers.containsString("failed to parse")); assertThat(e.getCause(), instanceOf(IllegalArgumentException.class)); assertThat( @@ -273,7 +271,7 @@ public void testTotalFieldsLimitWithRuntimeFields() { // introduction of a new object with 2 new sub-fields fails final IndexRequestBuilder indexRequestBuilder = prepareIndex("index1").setId("1") .setSource("field3", "value3", "my_object2", Map.of("new_field1", "value1", "new_field2", "value2")); - Exception exc = expectThrows(DocumentParsingException.class, () -> indexRequestBuilder.get(TimeValue.timeValueSeconds(10))); + Exception exc = expectThrows(DocumentParsingException.class, indexRequestBuilder); indexRequestBuilder.request().decRef(); assertThat(exc.getMessage(), Matchers.containsString("failed to parse")); assertThat(exc.getCause(), instanceOf(IllegalArgumentException.class)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedFileIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedFileIT.java index d85fe9d4cc444..8d94a8d8d12f2 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedFileIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedFileIT.java @@ -21,7 +21,9 @@ import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; +import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresRequest; import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse; +import org.elasticsearch.action.admin.indices.shards.TransportIndicesShardStoresAction; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.nodes.BaseNodeResponse; @@ -620,7 +622,10 @@ public void testReplicaCorruption() throws Exception { final Index index = resolveIndex("test"); - final IndicesShardStoresResponse stores = indicesAdmin().prepareShardStores(index.getName()).get(); + final IndicesShardStoresResponse stores = client().execute( + TransportIndicesShardStoresAction.TYPE, + new IndicesShardStoresRequest(index.getName()) + ).get(); for (Map.Entry<Integer, List<IndicesShardStoresResponse.StoreStatus>> shards : stores.getStoreStatuses() .get(index.getName()) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedTranslogIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedTranslogIT.java index f70c3e344f378..207f7737080cc 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedTranslogIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/store/CorruptedTranslogIT.java @@ -100,7 +100,7 @@ public void onAllNodesStopped() throws Exception { }); assertThat( - expectThrows(SearchPhaseExecutionException.class, () -> prepareSearch("test").setQuery(matchAllQuery()).get()).getMessage(), + expectThrows(SearchPhaseExecutionException.class, prepareSearch("test").setQuery(matchAllQuery())).getMessage(), containsString("all shards failed") ); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesOptionsIntegrationIT.java 
b/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesOptionsIntegrationIT.java index 34b015408a0fc..20b2c27964d7b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesOptionsIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/IndicesOptionsIntegrationIT.java @@ -408,9 +408,9 @@ public void testAllMissingLenient() throws Exception { public void testAllMissingStrict() throws Exception { createIndex("test1"); - expectThrows(IndexNotFoundException.class, () -> prepareSearch("test2").setQuery(matchAllQuery()).get()); + expectThrows(IndexNotFoundException.class, prepareSearch("test2").setQuery(matchAllQuery())); - expectThrows(IndexNotFoundException.class, () -> prepareSearch("test2", "test3").setQuery(matchAllQuery()).get()); + expectThrows(IndexNotFoundException.class, prepareSearch("test2", "test3").setQuery(matchAllQuery())); // you should still be able to run empty searches without things blowing up prepareSearch().setQuery(matchAllQuery()).get().decRef(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/analyze/AnalyzeActionIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/analyze/AnalyzeActionIT.java index 9733b2408f886..e0b13d727caa3 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/analyze/AnalyzeActionIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/analyze/AnalyzeActionIT.java @@ -77,9 +77,8 @@ public void testAnalyzeNumericField() throws IOException { assertAcked(prepareCreate("test").addAlias(new Alias("alias")).setMapping("long", "type=long", "double", "type=double")); ensureGreen("test"); - expectThrows(IllegalArgumentException.class, () -> indicesAdmin().prepareAnalyze(indexOrAlias(), "123").setField("long").get()); - - expectThrows(IllegalArgumentException.class, () -> indicesAdmin().prepareAnalyze(indexOrAlias(), "123.0").setField("double").get()); + expectThrows(IllegalArgumentException.class, indicesAdmin().prepareAnalyze(indexOrAlias(), "123").setField("long")); + expectThrows(IllegalArgumentException.class, indicesAdmin().prepareAnalyze(indexOrAlias(), "123.0").setField("double")); } public void testAnalyzeWithNoIndex() throws Exception { @@ -280,7 +279,7 @@ public void testDetailAnalyzeWithMultiValues() throws Exception { public void testNonExistTokenizer() { IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> indicesAdmin().prepareAnalyze("this is a test").setAnalyzer("not_exist_analyzer").get() + indicesAdmin().prepareAnalyze("this is a test").setAnalyzer("not_exist_analyzer") ); assertThat(e.getMessage(), startsWith("failed to find global analyzer")); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/flush/FlushIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/flush/FlushIT.java index f27db0803da87..1451f3a1a4188 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/flush/FlushIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/flush/FlushIT.java @@ -95,10 +95,7 @@ public void testRejectIllegalFlushParameters() { indexRequestBuilder.request().decRef(); } assertThat( - expectThrows( - ValidationException.class, - () -> indicesAdmin().flush(new FlushRequest().force(true).waitIfOngoing(false)).actionGet() - ).getMessage(), + expectThrows(ValidationException.class, indicesAdmin().flush(new FlushRequest().force(true).waitIfOngoing(false))).getMessage(), 
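The GetHealthActionIT and FlushIT hunks above apply the same idea to futures: expectThrows can take an ActionFuture directly, replacing a lambda around get() or actionGet(). A plausible shape for that overload, mirroring the builder variant (an assumption, not the verbatim helper):

    public static <T extends Throwable> T expectThrows(Class<T> expectedType, ActionFuture<? extends RefCounted> future) {
        // actionGet() rethrows the underlying failure unwrapped, so the asserted type is
        // the real cause (e.g. ResourceNotFoundException) rather than ExecutionException.
        return expectThrows(expectedType, () -> future.actionGet().decRef());
    }

That unwrapping is what lets GetHealthActionIT drop its ExecutionException/instanceOf indirection and assert ResourceNotFoundException directly.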
containsString("wait_if_ongoing must be true for a force flush") ); assertThat(indicesAdmin().flush(new FlushRequest().force(true).waitIfOngoing(true)).actionGet().getShardFailures(), emptyArray()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/MalformedDynamicTemplateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/MalformedDynamicTemplateIT.java index bf3f956d02225..c46a9eca5c7ec 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/MalformedDynamicTemplateIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/MalformedDynamicTemplateIT.java @@ -67,9 +67,9 @@ public void testBWCMalformedDynamicTemplate() { MapperParsingException ex = expectThrows( MapperParsingException.class, - () -> prepareCreate("malformed_dynamic_template_8.0").setSettings( + prepareCreate("malformed_dynamic_template_8.0").setSettings( Settings.builder().put(indexSettings()).put("number_of_shards", 1).put("index.version.created", IndexVersion.current()) - ).setMapping(mapping).get() + ).setMapping(mapping) ); assertThat(ex.getMessage(), containsString("dynamic template [my_template] has invalid content")); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/SimpleGetFieldMappingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/SimpleGetFieldMappingsIT.java index 0f7ca38ca8f6b..984082ec65193 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/SimpleGetFieldMappingsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/mapping/SimpleGetFieldMappingsIT.java @@ -159,7 +159,9 @@ public void testSimpleGetFieldMappingsWithPretty() throws Exception { String responseStrings = Strings.toString(responseBuilder); XContentBuilder prettyJsonBuilder = XContentFactory.jsonBuilder().prettyPrint(); - prettyJsonBuilder.copyCurrentStructure(createParser(JsonXContent.jsonXContent, responseStrings)); + try (var parser = createParser(JsonXContent.jsonXContent, responseStrings)) { + prettyJsonBuilder.copyCurrentStructure(parser); + } assertThat(responseStrings, equalTo(Strings.toString(prettyJsonBuilder))); params.put("pretty", "false"); @@ -170,7 +172,9 @@ public void testSimpleGetFieldMappingsWithPretty() throws Exception { responseStrings = Strings.toString(responseBuilder); prettyJsonBuilder = XContentFactory.jsonBuilder().prettyPrint(); - prettyJsonBuilder.copyCurrentStructure(createParser(JsonXContent.jsonXContent, responseStrings)); + try (var parser = createParser(JsonXContent.jsonXContent, responseStrings)) { + prettyJsonBuilder.copyCurrentStructure(parser); + } assertThat(responseStrings, not(equalTo(Strings.toString(prettyJsonBuilder)))); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/DanglingIndicesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/DanglingIndicesIT.java index 7c0ef90ca8161..3582fa6930f54 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/DanglingIndicesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/DanglingIndicesIT.java @@ -8,12 +8,16 @@ package org.elasticsearch.indices.recovery; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.admin.indices.dangling.DanglingIndexInfo; import org.elasticsearch.action.admin.indices.dangling.delete.DeleteDanglingIndexRequest; +import 
org.elasticsearch.action.admin.indices.dangling.delete.TransportDeleteDanglingIndexAction; import org.elasticsearch.action.admin.indices.dangling.import_index.ImportDanglingIndexRequest; +import org.elasticsearch.action.admin.indices.dangling.import_index.TransportImportDanglingIndexAction; import org.elasticsearch.action.admin.indices.dangling.list.ListDanglingIndicesRequest; import org.elasticsearch.action.admin.indices.dangling.list.ListDanglingIndicesResponse; import org.elasticsearch.action.admin.indices.dangling.list.NodeListDanglingIndicesResponse; +import org.elasticsearch.action.admin.indices.dangling.list.TransportListDanglingIndicesAction; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; @@ -28,7 +32,9 @@ import java.util.ArrayList; import java.util.List; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; @@ -95,7 +101,7 @@ public void testDanglingIndicesCanBeListed() throws Exception { final String stoppedNodeName = createDanglingIndices(INDEX_NAME); - final ListDanglingIndicesResponse response = clusterAdmin().listDanglingIndices(new ListDanglingIndicesRequest()).actionGet(); + final ListDanglingIndicesResponse response = executeListDanglingIndicesAction(); assertThat(response.status(), equalTo(RestStatus.OK)); final List<NodeListDanglingIndicesResponse> nodeResponses = response.getNodes(); @@ -123,27 +129,22 @@ public void testDanglingIndicesCanBeImported() throws Exception { final String danglingIndexUUID = findDanglingIndexForNode(stoppedNodeName, INDEX_NAME); - final ImportDanglingIndexRequest request = new ImportDanglingIndexRequest(danglingIndexUUID, true); - clusterAdmin().importDanglingIndex(request).get(); + importDanglingIndex(new ImportDanglingIndexRequest(danglingIndexUUID, true)); assertTrue("Expected dangling index " + INDEX_NAME + " to be recovered", indexExists(INDEX_NAME)); } /** - * Check that the when sending an import-dangling-indices request, the specified UUIDs are validated as - * being dangling. + * Check that when sending an import-dangling-indices request, the specified UUIDs are validated as being dangling. 
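The expected-failure call sites in this file now go through the two-class expectThrows, asserting an outer ExecutionException and returning its unwrapped cause. Its semantics are roughly the following sketch (assumed from how the call sites below use it, not quoted from the framework):

    public static <TO extends Throwable, TW extends Throwable> TW expectThrows(Class<TO> outerType, Class<TW> causeType, ThrowingRunnable runnable) {
        // Assert the outer exception type, then assert and hand back its cause so the
        // caller can match on the cause's message.
        TO outer = expectThrows(outerType, runnable);
        assertThat(outer.getCause(), instanceOf(causeType));
        return causeType.cast(outer.getCause());
    }

This pairs with helpers like importDanglingIndex further down, which let ExecutionException propagate (the interesting failure) while treating interrupts and timeouts as immediate test failures.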
*/ public void testDanglingIndicesMustExistToBeImported() { internalCluster().startNodes(1, buildSettings(0, true)); final ImportDanglingIndexRequest request = new ImportDanglingIndexRequest("NonExistentUUID", true); - - final IllegalArgumentException e = expectThrows( - IllegalArgumentException.class, - () -> clusterAdmin().importDanglingIndex(request).actionGet() + assertThat( + expectThrows(ExecutionException.class, IllegalArgumentException.class, () -> importDanglingIndex(request)).getMessage(), + containsString("No dangling index found for UUID [NonExistentUUID]") ); - - assertThat(e.getMessage(), containsString("No dangling index found for UUID [NonExistentUUID]")); } /** @@ -157,9 +158,10 @@ public void testMustAcceptDataLossToImportDanglingIndex() throws Exception { final ImportDanglingIndexRequest request = new ImportDanglingIndexRequest(danglingIndexUUID, false); - Exception e = expectThrows(Exception.class, () -> clusterAdmin().importDanglingIndex(request).actionGet()); - - assertThat(e.getMessage(), containsString("accept_data_loss must be set to true")); + assertThat( + expectThrows(Exception.class, () -> importDanglingIndex(request)).getMessage(), + containsString("accept_data_loss must be set to true") + ); } /** @@ -180,7 +182,7 @@ public void testDanglingIndexCanBeDeleted() throws Exception { final String stoppedNodeName = createDanglingIndices(INDEX_NAME, OTHER_INDEX_NAME); final String danglingIndexUUID = findDanglingIndexForNode(stoppedNodeName, INDEX_NAME); - clusterAdmin().deleteDanglingIndex(new DeleteDanglingIndexRequest(danglingIndexUUID, true)).actionGet(); + deleteDanglingIndex(new DeleteDanglingIndexRequest(danglingIndexUUID, true)); // The dangling index that we deleted ought to have been removed from disk. Check by // creating and deleting another index, which creates a new tombstone entry, which should @@ -231,7 +233,7 @@ public Settings onNodeStopped(String nodeName) throws Exception { danglingIndices.set(results); // Try to delete the index - this request should succeed - clusterAdmin().deleteDanglingIndex(new DeleteDanglingIndexRequest(danglingIndices.get().get(0).getIndexUUID(), true)).actionGet(); + deleteDanglingIndex(new DeleteDanglingIndexRequest(danglingIndices.get().get(0).getIndexUUID(), true)); // The dangling index that we deleted ought to have been removed from disk. 
Check by // creating and deleting another index, which creates a new tombstone entry, which should @@ -252,12 +254,16 @@ public void testDeleteDanglingIndicesRequiresDataLossFlagToBeTrue() throws Excep final String stoppedNodeName = createDanglingIndices(INDEX_NAME, OTHER_INDEX_NAME); final String danglingIndexUUID = findDanglingIndexForNode(stoppedNodeName, INDEX_NAME); - Exception e = expectThrows( - Exception.class, - () -> clusterAdmin().deleteDanglingIndex(new DeleteDanglingIndexRequest(danglingIndexUUID, false)).actionGet() + assertThat( + ExceptionsHelper.unwrapCause( + expectThrows( + ExecutionException.class, + Exception.class, + () -> deleteDanglingIndex(new DeleteDanglingIndexRequest(danglingIndexUUID, false)) + ) + ).getMessage(), + containsString("accept_data_loss must be set to true") ); - - assertThat(e.getMessage(), containsString("accept_data_loss must be set to true")); } /** @@ -279,7 +285,7 @@ public void testDanglingIndicesImportedAndDeletedCannotBeReimported() throws Exc safeAwait(startLatch); while (isImporting.get()) { try { - clusterAdmin().importDanglingIndex(new ImportDanglingIndexRequest(danglingIndexUUID, true)).get(); + importDanglingIndex(new ImportDanglingIndexRequest(danglingIndexUUID, true)); } catch (Exception e) { // failures are expected } @@ -325,8 +331,8 @@ public void testDanglingIndicesImportedAndDeletedCannotBeReimported() throws Exc /** * Helper that fetches the current list of dangling indices. */ - private List listDanglingIndices() { - final ListDanglingIndicesResponse response = clusterAdmin().listDanglingIndices(new ListDanglingIndicesRequest()).actionGet(); + private static List listDanglingIndices() { + final ListDanglingIndicesResponse response = executeListDanglingIndicesAction(); assertThat(response.status(), equalTo(RestStatus.OK)); final List nodeResponses = response.getNodes(); @@ -340,6 +346,30 @@ private List listDanglingIndices() { return results; } + private static ListDanglingIndicesResponse executeListDanglingIndicesAction() { + try { + return client().execute(TransportListDanglingIndicesAction.TYPE, new ListDanglingIndicesRequest()).get(10, TimeUnit.SECONDS); + } catch (Exception e) { + return fail(e); + } + } + + private static void importDanglingIndex(ImportDanglingIndexRequest request) throws ExecutionException { + try { + client().execute(TransportImportDanglingIndexAction.TYPE, request).get(10, TimeUnit.SECONDS); + } catch (InterruptedException | TimeoutException ex) { + fail(ex); + } + } + + private static void deleteDanglingIndex(DeleteDanglingIndexRequest request) throws ExecutionException { + try { + client().execute(TransportDeleteDanglingIndexAction.TYPE, request).get(10, TimeUnit.SECONDS); + } catch (InterruptedException | TimeoutException ex) { + fail(ex); + } + } + /** * Simple helper that creates one or more indices, and importantly, * checks that they are green before proceeding. 
This is important @@ -390,7 +420,7 @@ private String findDanglingIndexForNode(String stoppedNodeName, String indexName) { String danglingIndexUUID = null; - final ListDanglingIndicesResponse response = clusterAdmin().listDanglingIndices(new ListDanglingIndicesRequest()).actionGet(); + final ListDanglingIndicesResponse response = executeListDanglingIndicesAction(); assertThat(response.status(), equalTo(RestStatus.OK)); final List<NodeListDanglingIndicesResponse> nodeResponses = response.getNodes(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java index cea2d6765981e..6467af8505b12 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/IndexRecoveryIT.java @@ -1746,12 +1746,12 @@ public void testReservesBytesDuringPeerRecoveryPhaseOne() throws Exception { .getNodes() .get(0) .getIndices(); - assertThat(nodeIndicesStats.getStore().getReservedSize().getBytes(), equalTo(0L)); + assertThat(nodeIndicesStats.getStore().reservedSizeInBytes(), equalTo(0L)); assertThat( nodeIndicesStats.getShardStats(clusterState.metadata().index(indexName).getIndex()) .stream() .flatMap(s -> Arrays.stream(s.getShards())) - .map(s -> s.getStats().getStore().getReservedSize().getBytes()) + .map(s -> s.getStats().getStore().reservedSizeInBytes()) .toList(), everyItem(equalTo(StoreStats.UNKNOWN_RESERVED_BYTES)) ); @@ -1767,8 +1767,7 @@ public void testReservesBytesDuringPeerRecoveryPhaseOne() throws Exception { .get(0) .getIndices() .getStore() - .getReservedSize() - .getBytes(), + .reservedSizeInBytes(), greaterThan(0L) ); } @@ -1786,7 +1785,7 @@ public void testReservesBytesDuringPeerRecoveryPhaseOne() throws Exception { .get() .getNodes() .stream() - .mapToLong(n -> n.getIndices().getStore().getReservedSize().getBytes()) + .mapToLong(n -> n.getIndices().getStore().reservedSizeInBytes()) .sum(), equalTo(0L) ); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/settings/InternalSettingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/settings/InternalSettingsIT.java index a0a070b3e0eec..3c62f859b3f31 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/settings/InternalSettingsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/settings/InternalSettingsIT.java @@ -42,9 +42,7 @@ public void testUpdateInternalIndexSettingViaSettingsAPI() { // we can not update the setting via the update settings API final IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> indicesAdmin().prepareUpdateSettings("test") - .setSettings(Settings.builder().put("index.internal", "internal-update")) - .get() + indicesAdmin().prepareUpdateSettings("test").setSettings(Settings.builder().put("index.internal", "internal-update")) ); final String message = "can not update internal setting [index.internal]; this setting is managed via a dedicated API"; assertThat(e, hasToString(containsString(message))); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/settings/PrivateSettingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/settings/PrivateSettingsIT.java index 3e8d34222b1e3..0ebd276511795 100644 --- 
a/server/src/internalClusterTest/java/org/elasticsearch/indices/settings/PrivateSettingsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/settings/PrivateSettingsIT.java @@ -42,7 +42,7 @@ public void testUpdatePrivateIndexSettingViaSettingsAPI() { // we can not update the setting via the update settings API final IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> indicesAdmin().prepareUpdateSettings("test").setSettings(Settings.builder().put("index.private", "private-update")).get() + indicesAdmin().prepareUpdateSettings("test").setSettings(Settings.builder().put("index.private", "private-update")) ); final String message = "can not update private setting [index.private]; this setting is managed by Elasticsearch"; assertThat(e, hasToString(containsString(message))); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/settings/UpdateSettingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/settings/UpdateSettingsIT.java index 0973eb450a2c1..be0be311c4638 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/settings/UpdateSettingsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/settings/UpdateSettingsIT.java @@ -40,7 +40,6 @@ import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_READ_ONLY; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertRequestBuilderThrows; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -52,9 +51,8 @@ public void testInvalidUpdateOnClosedIndex() { assertAcked(indicesAdmin().prepareClose("test").get()); IllegalArgumentException exception = expectThrows( IllegalArgumentException.class, - () -> indicesAdmin().prepareUpdateSettings("test") + indicesAdmin().prepareUpdateSettings("test") .setSettings(Settings.builder().put("index.analysis.char_filter.invalid_char.type", "invalid")) - .get() ); assertEquals(exception.getMessage(), "Unknown char_filter type [invalid] for [invalid_char]"); } @@ -63,7 +61,7 @@ public void testInvalidDynamicUpdate() { createIndex("test"); IllegalArgumentException exception = expectThrows( IllegalArgumentException.class, - () -> indicesAdmin().prepareUpdateSettings("test").setSettings(Settings.builder().put("index.dummy", "boom")).get() + indicesAdmin().prepareUpdateSettings("test").setSettings(Settings.builder().put("index.dummy", "boom")) ); assertEquals(exception.getCause().getMessage(), "this setting goes boom"); IndexMetadata indexMetadata = clusterAdmin().prepareState().get().getState().metadata().index("test"); @@ -143,22 +141,21 @@ protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { public void testUpdateDependentClusterSettings() { IllegalArgumentException iae = expectThrows( IllegalArgumentException.class, - () -> clusterAdmin().prepareUpdateSettings().setPersistentSettings(Settings.builder().put("cluster.acc.test.pw", "asdf")).get() + clusterAdmin().prepareUpdateSettings().setPersistentSettings(Settings.builder().put("cluster.acc.test.pw", "asdf")) ); assertEquals("missing required setting [cluster.acc.test.user] for setting [cluster.acc.test.pw]", iae.getMessage()); iae = expectThrows( IllegalArgumentException.class, - () -> 
clusterAdmin().prepareUpdateSettings().setTransientSettings(Settings.builder().put("cluster.acc.test.pw", "asdf")).get() + clusterAdmin().prepareUpdateSettings().setTransientSettings(Settings.builder().put("cluster.acc.test.pw", "asdf")) ); assertEquals("missing required setting [cluster.acc.test.user] for setting [cluster.acc.test.pw]", iae.getMessage()); iae = expectThrows( IllegalArgumentException.class, - () -> clusterAdmin().prepareUpdateSettings() + clusterAdmin().prepareUpdateSettings() .setTransientSettings(Settings.builder().put("cluster.acc.test.pw", "asdf")) .setPersistentSettings(Settings.builder().put("cluster.acc.test.user", "asdf")) - .get() ); assertEquals("missing required setting [cluster.acc.test.user] for setting [cluster.acc.test.pw]", iae.getMessage()); @@ -168,7 +165,7 @@ public void testUpdateDependentClusterSettings() { .get(); iae = expectThrows( IllegalArgumentException.class, - () -> clusterAdmin().prepareUpdateSettings().setTransientSettings(Settings.builder().putNull("cluster.acc.test.user")).get() + clusterAdmin().prepareUpdateSettings().setTransientSettings(Settings.builder().putNull("cluster.acc.test.user")) ); assertEquals("missing required setting [cluster.acc.test.user] for setting [cluster.acc.test.pw]", iae.getMessage()); clusterAdmin().prepareUpdateSettings() @@ -181,9 +178,7 @@ public void testUpdateDependentClusterSettings() { iae = expectThrows( IllegalArgumentException.class, - () -> clusterAdmin().prepareUpdateSettings() - .setPersistentSettings(Settings.builder().putNull("cluster.acc.test.user")) - .get() + clusterAdmin().prepareUpdateSettings().setPersistentSettings(Settings.builder().putNull("cluster.acc.test.user")) ); assertEquals("missing required setting [cluster.acc.test.user] for setting [cluster.acc.test.pw]", iae.getMessage()); @@ -194,7 +189,7 @@ public void testUpdateDependentClusterSettings() { public void testUpdateDependentIndexSettings() { IllegalArgumentException iae = expectThrows( IllegalArgumentException.class, - () -> prepareCreate("test", Settings.builder().put("index.acc.test.pw", "asdf")).get() + prepareCreate("test", Settings.builder().put("index.acc.test.pw", "asdf")) ); assertEquals("missing required setting [index.acc.test.user] for setting [index.acc.test.pw]", iae.getMessage()); @@ -207,7 +202,7 @@ public void testUpdateDependentIndexSettings() { iae = expectThrows( IllegalArgumentException.class, - () -> indicesAdmin().prepareUpdateSettings("test").setSettings(Settings.builder().put("index.acc.test.pw", "asdf")).get() + indicesAdmin().prepareUpdateSettings("test").setSettings(Settings.builder().put("index.acc.test.pw", "asdf")) ); assertEquals("missing required setting [index.acc.test.user] for setting [index.acc.test.pw]", iae.getMessage()); @@ -220,7 +215,7 @@ public void testUpdateDependentIndexSettings() { // now try to remove it and make sure it fails iae = expectThrows( IllegalArgumentException.class, - () -> indicesAdmin().prepareUpdateSettings("test").setSettings(Settings.builder().putNull("index.acc.test.user")).get() + indicesAdmin().prepareUpdateSettings("test").setSettings(Settings.builder().putNull("index.acc.test.user")) ); assertEquals("missing required setting [index.acc.test.user] for setting [index.acc.test.pw]", iae.getMessage()); @@ -291,23 +286,22 @@ public void testOpenCloseUpdateSettings() throws Exception { createIndex("test"); expectThrows( IllegalArgumentException.class, - () -> indicesAdmin().prepareUpdateSettings("test") + indicesAdmin().prepareUpdateSettings("test") .setSettings( 
Settings.builder() .put("index.refresh_interval", -1) // this one can change .put("index.fielddata.cache", "none") ) // this one can't - .get() + ); expectThrows( IllegalArgumentException.class, - () -> indicesAdmin().prepareUpdateSettings("test") + indicesAdmin().prepareUpdateSettings("test") .setSettings( Settings.builder() .put("index.refresh_interval", -1) // this one can change .put("index.final", "no") ) // this one can't - .get() ); IndexMetadata indexMetadata = clusterAdmin().prepareState().get().getState().metadata().index("test"); assertThat(indexMetadata.getSettings().get("index.refresh_interval"), nullValue()); @@ -361,13 +355,12 @@ public void testOpenCloseUpdateSettings() throws Exception { IllegalArgumentException ex = expectThrows( IllegalArgumentException.class, - () -> indicesAdmin().prepareUpdateSettings("test") + indicesAdmin().prepareUpdateSettings("test") .setSettings( Settings.builder() .put("index.refresh_interval", -1) // this one can change .put("index.final", "no") ) // this one really can't - .get() ); assertThat(ex.getMessage(), containsString("final test setting [index.final], not updateable")); indexMetadata = clusterAdmin().prepareState().get().getState().metadata().index("test"); @@ -391,10 +384,7 @@ public void testEngineGCDeletesSetting() throws Exception { client().prepareDelete("test", "1").setVersionType(VersionType.EXTERNAL).setVersion(2).get(); // delete is still in cache this should fail indexRequestBuilder = prepareIndex("test"); - assertRequestBuilderThrows( - indexRequestBuilder.setId("1").setSource("f", 3).setVersionType(VersionType.EXTERNAL).setVersion(1), - VersionConflictEngineException.class - ); + expectThrows(VersionConflictEngineException.class, indexRequestBuilder); indexRequestBuilder.request().decRef(); assertAcked(indicesAdmin().prepareUpdateSettings("test").setSettings(Settings.builder().put("index.gc_deletes", 0))); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseIndexDisableCloseAllIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseIndexDisableCloseAllIT.java index 2b07f36551279..6b1aafe2f9b17 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseIndexDisableCloseAllIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseIndexDisableCloseAllIT.java @@ -37,7 +37,7 @@ public void testCloseAllRequiresName() { IllegalStateException illegalStateException = expectThrows( IllegalStateException.class, - () -> indicesAdmin().prepareClose("test_no_close").get() + indicesAdmin().prepareClose("test_no_close") ); assertEquals( illegalStateException.getMessage(), diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseIndexIT.java index 6cdb6675b7bbf..8d9dea1d7cdcb 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/state/CloseIndexIT.java @@ -80,16 +80,13 @@ public Settings indexSettings() { } public void testCloseMissingIndex() { - IndexNotFoundException e = expectThrows(IndexNotFoundException.class, () -> indicesAdmin().prepareClose("test").get()); + IndexNotFoundException e = expectThrows(IndexNotFoundException.class, indicesAdmin().prepareClose("test")); assertThat(e.getMessage(), is("no such index [test]")); } public void testCloseOneMissingIndex() { createIndex("test1"); - final 
IndexNotFoundException e = expectThrows( - IndexNotFoundException.class, - () -> indicesAdmin().prepareClose("test1", "test2").get() - ); + final IndexNotFoundException e = expectThrows(IndexNotFoundException.class, indicesAdmin().prepareClose("test1", "test2")); assertThat(e.getMessage(), is("no such index [test2]")); } @@ -100,17 +97,14 @@ public void testCloseOneMissingIndexIgnoreMissing() throws Exception { } public void testCloseNoIndex() { - final ActionRequestValidationException e = expectThrows( - ActionRequestValidationException.class, - () -> indicesAdmin().prepareClose().get() - ); + final ActionRequestValidationException e = expectThrows(ActionRequestValidationException.class, indicesAdmin().prepareClose()); assertThat(e.getMessage(), containsString("index is missing")); } public void testCloseNullIndex() { final ActionRequestValidationException e = expectThrows( ActionRequestValidationException.class, - () -> indicesAdmin().prepareClose((String[]) null).get() + indicesAdmin().prepareClose((String[]) null) ); assertThat(e.getMessage(), containsString("index is missing")); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/state/OpenCloseIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/state/OpenCloseIndexIT.java index efa822b34b33e..eab8bc729f7e5 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/state/OpenCloseIndexIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/state/OpenCloseIndexIT.java @@ -62,7 +62,7 @@ public void testSimpleCloseOpen() { } public void testSimpleOpenMissingIndex() { - Exception e = expectThrows(IndexNotFoundException.class, () -> indicesAdmin().prepareOpen("test1").get()); + Exception e = expectThrows(IndexNotFoundException.class, indicesAdmin().prepareOpen("test1")); assertThat(e.getMessage(), is("no such index [test1]")); } @@ -71,7 +71,7 @@ public void testOpenOneMissingIndex() { createIndex("test1"); ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().get(); assertThat(healthResponse.isTimedOut(), equalTo(false)); - Exception e = expectThrows(IndexNotFoundException.class, () -> client.admin().indices().prepareOpen("test1", "test2").get()); + Exception e = expectThrows(IndexNotFoundException.class, client.admin().indices().prepareOpen("test1", "test2")); assertThat(e.getMessage(), is("no such index [test2]")); } @@ -162,12 +162,12 @@ public void testCloseOpenAllWildcard() { } public void testOpenNoIndex() { - Exception e = expectThrows(ActionRequestValidationException.class, () -> indicesAdmin().prepareOpen().get()); + Exception e = expectThrows(ActionRequestValidationException.class, indicesAdmin().prepareOpen()); assertThat(e.getMessage(), containsString("index is missing")); } public void testOpenNullIndex() { - Exception e = expectThrows(ActionRequestValidationException.class, () -> indicesAdmin().prepareOpen((String[]) null).get()); + Exception e = expectThrows(ActionRequestValidationException.class, indicesAdmin().prepareOpen((String[]) null)); assertThat(e.getMessage(), containsString("index is missing")); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java index 68de6178aaca7..7ec43a094835f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java +++ 
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java
index 68de6178aaca7..7ec43a094835f 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/template/SimpleIndexTemplateIT.java
@@ -7,13 +7,13 @@
  */

 package org.elasticsearch.indices.template;

+import org.elasticsearch.action.ActionRequestBuilder;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.DocWriteResponse;
 import org.elasticsearch.action.admin.indices.alias.Alias;
 import org.elasticsearch.action.admin.indices.alias.get.GetAliasesResponse;
 import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse;
 import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse;
-import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequestBuilder;
 import org.elasticsearch.action.bulk.BulkRequestBuilder;
 import org.elasticsearch.action.bulk.BulkResponse;
 import org.elasticsearch.action.fieldcaps.FieldCapabilities;
@@ -126,27 +126,25 @@ public void testSimpleIndexTemplateTests() throws Exception {
             .get();

         // test create param
-        assertRequestBuilderThrows(
-            indicesAdmin().preparePutTemplate("template_2")
-                .setPatterns(Collections.singletonList("test*"))
-                .setSettings(indexSettings())
-                .setCreate(true)
-                .setOrder(1)
-                .setMapping(
-                    XContentFactory.jsonBuilder()
-                        .startObject()
-                        .startObject("_doc")
-                        .startObject("properties")
-                        .startObject("field2")
-                        .field("type", "text")
-                        .field("store", false)
-                        .endObject()
-                        .endObject()
-                        .endObject()
-                        .endObject()
-                ),
-            IllegalArgumentException.class
-        );
+        ActionRequestBuilder<?, ?> builder = indicesAdmin().preparePutTemplate("template_2")
+            .setPatterns(Collections.singletonList("test*"))
+            .setSettings(indexSettings())
+            .setCreate(true)
+            .setOrder(1)
+            .setMapping(
+                XContentFactory.jsonBuilder()
+                    .startObject()
+                    .startObject("_doc")
+                    .startObject("properties")
+                    .startObject("field2")
+                    .field("type", "text")
+                    .field("store", false)
+                    .endObject()
+                    .endObject()
+                    .endObject()
+                    .endObject()
+            );
+        expectThrows(IllegalArgumentException.class, builder);

         response = indicesAdmin().prepareGetTemplates().get();
         assertThat(response.getIndexTemplates(), hasSize(2));
@@ -435,10 +433,9 @@ public void testBrokenMapping() throws Exception {
         MapperParsingException e = expectThrows(
             MapperParsingException.class,
-            () -> indicesAdmin().preparePutTemplate("template_1")
+            indicesAdmin().preparePutTemplate("template_1")
                 .setPatterns(Collections.singletonList("te*"))
                 .setMapping("{\"foo\": \"abcde\"}", XContentType.JSON)
-                .get()
         );
         assertThat(e.getMessage(), containsString("Failed to parse mapping"));
@@ -456,10 +453,9 @@ public void testInvalidSettings() throws Exception {
         IllegalArgumentException e = expectThrows(
             IllegalArgumentException.class,
-            () -> indicesAdmin().preparePutTemplate("template_1")
+            indicesAdmin().preparePutTemplate("template_1")
                 .setPatterns(Collections.singletonList("te*"))
                 .setSettings(Settings.builder().put("does_not_exist", "test"))
-                .get()
         );
         assertEquals(
             "unknown setting [index.does_not_exist] please check that any required plugins are"
@@ -628,11 +624,12 @@ public void testAliasInvalidFilterValidJson() throws Exception {

     public void testAliasInvalidFilterInvalidJson() throws Exception {
         // invalid json: put index template fails
-        PutIndexTemplateRequestBuilder putIndexTemplateRequestBuilder = indicesAdmin().preparePutTemplate("template_1")
-            .setPatterns(Collections.singletonList("te*"))
-            .addAlias(new Alias("invalid_alias").filter("abcde"));
-
-        IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> putIndexTemplateRequestBuilder.get());
+        IllegalArgumentException e = expectThrows(
+            IllegalArgumentException.class,
+            indicesAdmin().preparePutTemplate("template_1")
+                .setPatterns(Collections.singletonList("te*"))
+                .addAlias(new Alias("invalid_alias").filter("abcde"))
+        );
         assertThat(e.getMessage(), equalTo("failed to parse filter for alias [invalid_alias]"));

         GetIndexTemplatesResponse response = indicesAdmin().prepareGetTemplates("template_1").get();
@@ -649,20 +646,22 @@ public void testAliasNameExistingIndex() throws Exception {
     }

     public void testAliasEmptyName() throws Exception {
-        PutIndexTemplateRequestBuilder putIndexTemplateRequestBuilder = indicesAdmin().preparePutTemplate("template_1")
-            .setPatterns(Collections.singletonList("te*"))
-            .addAlias(new Alias(" ").indexRouting("1,2,3"));
-
-        IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> putIndexTemplateRequestBuilder.get());
+        IllegalArgumentException e = expectThrows(
+            IllegalArgumentException.class,
+            indicesAdmin().preparePutTemplate("template_1")
+                .setPatterns(Collections.singletonList("te*"))
+                .addAlias(new Alias(" ").indexRouting("1,2,3"))
+        );
         assertThat(e.getMessage(), equalTo("alias name is required"));
     }

     public void testAliasWithMultipleIndexRoutings() throws Exception {
-        PutIndexTemplateRequestBuilder putIndexTemplateRequestBuilder = indicesAdmin().preparePutTemplate("template_1")
-            .setPatterns(Collections.singletonList("te*"))
-            .addAlias(new Alias("alias").indexRouting("1,2,3"));
-
-        IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> putIndexTemplateRequestBuilder.get());
+        IllegalArgumentException e = expectThrows(
+            IllegalArgumentException.class,
+            indicesAdmin().preparePutTemplate("template_1")
+                .setPatterns(Collections.singletonList("te*"))
+                .addAlias(new Alias("alias").indexRouting("1,2,3"))
+        );
         assertThat(e.getMessage(), equalTo("alias [alias] has several index routing values associated with it"));
     }
@@ -783,7 +782,7 @@ public void testCombineTemplates() throws Exception {
         // put template using custom_1 analyzer
         MapperParsingException e = expectThrows(
             MapperParsingException.class,
-            () -> indicesAdmin().preparePutTemplate("template_2")
+            indicesAdmin().preparePutTemplate("template_2")
                 .setPatterns(Collections.singletonList("test*"))
                 .setCreate(true)
                 .setOrder(1)
@@ -800,7 +799,6 @@
                         .endObject()
                         .endObject()
                 )
-                .get()
         );
         assertThat(e.getMessage(), containsString("analyzer [custom_1] has not been configured in mappings"));

@@ -888,10 +886,9 @@ public void testPartitionedTemplate() throws Exception {
         // provide more partitions than shards
         IllegalArgumentException eBadSettings = expectThrows(
             IllegalArgumentException.class,
-            () -> indicesAdmin().preparePutTemplate("template_1")
+            indicesAdmin().preparePutTemplate("template_1")
                 .setPatterns(Collections.singletonList("te*"))
                 .setSettings(Settings.builder().put("index.number_of_shards", "5").put("index.routing_partition_size", "6"))
-                .get()
         );
         assertThat(
             eBadSettings.getMessage(),
@@ -901,11 +898,10 @@
         // provide an invalid mapping for a partitioned index
         IllegalArgumentException eBadMapping = expectThrows(
             IllegalArgumentException.class,
-            () -> indicesAdmin().preparePutTemplate("template_2")
+            indicesAdmin().preparePutTemplate("template_2")
                 .setPatterns(Collections.singletonList("te*"))
                 .setMapping("{\"_doc\":{\"_routing\":{\"required\":false}}}", XContentType.JSON)
                 .setSettings(Settings.builder().put("index.number_of_shards", "6").put("index.routing_partition_size", "3"))
-                .get()
         );
         assertThat(eBadMapping.getMessage(), containsString("must have routing required for partitioned index"));
@@ -923,8 +919,7 @@
         // create an index with too few shards
         IllegalArgumentException eBadIndex = expectThrows(
             IllegalArgumentException.class,
-            () -> prepareCreate("test_bad", Settings.builder().put("index.number_of_shards", 5).put("index.number_of_routing_shards", 5))
-                .get()
+            prepareCreate("test_bad", Settings.builder().put("index.number_of_shards", 5).put("index.number_of_routing_shards", 5))
         );
         assertThat(
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestClientIT.java b/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestClientIT.java
index 623285fe85395..dcfc1650dd989 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestClientIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestClientIT.java
@@ -283,7 +283,7 @@ public void testPutWithPipelineFactoryError() throws Exception {
                 .endObject()
         );
         PutPipelineRequest putPipelineRequest = new PutPipelineRequest("_id2", source, XContentType.JSON);
-        Exception e = expectThrows(ElasticsearchParseException.class, () -> clusterAdmin().putPipeline(putPipelineRequest).actionGet());
+        Exception e = expectThrows(ElasticsearchParseException.class, clusterAdmin().putPipeline(putPipelineRequest));
         assertThat(e.getMessage(), equalTo("processor [test] doesn't support one or more provided configuration parameters [unused]"));

         GetPipelineResponse response = clusterAdmin().prepareGetPipeline("_id2").get();
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestFileSettingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestFileSettingsIT.java
index fc0efca802370..6ca4b209da8a8 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestFileSettingsIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestFileSettingsIT.java
@@ -177,7 +177,7 @@ private void assertPipelinesSaveOK(CountDownLatch savedClusterState, AtomicLong
                 + "[[my_ingest_pipeline] set as read-only by [file_settings]]",
             expectThrows(
                 IllegalArgumentException.class,
-                () -> client().execute(PutPipelineAction.INSTANCE, sampleRestRequest("my_ingest_pipeline")).actionGet()
+                client().execute(PutPipelineAction.INSTANCE, sampleRestRequest("my_ingest_pipeline"))
             ).getMessage()
         );
     }
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestStatsNamesAndTypesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestStatsNamesAndTypesIT.java
index 4a5ed9863f7be..02d1825fa2fce 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestStatsNamesAndTypesIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/ingest/IngestStatsNamesAndTypesIT.java
@@ -146,7 +146,10 @@ public void testIngestStatsNamesAndTypes() throws IOException {
             builder.startObject();
             response.toXContent(builder, new ToXContent.MapParams(Map.of()));
             builder.endObject();
-            Map<String, Object> stats = createParser(JsonXContent.jsonXContent, Strings.toString(builder)).map();
+            Map<String, Object> stats;
+            try (var parser = createParser(JsonXContent.jsonXContent, Strings.toString(builder))) {
+                stats = parser.map();
+            }

             int setProcessorCount = path(stats, "nodes.ingest.processor_stats.set.count");
             assertThat(setProcessorCount, equalTo(3));
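IngestStatsNamesAndTypesIT picks up an unrelated cleanup in the same pass: `XContentParser` is `Closeable`, so the parser returned by `createParser` is now scoped in try-with-resources instead of being consumed inline and leaked. The same pattern recurs in BucketScriptIT further down. In general form (assuming the test framework's `createParser` helper):

```java
// Parse a JSON string into a map without leaking the parser.
Map<String, Object> stats;
try (XContentParser parser = createParser(JsonXContent.jsonXContent, json)) {
    stats = parser.map(); // consume while the parser is open
} // the parser is closed here even when map() throws
```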
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/operateAllIndices/DestructiveOperationsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/operateAllIndices/DestructiveOperationsIT.java
index 626a1573a66db..a49fadb0c4b5b 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/operateAllIndices/DestructiveOperationsIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/operateAllIndices/DestructiveOperationsIT.java
@@ -37,8 +37,8 @@ public void testDeleteIndexIsRejected() throws Exception {
         // Special "match none" pattern succeeds, since non-destructive
         assertAcked(indicesAdmin().prepareDelete("*", "-*").get());

-        expectThrows(IllegalArgumentException.class, () -> indicesAdmin().prepareDelete("i*").get());
-        expectThrows(IllegalArgumentException.class, () -> indicesAdmin().prepareDelete("_all").get());
+        expectThrows(IllegalArgumentException.class, indicesAdmin().prepareDelete("i*"));
+        expectThrows(IllegalArgumentException.class, indicesAdmin().prepareDelete("_all"));
     }

     public void testDeleteIndexDefaultBehaviour() throws Exception {
@@ -67,8 +67,8 @@ public void testCloseIndexIsRejected() throws Exception {
         // Special "match none" pattern succeeds, since non-destructive
         assertAcked(indicesAdmin().prepareClose("*", "-*").get());

-        expectThrows(IllegalArgumentException.class, () -> indicesAdmin().prepareClose("i*").get());
-        expectThrows(IllegalArgumentException.class, () -> indicesAdmin().prepareClose("_all").get());
+        expectThrows(IllegalArgumentException.class, indicesAdmin().prepareClose("i*"));
+        expectThrows(IllegalArgumentException.class, indicesAdmin().prepareClose("_all"));
     }

     public void testCloseIndexDefaultBehaviour() throws Exception {
@@ -99,8 +99,8 @@ public void testOpenIndexIsRejected() throws Exception {
         // Special "match none" pattern succeeds, since non-destructive
         assertAcked(indicesAdmin().prepareOpen("*", "-*").get());

-        expectThrows(IllegalArgumentException.class, () -> indicesAdmin().prepareOpen("i*").get());
-        expectThrows(IllegalArgumentException.class, () -> indicesAdmin().prepareOpen("_all").get());
+        expectThrows(IllegalArgumentException.class, indicesAdmin().prepareOpen("i*"));
+        expectThrows(IllegalArgumentException.class, indicesAdmin().prepareOpen("_all"));
     }

     public void testOpenIndexDefaultBehaviour() throws Exception {
@@ -133,8 +133,8 @@ public void testAddIndexBlockIsRejected() throws Exception {
         // Special "match none" pattern succeeds, since non-destructive
         assertAcked(indicesAdmin().prepareAddBlock(WRITE, "*", "-*").get());

-        expectThrows(IllegalArgumentException.class, () -> indicesAdmin().prepareAddBlock(WRITE, "i*").get());
-        expectThrows(IllegalArgumentException.class, () -> indicesAdmin().prepareAddBlock(WRITE, "_all").get());
+        expectThrows(IllegalArgumentException.class, indicesAdmin().prepareAddBlock(WRITE, "i*"));
+        expectThrows(IllegalArgumentException.class, indicesAdmin().prepareAddBlock(WRITE, "_all"));
     }

     public void testAddIndexBlockDefaultBehaviour() throws Exception {
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/readiness/ReadinessClusterIT.java b/server/src/internalClusterTest/java/org/elasticsearch/readiness/ReadinessClusterIT.java
index 450b27eb0db8b..3c06a4c084e04 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/readiness/ReadinessClusterIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/readiness/ReadinessClusterIT.java
@@ -103,10 +103,7 @@ private void assertMasterNode(Client client, String node) {
     }

     private void expectMasterNotFound() {
-        expectThrows(
-            MasterNotDiscoveredException.class,
-            () -> clusterAdmin().prepareState().setMasterNodeTimeout("100ms").get().getState().nodes().getMasterNodeId()
-        );
+        expectThrows(MasterNotDiscoveredException.class, clusterAdmin().prepareState().setMasterNodeTimeout("100ms"));
     }

     public void testReadinessDuringRestarts() throws Exception {
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/repositories/IndexSnapshotsServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/repositories/IndexSnapshotsServiceIT.java
index f77cc9ce20020..4b9e4e0fa0932 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/repositories/IndexSnapshotsServiceIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/repositories/IndexSnapshotsServiceIT.java
@@ -64,7 +64,7 @@ public void testGetShardSnapshotFromUnknownRepoReturnsAnError() throws Exception
                 );
             }
         } else {
-            expectThrows(RepositoryException.class, responseFuture::actionGet);
+            expectThrows(RepositoryException.class, responseFuture);
         }

         disableRepoConsistencyCheck("This test checks an empty repository");
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/repositories/InvalidRepositoryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/repositories/InvalidRepositoryIT.java
index 730cdba059a69..f931eb717457d 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/repositories/InvalidRepositoryIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/repositories/InvalidRepositoryIT.java
@@ -106,7 +106,7 @@ public void testCreateInvalidRepository() throws Exception {
         // verification should fail with some node has InvalidRepository
         final var expectedException = expectThrows(
             RepositoryVerificationException.class,
-            () -> clusterAdmin().prepareVerifyRepository(repositoryName).get()
+            clusterAdmin().prepareVerifyRepository(repositoryName)
         );
         for (Throwable suppressed : expectedException.getSuppressed()) {
             Throwable outerCause = suppressed.getCause();
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/BlobStoreSizeLimitIT.java b/server/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/BlobStoreSizeLimitIT.java
index ffe6133e034bc..efc534043ac1e 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/BlobStoreSizeLimitIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/BlobStoreSizeLimitIT.java
@@ -35,7 +35,7 @@ public void testBlobStoreSizeIsLimited() throws Exception {
         );
         final List<String> snapshotNames = createNSnapshots(repoName, maxSnapshots);
         final ActionFuture<CreateSnapshotResponse> failingSnapshotFuture = startFullSnapshot(repoName, "failing-snapshot");
-        final SnapshotException snapshotException = expectThrows(SnapshotException.class, failingSnapshotFuture::actionGet);
+        final SnapshotException snapshotException = expectThrows(SnapshotException.class, failingSnapshotFuture);
         assertThat(snapshotException.getRepositoryName(), equalTo(repoName));
         assertThat(snapshotException.getSnapshotName(), equalTo("failing-snapshot"));
         assertThat(snapshotException.getCause(), instanceOf(RepositoryException.class));
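The repository tests above drop `responseFuture::actionGet` in favour of passing the `ActionFuture` itself, which implies a companion overload to the builder-based one. Again only a sketch; the real signature may differ:

```java
// Hypothetical companion overload -- illustrative sketch only, not code from this diff.
public static <E extends Throwable> E expectThrows(Class<E> expectedType, ActionFuture<?> future) {
    return expectThrows(expectedType, future::actionGet);
}
```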
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/ComponentTemplatesFileSettingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/ComponentTemplatesFileSettingsIT.java
index 895cd3d2a01e7..a0b93ce8dba92 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/ComponentTemplatesFileSettingsIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/ComponentTemplatesFileSettingsIT.java
@@ -416,14 +416,14 @@ private void assertClusterStateSaveOK(CountDownLatch savedClusterState, AtomicLo
         assertTrue(
             expectThrows(
                 IllegalArgumentException.class,
-                () -> client().execute(PutComponentTemplateAction.INSTANCE, sampleComponentRestRequest("component_template1")).actionGet()
+                client().execute(PutComponentTemplateAction.INSTANCE, sampleComponentRestRequest("component_template1"))
             ).getMessage().contains("[[component_template:component_template1] set as read-only by [file_settings]]")
         );

         assertTrue(
             expectThrows(
                 IllegalArgumentException.class,
-                () -> client().execute(PutComposableIndexTemplateAction.INSTANCE, sampleIndexTemplateRestRequest("template_1")).actionGet()
+                client().execute(PutComposableIndexTemplateAction.INSTANCE, sampleIndexTemplateRestRequest("template_1"))
             ).getMessage().contains("[[composable_index_template:template_1] set as read-only by [file_settings]]")
         );
     }
@@ -485,8 +485,7 @@ private void assertComponentAndIndexTemplateDelete(CountDownLatch savedClusterSt
         assertTrue(
             expectThrows(
                 IllegalArgumentException.class,
-                () -> client().execute(PutComposableIndexTemplateAction.INSTANCE, sampleIndexTemplateRestRequest("template_other"))
-                    .actionGet()
+                client().execute(PutComposableIndexTemplateAction.INSTANCE, sampleIndexTemplateRestRequest("template_other"))
             ).getMessage()
                 .contains(
                     "with errors: [[component_template:runtime_component_template, "
@@ -501,14 +500,14 @@
         assertTrue(
             expectThrows(
                 IllegalArgumentException.class,
-                () -> client().execute(PutComponentTemplateAction.INSTANCE, sampleComponentRestRequest("component_template1")).actionGet()
+                client().execute(PutComponentTemplateAction.INSTANCE, sampleComponentRestRequest("component_template1"))
             ).getMessage().contains("[[component_template:component_template1] set as read-only by [file_settings]]")
         );

         assertTrue(
             expectThrows(
                 IllegalArgumentException.class,
-                () -> client().execute(PutComposableIndexTemplateAction.INSTANCE, sampleIndexTemplateRestRequest("template_1")).actionGet()
+                client().execute(PutComposableIndexTemplateAction.INSTANCE, sampleIndexTemplateRestRequest("template_1"))
             ).getMessage().contains("[[composable_index_template:template_1] set as read-only by [file_settings]]")
         );
     }
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/RepositoriesFileSettingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/RepositoriesFileSettingsIT.java
index 9d6a53d8bc818..fa5d8d93c9e45 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/RepositoriesFileSettingsIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/reservedstate/service/RepositoriesFileSettingsIT.java
@@ -149,10 +149,8 @@ private void assertClusterStateSaveOK(CountDownLatch savedClusterState, AtomicLo
             "Failed to process request "
                 + "[org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest/unset] "
                 + "with errors: [[repo] set as read-only by [file_settings]]",
-            expectThrows(
-                IllegalArgumentException.class,
-                () -> client().execute(TransportPutRepositoryAction.TYPE, sampleRestRequest("repo")).actionGet()
-            ).getMessage()
+            expectThrows(IllegalArgumentException.class, client().execute(TransportPutRepositoryAction.TYPE, sampleRestRequest("repo")))
+                .getMessage()
         );
     }
@@ -206,7 +204,7 @@ private void assertClusterStateNotSaved(CountDownLatch savedClusterState, Atomic
             "[err-repo] missing",
             expectThrows(
                 RepositoryMissingException.class,
-                () -> client().execute(GetRepositoriesAction.INSTANCE, new GetRepositoriesRequest(new String[] { "err-repo" })).actionGet()
+                client().execute(GetRepositoriesAction.INSTANCE, new GetRepositoriesRequest(new String[] { "err-repo" }))
             ).getMessage()
         );
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/script/StoredScriptsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/script/StoredScriptsIT.java
index adac5b3482107..619e7c9d9edec 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/script/StoredScriptsIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/script/StoredScriptsIT.java
@@ -54,9 +54,9 @@ public void testBasics() {
         IllegalArgumentException e = expectThrows(
             IllegalArgumentException.class,
-            () -> { clusterAdmin().preparePutStoredScript().setId("id#").setContent(new BytesArray(Strings.format("""
+            clusterAdmin().preparePutStoredScript().setId("id#").setContent(new BytesArray(Strings.format("""
                 {"script": {"lang": "%s", "source": "1"} }
-                """, LANG)), XContentType.JSON).get(); }
+                """, LANG)), XContentType.JSON)
         );
         assertEquals("Validation Failed: 1: id cannot contain '#' for stored script;", e.getMessage());
     }
@@ -64,9 +64,9 @@ public void testBasics() {
     public void testMaxScriptSize() {
         IllegalArgumentException e = expectThrows(
             IllegalArgumentException.class,
-            () -> { clusterAdmin().preparePutStoredScript().setId("foobar").setContent(new BytesArray(Strings.format("""
+            clusterAdmin().preparePutStoredScript().setId("foobar").setContent(new BytesArray(Strings.format("""
                 {"script": { "lang": "%s", "source":"0123456789abcdef"} }\
-                """, LANG)), XContentType.JSON).get(); }
+                """, LANG)), XContentType.JSON)
         );
         assertEquals("exceeded max allowed stored script size in bytes [64] with size [65] for script [foobar]", e.getMessage());
     }
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/SearchCancellationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/SearchCancellationIT.java
index aaf218e3579be..12fe4ed3dd509 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/SearchCancellationIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/SearchCancellationIT.java
@@ -249,11 +249,10 @@ public void testCancelFailedSearchWhenPartialResultDisallowed() throws Exception
         Thread searchThread = new Thread(() -> {
             SearchPhaseExecutionException e = expectThrows(
                 SearchPhaseExecutionException.class,
-                () -> prepareSearch("test").setSearchType(SearchType.QUERY_THEN_FETCH)
+                prepareSearch("test").setSearchType(SearchType.QUERY_THEN_FETCH)
                     .setQuery(scriptQuery(new Script(ScriptType.INLINE, "mockscript", SEARCH_BLOCK_SCRIPT_NAME, Collections.emptyMap())))
                     .setAllowPartialSearchResults(false)
                     .setSize(1000)
-                    .get()
             );
             assertThat(e.getMessage(), containsString("Partial shards failure"));
         });
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/SearchTimeoutIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/SearchTimeoutIT.java
index e23de41e9e8fe..fa5a89d93beb7 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/SearchTimeoutIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/SearchTimeoutIT.java
@@ -97,10 +97,9 @@ public void testPartialResultsIntolerantTimeout() throws Exception {
         ElasticsearchException ex = expectThrows(
             ElasticsearchException.class,
-            () -> prepareSearch("test").setTimeout(new TimeValue(10, TimeUnit.MILLISECONDS))
+            prepareSearch("test").setTimeout(new TimeValue(10, TimeUnit.MILLISECONDS))
                 .setQuery(scriptQuery(new Script(ScriptType.INLINE, "mockscript", SCRIPT_NAME, Collections.emptyMap())))
                 .setAllowPartialSearchResults(false) // this line causes timeouts to report failures
-                .get()
         );
         assertTrue(ex.toString().contains("Time exceeded"));
     }
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateRangeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateRangeIT.java
index dc9563a4b8e3f..2fd27d5f51b5f 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateRangeIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DateRangeIT.java
@@ -772,9 +772,8 @@ public void testRangeWithFormatStringValue() throws Exception {
         // providing numeric input without format should throw an exception
         ElasticsearchException e = expectThrows(
             ElasticsearchException.class,
-            () -> prepareSearch(indexName).setSize(0)
+            prepareSearch(indexName).setSize(0)
                 .addAggregation(dateRange("date_range").field("date").addRange(1000000, 3000000).addRange(3000000, 4000000))
-                .get()
         );
         assertThat(e.getDetailedMessage(), containsString("failed to parse date field [1000000] with format [strict_hour_minute_second]"));
     }
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsIT.java
index f96c76569e9b7..a98f2d6428b5d 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/DoubleTermsIT.java
@@ -265,12 +265,7 @@ private void getMultiSortDocs(List builders) throws IOExcep
     public void testSizeIsZero() {
         IllegalArgumentException exception = expectThrows(
             IllegalArgumentException.class,
-            () -> prepareSearch("high_card_idx").addAggregation(
-                new TermsAggregationBuilder("terms").field(SINGLE_VALUED_FIELD_NAME)
-                    .minDocCount(randomInt(1))
-                    .size(0)
-                    .collectMode(randomFrom(SubAggCollectionMode.values()))
-            ).get()
+            () -> new TermsAggregationBuilder("terms").field(SINGLE_VALUED_FIELD_NAME).minDocCount(randomInt(1)).size(0)
        );
        assertThat(exception.getMessage(), containsString("[size] must be greater than 0. Found [0] in [terms]"));
    }
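Note that DoubleTermsIT keeps a lambda, unlike its neighbours: `size(0)` is rejected eagerly by the aggregation builder's own setter, so no search request is ever executed, and the lambda now wraps only the builder construction:

```java
// The IllegalArgumentException comes from TermsAggregationBuilder#size itself,
// before any request exists -- hence a plain lambda rather than a request builder.
IllegalArgumentException exception = expectThrows(
    IllegalArgumentException.class,
    () -> new TermsAggregationBuilder("terms").field(SINGLE_VALUED_FIELD_NAME).size(0)
);
```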
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/HistogramIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/HistogramIT.java
index 29633d6f86f1a..4ccbf9753fab2 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/HistogramIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/HistogramIT.java
@@ -1240,17 +1240,17 @@ public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscAsCompound
     public void testInvalidBounds() {
         SearchPhaseExecutionException e = expectThrows(
             SearchPhaseExecutionException.class,
-            () -> prepareSearch("empty_bucket_idx").addAggregation(
+            prepareSearch("empty_bucket_idx").addAggregation(
                 histogram("histo").field(SINGLE_VALUED_FIELD_NAME).hardBounds(new DoubleBounds(0.0, 10.0)).extendedBounds(3, 20)
-            ).get()
+            )
         );
         assertThat(e.toString(), containsString("Extended bounds have to be inside hard bounds, hard bounds"));

         e = expectThrows(
             SearchPhaseExecutionException.class,
-            () -> prepareSearch("empty_bucket_idx").addAggregation(
+            prepareSearch("empty_bucket_idx").addAggregation(
                 histogram("histo").field(SINGLE_VALUED_FIELD_NAME).hardBounds(new DoubleBounds(3.0, null)).extendedBounds(0, 20)
-            ).get()
+            )
         );
         assertThat(e.toString(), containsString("Extended bounds have to be inside hard bounds, hard bounds"));
     }
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksIT.java
index c9b915d24372c..91a5fbdee9689 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksIT.java
@@ -183,7 +183,6 @@ public void testNullValuesField() throws Exception {
                     .numberOfSignificantValueDigits(sigDigits)
                     .field("value")
             )
-            .get()
         );
         assertThat(e.getMessage(), equalTo("[values] must not be null: [percentile_ranks]"));
     }
@@ -199,7 +198,6 @@ public void testEmptyValuesField() throws Exception {
                     .numberOfSignificantValueDigits(sigDigits)
                     .field("value")
             )
-            .get()
         );
         assertThat(e.getMessage(), equalTo("[values] must not be an empty array: [percentile_ranks]"));
     }
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java
index 4415f6a30fdd7..03a3007bc1673 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricIT.java
@@ -10,7 +10,6 @@

 import org.elasticsearch.action.index.IndexRequestBuilder;
 import org.elasticsearch.action.search.SearchPhaseExecutionException;
-import org.elasticsearch.action.search.SearchRequestBuilder;
 import org.elasticsearch.common.bytes.BytesArray;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.xcontent.support.XContentMapValues;
@@ -1255,12 +1254,13 @@ public void testConflictingAggAndScriptParams() {
         Script combineScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "no-op aggregation", Collections.emptyMap());
         Script reduceScript = new Script(ScriptType.INLINE, CustomScriptPlugin.NAME, "no-op list aggregation", Collections.emptyMap());

-        SearchRequestBuilder builder = prepareSearch("idx").setQuery(matchAllQuery())
-            .addAggregation(
-                scriptedMetric("scripted").params(params).mapScript(mapScript).combineScript(combineScript).reduceScript(reduceScript)
-            );
-
-        SearchPhaseExecutionException ex = expectThrows(SearchPhaseExecutionException.class, builder::get);
+        SearchPhaseExecutionException ex = expectThrows(
+            SearchPhaseExecutionException.class,
+            prepareSearch("idx").setQuery(matchAllQuery())
+                .addAggregation(
+                    scriptedMetric("scripted").params(params).mapScript(mapScript).combineScript(combineScript).reduceScript(reduceScript)
+                )
+        );
         assertThat(ex.getCause().getMessage(), containsString("Parameter name \"param1\" used in both aggregation and script parameters"));
     }
 }
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksIT.java
index daa423f3f3ec4..7bfde5665e1f8 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksIT.java
@@ -127,7 +127,6 @@ public void testNullValuesField() throws Exception {
             IllegalArgumentException.class,
             () -> prepareSearch("idx").setQuery(matchAllQuery())
                 .addAggregation(percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.TDIGEST).field("value"))
-                .get()
         );
         assertThat(e.getMessage(), equalTo("[values] must not be null: [percentile_ranks]"));
     }
@@ -138,7 +137,6 @@ public void testEmptyValuesField() throws Exception {
             IllegalArgumentException.class,
             () -> prepareSearch("idx").setQuery(matchAllQuery())
                 .addAggregation(percentileRanks("percentile_ranks", pcts).method(PercentilesMethod.TDIGEST).field("value"))
-                .get()
         );
         assertThat(e.getMessage(), equalTo("[values] must not be an empty array: [percentile_ranks]"));
     }
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java
index 193c70b8dc242..8ee33188a978b 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java
@@ -1002,11 +1002,11 @@ public void testTooHighResultWindow() throws Exception {
         Exception e = expectThrows(
             SearchPhaseExecutionException.class,
-            () -> prepareSearch("idx").addAggregation(
+            prepareSearch("idx").addAggregation(
                 terms("terms").executionHint(randomExecutionHint())
                     .field(TERMS_AGGS_FIELD)
                     .subAggregation(topHits("hits").from(100).size(10).sort(SortBuilders.fieldSort(SORT_FIELD).order(SortOrder.DESC)))
-            ).get()
+            )
         );
         assertThat(
             e.getCause().getMessage(),
@@ -1014,11 +1014,11 @@
         );
         e = expectThrows(
             SearchPhaseExecutionException.class,
-            () -> prepareSearch("idx").addAggregation(
+            prepareSearch("idx").addAggregation(
                 terms("terms").executionHint(randomExecutionHint())
                     .field(TERMS_AGGS_FIELD)
                     .subAggregation(topHits("hits").from(10).size(100).sort(SortBuilders.fieldSort(SORT_FIELD).order(SortOrder.DESC)))
-            ).get()
+            )
         );
         assertThat(
             e.getCause().getMessage(),
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptIT.java
index 776f7a2f329d2..649b548828dd6 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptIT.java
@@ -660,10 +660,10 @@ public void testSingleBucketPathAgg() throws Exception {
             .field("lang", CustomScriptPlugin.NAME)
             .endObject()
             .endObject();
-        BucketScriptPipelineAggregationBuilder bucketScriptAgg = BucketScriptPipelineAggregationBuilder.PARSER.parse(
-            createParser(content),
-            "seriesArithmetic"
-        );
+        BucketScriptPipelineAggregationBuilder bucketScriptAgg;
+        try (var parser = createParser(content)) {
+            bucketScriptAgg = BucketScriptPipelineAggregationBuilder.PARSER.parse(parser, "seriesArithmetic");
+        }

         assertNoFailuresAndResponse(
             prepareSearch("idx", "idx_unmapped").addAggregation(
@@ -706,10 +706,10 @@ public void testArrayBucketPathAgg() throws Exception {
             .field("lang", CustomScriptPlugin.NAME)
             .endObject()
             .endObject();
-        BucketScriptPipelineAggregationBuilder bucketScriptAgg = BucketScriptPipelineAggregationBuilder.PARSER.parse(
-            createParser(content),
-            "seriesArithmetic"
-        );
+        BucketScriptPipelineAggregationBuilder bucketScriptAgg;
+        try (var parser = createParser(content)) {
+            bucketScriptAgg = BucketScriptPipelineAggregationBuilder.PARSER.parse(parser, "seriesArithmetic");
+        }

         assertNoFailuresAndResponse(
             prepareSearch("idx", "idx_unmapped").addAggregation(
@@ -764,10 +764,10 @@ public void testObjectBucketPathAgg() throws Exception {
             .field("lang", CustomScriptPlugin.NAME)
             .endObject()
             .endObject();
-        BucketScriptPipelineAggregationBuilder bucketScriptAgg = BucketScriptPipelineAggregationBuilder.PARSER.parse(
-            createParser(content),
-            "seriesArithmetic"
-        );
+        BucketScriptPipelineAggregationBuilder bucketScriptAgg;
+        try (var parser = createParser(content)) {
+            bucketScriptAgg = BucketScriptPipelineAggregationBuilder.PARSER.parse(parser, "seriesArithmetic");
+        }

         assertNoFailuresAndResponse(
             prepareSearch("idx", "idx_unmapped").addAggregation(
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/ExtendedStatsBucketIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/ExtendedStatsBucketIT.java
index 793d3e1f3c366..69526abfe60f1 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/ExtendedStatsBucketIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/pipeline/ExtendedStatsBucketIT.java
@@ -165,7 +165,7 @@ public void testBadSigmaAsSubAgg() throws Exception {
                         .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))
                 )
                 .subAggregation(extendedStatsBucket("extended_stats_bucket", "histo>sum").sigma(-1.0))
-            ).get()
+            )
         );
         Throwable cause = ExceptionsHelper.unwrapCause(ex);
         if (cause == null) {
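Several of these assertions inspect the wrapped cause rather than the top-level `SearchPhaseExecutionException`, since shard-level failures arrive nested. ExtendedStatsBucketIT above uses `ExceptionsHelper.unwrapCause` for that; the general shape (the request builder and expected message here are placeholders):

```java
// Shard failures surface as SearchPhaseExecutionException; the interesting error
// is nested, so unwrap before asserting on the message.
SearchPhaseExecutionException ex = expectThrows(SearchPhaseExecutionException.class, requestBuilder);
Throwable cause = ExceptionsHelper.unwrapCause(ex);
assertThat(cause.getMessage(), containsString("expected validation message"));
```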
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchRedStateIndexIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchRedStateIndexIT.java
index 4dad254b5f6e5..289de8ae42d74 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchRedStateIndexIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/basic/SearchRedStateIndexIT.java
@@ -81,7 +81,7 @@ public void testDisallowPartialsWithRedState() throws Exception {
         SearchPhaseExecutionException ex = expectThrows(
             SearchPhaseExecutionException.class,
-            () -> prepareSearch().setSize(0).setAllowPartialSearchResults(false).get()
+            prepareSearch().setSize(0).setAllowPartialSearchResults(false)
         );
         assertThat(ex.getDetailedMessage(), containsString("Search rejected due to missing shard"));
     }
@@ -90,7 +90,7 @@ public void testClusterDisallowPartialsWithRedState() throws Exception {
         buildRedIndex(cluster().numDataNodes() + 2);

         setClusterDefaultAllowPartialResults(false);
-        SearchPhaseExecutionException ex = expectThrows(SearchPhaseExecutionException.class, () -> prepareSearch().setSize(0).get());
+        SearchPhaseExecutionException ex = expectThrows(SearchPhaseExecutionException.class, prepareSearch().setSize(0));
         assertThat(ex.getDetailedMessage(), containsString("Search rejected due to missing shard"));
     }
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/InnerHitsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/InnerHitsIT.java
index 6763355844a52..92c8e174f44fa 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/InnerHitsIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/fetch/subphase/InnerHitsIT.java
@@ -986,11 +986,11 @@ public void testTooHighResultWindow() throws Exception {
         Exception e = expectThrows(
             SearchPhaseExecutionException.class,
-            () -> prepareSearch("index2").setQuery(
+            prepareSearch("index2").setQuery(
                 nestedQuery("nested", matchQuery("nested.field", "value1"), ScoreMode.Avg).innerHit(
                     new InnerHitBuilder().setFrom(100).setSize(10).setName("_name")
                 )
-            ).get()
+            )
         );
         assertThat(
             e.getCause().getMessage(),
@@ -998,11 +998,11 @@
         );
         e = expectThrows(
             SearchPhaseExecutionException.class,
-            () -> prepareSearch("index2").setQuery(
+            prepareSearch("index2").setQuery(
                 nestedQuery("nested", matchQuery("nested.field", "value1"), ScoreMode.Avg).innerHit(
                     new InnerHitBuilder().setFrom(10).setSize(100).setName("_name")
                 )
-            ).get()
+            )
         );
         assertThat(
             e.getCause().getMessage(),
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/CCSFieldCapabilitiesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/CCSFieldCapabilitiesIT.java
index 444ce165f6115..0464b82573909 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/CCSFieldCapabilitiesIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/CCSFieldCapabilitiesIT.java
@@ -86,7 +86,7 @@ public void testFailuresFromRemote() {
         // if we only query the remote we should get back an exception only
         ex = expectThrows(
             IllegalArgumentException.class,
-            () -> client().prepareFieldCaps("remote_cluster:*").setFields("*").setIndexFilter(new ExceptionOnRewriteQueryBuilder()).get()
+            client().prepareFieldCaps("remote_cluster:*").setFields("*").setIndexFilter(new ExceptionOnRewriteQueryBuilder())
         );
         assertEquals("I throw because I choose to.", ex.getMessage());
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/FieldCapabilitiesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/FieldCapabilitiesIT.java
index 3d0c6072a5c9a..c331611982279 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/FieldCapabilitiesIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/fieldcaps/FieldCapabilitiesIT.java
@@ -429,10 +429,7 @@ public void testFailures() throws InterruptedException {
         // if all requested indices failed, we fail the request by throwing the exception
         IllegalArgumentException ex = expectThrows(
             IllegalArgumentException.class,
-            () -> client().prepareFieldCaps("index1-error", "index2-error")
-                .setFields("*")
-                .setIndexFilter(new ExceptionOnRewriteQueryBuilder())
-                .get()
+            client().prepareFieldCaps("index1-error", "index2-error").setFields("*").setIndexFilter(new ExceptionOnRewriteQueryBuilder())
         );
         assertEquals("I throw because I choose to.", ex.getMessage());
     }
@@ -505,7 +502,7 @@ public void testNoActiveCopy() throws Exception {
         {
             final ElasticsearchException ex = expectThrows(
                 ElasticsearchException.class,
-                () -> client().prepareFieldCaps("log-index-*").setFields("*").get()
+                client().prepareFieldCaps("log-index-*").setFields("*")
             );
             assertThat(ex.getMessage(), equalTo("index [log-index-inactive] has no active shard copy"));
         }
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/DecayFunctionScoreIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/DecayFunctionScoreIT.java
index e2a7d42105b41..f2e13bc33175c 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/DecayFunctionScoreIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/DecayFunctionScoreIT.java
@@ -670,14 +670,14 @@ public void testExceptionThrownIfScaleLE0() throws Exception {
         SearchPhaseExecutionException e = expectThrows(
             SearchPhaseExecutionException.class,
-            () -> client().search(
+            client().search(
                 new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH)
                     .source(
                         searchSource().query(
                             functionScoreQuery(termQuery("test", "value"), gaussDecayFunction("num1", "2013-05-28", "-1d"))
                         )
                     )
-            ).actionGet()
+            )
         );
         assertThat(e.getMessage(), is("all shards failed"));
     }
@@ -996,7 +996,7 @@ public void testParsingExceptionIfFieldDoesNotExist() throws Exception {
         SearchPhaseExecutionException e = expectThrows(
             SearchPhaseExecutionException.class,
-            () -> client().search(
+            client().search(
                 new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH)
                     .source(
                         searchSource().size(numDocs)
@@ -1006,7 +1006,7 @@ public void testParsingExceptionIfFieldDoesNotExist() throws Exception {
                             )
                         )
                     )
-            ).actionGet()
+            )
         );
         assertThat(e.getMessage(), is("all shards failed"));
     }
@@ -1037,7 +1037,7 @@ public void testParsingExceptionIfFieldTypeDoesNotMatch() throws Exception {
         // so, we indexed a string field, but now we try to score a num field
         SearchPhaseExecutionException e = expectThrows(
             SearchPhaseExecutionException.class,
-            () -> client().search(
+            client().search(
                 new SearchRequest(new String[] {}).searchType(SearchType.QUERY_THEN_FETCH)
                     .source(
                         searchSource().query(
@@ -1046,7 +1046,7 @@
                            )
                         )
                    )
-            ).actionGet()
+            )
         );
         assertThat(e.getMessage(), is("all shards failed"));
     }
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/QueryRescorerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/QueryRescorerIT.java
index d69b582445d68..8ef3d78a4635b 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/QueryRescorerIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/QueryRescorerIT.java
@@ -814,21 +814,19 @@ public void testRescorePhaseWithInvalidSort() throws Exception {
         Exception exc = expectThrows(
             Exception.class,
-            () -> prepareSearch().addSort(SortBuilders.fieldSort("number"))
+            prepareSearch().addSort(SortBuilders.fieldSort("number"))
                 .setTrackScores(true)
                 .addRescorer(new QueryRescorerBuilder(matchAllQuery()), 50)
-                .get()
         );
         assertNotNull(exc.getCause());
         assertThat(exc.getCause().getMessage(), containsString("Cannot use [sort] option in conjunction with [rescore]."));

         exc = expectThrows(
             Exception.class,
-            () -> prepareSearch().addSort(SortBuilders.fieldSort("number"))
+            prepareSearch().addSort(SortBuilders.fieldSort("number"))
                 .addSort(SortBuilders.scoreSort())
                 .setTrackScores(true)
                 .addRescorer(new QueryRescorerBuilder(matchAllQuery()), 50)
-                .get()
         );
         assertNotNull(exc.getCause());
         assertThat(exc.getCause().getMessage(), containsString("Cannot use [sort] option in conjunction with [rescore]."));
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/morelikethis/MoreLikeThisIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/morelikethis/MoreLikeThisIT.java
index a6281aef93e7e..dfadf38adf503 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/morelikethis/MoreLikeThisIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/morelikethis/MoreLikeThisIT.java
@@ -8,6 +8,7 @@

 package org.elasticsearch.search.morelikethis;

+import org.elasticsearch.action.ActionRequestBuilder;
 import org.elasticsearch.action.RoutingMissingException;
 import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder;
 import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
@@ -42,7 +43,6 @@
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertOrderedSearchHits;
-import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertRequestBuilderThrows;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits;
 import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder;
@@ -379,20 +379,18 @@ public void testNumericField() throws Exception {
         );

         // Explicit list of fields including numeric fields -> fail
-        assertRequestBuilderThrows(
-            prepareSearch().setQuery(
-                new MoreLikeThisQueryBuilder(new String[] { "string_value", "int_value" }, null, new Item[] { new Item("test", "1") })
-                    .minTermFreq(1)
-                    .minDocFreq(1)
-            ),
-            SearchPhaseExecutionException.class
+        ActionRequestBuilder<?, ?> builder5 = prepareSearch().setQuery(
+            new MoreLikeThisQueryBuilder(new String[] { "string_value", "int_value" }, null, new Item[] { new Item("test", "1") })
+                .minTermFreq(1)
+                .minDocFreq(1)
         );
+        expectThrows(SearchPhaseExecutionException.class, builder5);

         // mlt query with no field -> exception because _all is not enabled)
-        assertRequestBuilderThrows(
-            prepareSearch().setQuery(moreLikeThisQuery(new String[] { "index" }).minTermFreq(1).minDocFreq(1)),
-            SearchPhaseExecutionException.class
+        ActionRequestBuilder<?, ?> builder4 = prepareSearch().setQuery(
+            moreLikeThisQuery(new String[] { "index" }).minTermFreq(1).minDocFreq(1)
         );
+        expectThrows(SearchPhaseExecutionException.class, builder4);

         // mlt query with string fields
         assertHitCount(
@@ -403,18 +401,16 @@
         );

         // mlt query with at least a numeric field -> fail by default
-        assertRequestBuilderThrows(
-            prepareSearch().setQuery(moreLikeThisQuery(new String[] { "string_value", "int_value" }, new String[] { "index" }, null)),
-            SearchPhaseExecutionException.class
+        ActionRequestBuilder<?, ?> builder3 = prepareSearch().setQuery(
+            moreLikeThisQuery(new String[] { "string_value", "int_value" }, new String[] { "index" }, null)
         );
+        expectThrows(SearchPhaseExecutionException.class, builder3);

         // mlt query with at least a numeric field -> fail by command
-        assertRequestBuilderThrows(
-            prepareSearch().setQuery(
-                moreLikeThisQuery(new String[] { "string_value", "int_value" }, new String[] { "index" }, null).failOnUnsupportedField(true)
-            ),
-            SearchPhaseExecutionException.class
+        ActionRequestBuilder<?, ?> builder2 = prepareSearch().setQuery(
+            moreLikeThisQuery(new String[] { "string_value", "int_value" }, new String[] { "index" }, null).failOnUnsupportedField(true)
         );
+        expectThrows(SearchPhaseExecutionException.class, builder2);

         // mlt query with at least a numeric field but fail_on_unsupported_field set to false
         assertHitCount(
@@ -427,22 +423,18 @@
         );

         // mlt field query on a numeric field -> failure by default
-        assertRequestBuilderThrows(
-            prepareSearch().setQuery(
-                moreLikeThisQuery(new String[] { "int_value" }, new String[] { "42" }, null).minTermFreq(1).minDocFreq(1)
-            ),
-            SearchPhaseExecutionException.class
+        ActionRequestBuilder<?, ?> builder1 = prepareSearch().setQuery(
+            moreLikeThisQuery(new String[] { "int_value" }, new String[] { "42" }, null).minTermFreq(1).minDocFreq(1)
         );
+        expectThrows(SearchPhaseExecutionException.class, builder1);

         // mlt field query on a numeric field -> failure by command
-        assertRequestBuilderThrows(
-            prepareSearch().setQuery(
-                moreLikeThisQuery(new String[] { "int_value" }, new String[] { "42" }, null).minTermFreq(1)
-                    .minDocFreq(1)
-                    .failOnUnsupportedField(true)
-            ),
-            SearchPhaseExecutionException.class
+        ActionRequestBuilder<?, ?> builder = prepareSearch().setQuery(
+            moreLikeThisQuery(new String[] { "int_value" }, new String[] { "42" }, null).minTermFreq(1)
+                .minDocFreq(1)
+                .failOnUnsupportedField(true)
         );
+        expectThrows(SearchPhaseExecutionException.class, builder);

         // mlt field query on a numeric field but fail_on_unsupported_field set to false
         assertHitCount(
@@ -834,9 +826,9 @@ public void testWithMissingRouting() throws IOException {
             logger.info("Running moreLikeThis with one item without routing attribute");
             SearchPhaseExecutionException exception = expectThrows(
                 SearchPhaseExecutionException.class,
-                () -> prepareSearch().setQuery(
+                prepareSearch().setQuery(
                     new MoreLikeThisQueryBuilder(null, new Item[] { new Item("test", "1") }).minTermFreq(1).minDocFreq(1)
-                ).get()
+                )
             );

             Throwable cause = exception.getCause();
@@ -848,12 +840,12 @@
             logger.info("Running moreLikeThis with one item with routing attribute and two items without routing attribute");
             SearchPhaseExecutionException exception = expectThrows(
                 SearchPhaseExecutionException.class,
-                () -> prepareSearch().setQuery(
+                prepareSearch().setQuery(
                     new MoreLikeThisQueryBuilder(
                         null,
                         new Item[] { new Item("test", "1").routing("1"), new Item("test", "2"), new Item("test", "3") }
                     ).minTermFreq(1).minDocFreq(1)
-                ).get()
+                )
             );

             Throwable cause = exception.getCause();
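Where `assertRequestBuilderThrows` wrapped a long builder expression, MoreLikeThisIT now names the builder first and passes it to `expectThrows`, which keeps the query construction readable:

```java
// Pattern used above: build the (not yet executed) search request, then let
// expectThrows execute it. The wildcards just mean "some request builder".
ActionRequestBuilder<?, ?> builder = prepareSearch().setQuery(
    moreLikeThisQuery(new String[] { "index" }).minTermFreq(1).minDocFreq(1)
);
expectThrows(SearchPhaseExecutionException.class, builder);
```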
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/query/QueryStringIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/query/QueryStringIT.java
index 70706d44982c9..510cad5c19059 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/query/QueryStringIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/query/QueryStringIT.java
@@ -186,7 +186,7 @@ public void testPhraseQueryOnFieldWithNoPositions() throws Exception {
         Exception exc = expectThrows(
             Exception.class,
-            () -> prepareSearch("test").setQuery(queryStringQuery("f4:\"eggplant parmesan\"").lenient(false)).get()
+            prepareSearch("test").setQuery(queryStringQuery("f4:\"eggplant parmesan\"").lenient(false))
         );
         IllegalStateException ise = (IllegalStateException) ExceptionsHelper.unwrap(exc, IllegalStateException.class);
         assertNotNull(ise);
@@ -194,7 +194,7 @@
     }

     public void testBooleanStrictQuery() throws Exception {
-        Exception e = expectThrows(Exception.class, () -> prepareSearch("test").setQuery(queryStringQuery("foo").field("f_bool")).get());
+        Exception e = expectThrows(Exception.class, prepareSearch("test").setQuery(queryStringQuery("foo").field("f_bool")));
         assertThat(
             ExceptionsHelper.unwrap(e, IllegalArgumentException.class).getMessage(),
             containsString("Can't parse boolean value [foo], expected [true] or [false]")
@@ -204,7 +204,7 @@ public void testAllFieldsWithSpecifiedLeniency() throws IOException {
         Exception e = expectThrows(
             Exception.class,
-            () -> prepareSearch("test").setQuery(queryStringQuery("f_date:[now-2D TO now]").lenient(false)).get()
+            prepareSearch("test").setQuery(queryStringQuery("f_date:[now-2D TO now]").lenient(false))
         );
         assertThat(e.getCause().getMessage(), containsString("unit [D] not supported for date math [-2D]"));
     }
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/query/ScriptScoreQueryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/query/ScriptScoreQueryIT.java
index 685a8966d15ae..06b7ba5cc0975 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/query/ScriptScoreQueryIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/query/ScriptScoreQueryIT.java
@@ -146,7 +146,7 @@ public void testDisallowExpensiveQueries() {
         ElasticsearchException e = expectThrows(
             ElasticsearchException.class,
-            () -> prepareSearch("test-index").setQuery(scriptScoreQuery(matchQuery("field1", "text0"), script)).get()
+            prepareSearch("test-index").setQuery(scriptScoreQuery(matchQuery("field1", "text0"), script))
         );
         assertEquals(
             "[script score] queries cannot be executed when 'search.allow_expensive_queries' is set to false.",
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/query/SearchQueryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/query/SearchQueryIT.java
index 8aa7dd151b917..c9d1bb8382e23 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/query/SearchQueryIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/query/SearchQueryIT.java
@@ -355,7 +355,7 @@ public void testDateRangeInQueryString() {
         SearchPhaseExecutionException e = expectThrows(
             SearchPhaseExecutionException.class,
-            () -> prepareSearch().setQuery(queryStringQuery("future:[now/D TO now+2M/d]").lenient(false)).get()
+            prepareSearch().setQuery(queryStringQuery("future:[now/D TO now+2M/d]").lenient(false))
         );
         assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST));
         assertThat(e.toString(), containsString("unit [D] not supported for date math"));
@@ -549,7 +549,7 @@ public void testMatchQueryNumeric() throws Exception {
         assertResponse(prepareSearch().setQuery(matchQuery("double", "2")), response -> {
             assertHitCount(response, 1L);
             assertFirstHit(response, hasId("2"));
-            expectThrows(SearchPhaseExecutionException.class, () -> prepareSearch().setQuery(matchQuery("double", "2 3 4")).get());
+            expectThrows(SearchPhaseExecutionException.class, prepareSearch().setQuery(matchQuery("double", "2 3 4")));
         });
     }
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/query/SimpleQueryStringIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/query/SimpleQueryStringIT.java
index d5736d9b10e1a..c53e6533d8f6c 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/query/SimpleQueryStringIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/query/SimpleQueryStringIT.java
@@ -525,7 +525,7 @@ public void testAllFieldsWithSpecifiedLeniency() throws Exception {
         SearchPhaseExecutionException e = expectThrows(
             SearchPhaseExecutionException.class,
-            () -> prepareSearch("test").setQuery(simpleQueryStringQuery("foo123").lenient(false)).get()
+            prepareSearch("test").setQuery(simpleQueryStringQuery("foo123").lenient(false))
         );
         assertThat(e.getDetailedMessage(), containsString("NumberFormatException: For input string: \"foo123\""));
     }
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/scriptfilter/ScriptQuerySearchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/scriptfilter/ScriptQuerySearchIT.java
index 86c63a495b378..1d30703a584f5 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/scriptfilter/ScriptQuerySearchIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/scriptfilter/ScriptQuerySearchIT.java
@@ -240,7 +240,7 @@ public void testDisallowExpensiveQueries() {
         // Set search.allow_expensive_queries to "false" => assert failure
         ElasticsearchException e = expectThrows(
             ElasticsearchException.class,
-            () -> prepareSearch("test-index").setQuery(scriptQuery(script)).get()
+            prepareSearch("test-index").setQuery(scriptQuery(script))
         );
         assertEquals(
             "[script] queries cannot be executed when 'search.allow_expensive_queries' is set to false.",
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/scroll/SearchScrollIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/scroll/SearchScrollIT.java
index 8aaa0eccb04fb..32cd1aee07884 100644
--- a/server/src/internalClusterTest/java/org/elasticsearch/search/scroll/SearchScrollIT.java
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/scroll/SearchScrollIT.java
@@ -352,22 +352,14 @@ public void testClearIllegalScrollId() throws Exception {
         createIndex("idx");
         IllegalArgumentException e = expectThrows(
             IllegalArgumentException.class,
-            () -> client().prepareClearScroll().addScrollId("c2Nhbjs2OzM0NDg1ODpzRlBLc0FXNlNyNm5JWUc1").get()
+            client().prepareClearScroll().addScrollId("c2Nhbjs2OzM0NDg1ODpzRlBLc0FXNlNyNm5JWUc1")
         );
         assertEquals("Cannot parse scroll id", e.getMessage());
-
-        e = expectThrows(
-            IllegalArgumentException.class,
-            // Fails during base64 decoding (Base64-encoded string must have at least four characters)
-            () -> client().prepareClearScroll().addScrollId("a").get()
-        );
+        // Fails during base64 decoding (Base64-encoded string must have at least four characters)
+        e = expectThrows(IllegalArgumentException.class, client().prepareClearScroll().addScrollId("a"));
         assertEquals("Cannot parse scroll id", e.getMessage());
-
-        e = expectThrows(
-            IllegalArgumentException.class,
-            // Other invalid base64
-            () -> client().prepareClearScroll().addScrollId("abcabc").get()
-        );
+        // Other invalid base64
+        e = expectThrows(IllegalArgumentException.class, client().prepareClearScroll().addScrollId("abcabc"));
         assertEquals("Cannot parse scroll id", e.getMessage());
     }
@@ -566,9 +558,8 @@ public void testCloseAndReopenOrDeleteWithActiveScroll() {
     public void testScrollInvalidDefaultKeepAlive() throws IOException {
         IllegalArgumentException exc = expectThrows(
             IllegalArgumentException.class,
-            () -> clusterAdmin().prepareUpdateSettings()
+            clusterAdmin().prepareUpdateSettings()
                 .setPersistentSettings(Settings.builder().put("search.max_keep_alive", "1m").put("search.default_keep_alive", "2m"))
-                .get()
         );
         assertThat(exc.getMessage(), containsString("was (2m > 1m)"));
@@ -578,9 +569,7 @@
         exc = expectThrows(
             IllegalArgumentException.class,
-            () -> clusterAdmin().prepareUpdateSettings()
-                .setPersistentSettings(Settings.builder().put("search.default_keep_alive", "3m"))
-                .get()
+            clusterAdmin().prepareUpdateSettings().setPersistentSettings(Settings.builder().put("search.default_keep_alive", "3m"))
         );
         assertThat(exc.getMessage(), containsString("was (3m > 2m)"));
@@ -588,7 +577,7 @@
         exc = expectThrows(
             IllegalArgumentException.class,
-            () -> clusterAdmin().prepareUpdateSettings().setPersistentSettings(Settings.builder().put("search.max_keep_alive", "30s")).get()
+            clusterAdmin().prepareUpdateSettings().setPersistentSettings(Settings.builder().put("search.max_keep_alive", "30s"))
         );
         assertThat(exc.getMessage(), containsString("was (1m > 30s)"));
     }
@@ -603,7 +592,7 @@ public void testInvalidScrollKeepAlive() throws IOException {
         Exception exc = expectThrows(
             Exception.class,
-            () -> prepareSearch().setQuery(matchAllQuery()).setSize(1).setScroll(TimeValue.timeValueHours(2)).get()
+            prepareSearch().setQuery(matchAllQuery()).setSize(1).setScroll(TimeValue.timeValueHours(2))
         );
         IllegalArgumentException illegalArgumentException = (IllegalArgumentException) ExceptionsHelper.unwrap(
             exc,
@@ -618,7 +607,7 @@
         assertThat(searchResponse.getHits().getHits().length, equalTo(1));
         Exception ex = expectThrows(
             Exception.class,
-            () -> client().prepareSearchScroll(searchResponse.getScrollId()).setScroll(TimeValue.timeValueHours(3)).get()
+            client().prepareSearchScroll(searchResponse.getScrollId()).setScroll(TimeValue.timeValueHours(3))
         );
         IllegalArgumentException iae = (IllegalArgumentException) ExceptionsHelper.unwrap(ex, IllegalArgumentException.class);
         assertNotNull(iae);
@@ -706,7 +695,7 @@ public void testRestartDataNodesDuringScrollSearch() throws Exception {
         }
         SearchPhaseExecutionException error = expectThrows(
             SearchPhaseExecutionException.class,
-            () -> client().prepareSearchScroll(respFromDemoIndexScrollId).get()
+            client().prepareSearchScroll(respFromDemoIndexScrollId)
         );
         for (ShardSearchFailure shardSearchFailure : error.shardFailures()) {
             assertThat(shardSearchFailure.getCause().getMessage(), containsString("No search context found for id [1]"));
a/server/src/internalClusterTest/java/org/elasticsearch/search/searchafter/SearchAfterIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/searchafter/SearchAfterIT.java index b361a41574618..920895d98535a 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/searchafter/SearchAfterIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/searchafter/SearchAfterIT.java @@ -71,11 +71,10 @@ public void testsShouldFail() throws Exception { { SearchPhaseExecutionException e = expectThrows( SearchPhaseExecutionException.class, - () -> prepareSearch("test").addSort("field1", SortOrder.ASC) + prepareSearch("test").addSort("field1", SortOrder.ASC) .setQuery(matchAllQuery()) .searchAfter(new Object[] { 0 }) .setScroll("1m") - .get() ); assertTrue(e.shardFailures().length > 0); for (ShardSearchFailure failure : e.shardFailures()) { @@ -86,11 +85,7 @@ public void testsShouldFail() throws Exception { { SearchPhaseExecutionException e = expectThrows( SearchPhaseExecutionException.class, - () -> prepareSearch("test").addSort("field1", SortOrder.ASC) - .setQuery(matchAllQuery()) - .searchAfter(new Object[] { 0 }) - .setFrom(10) - .get() + prepareSearch("test").addSort("field1", SortOrder.ASC).setQuery(matchAllQuery()).searchAfter(new Object[] { 0 }).setFrom(10) ); assertTrue(e.shardFailures().length > 0); for (ShardSearchFailure failure : e.shardFailures()) { @@ -101,7 +96,7 @@ public void testsShouldFail() throws Exception { { SearchPhaseExecutionException e = expectThrows( SearchPhaseExecutionException.class, - () -> prepareSearch("test").setQuery(matchAllQuery()).searchAfter(new Object[] { 0.75f }).get() + prepareSearch("test").setQuery(matchAllQuery()).searchAfter(new Object[] { 0.75f }) ); assertTrue(e.shardFailures().length > 0); for (ShardSearchFailure failure : e.shardFailures()) { @@ -112,11 +107,10 @@ public void testsShouldFail() throws Exception { { SearchPhaseExecutionException e = expectThrows( SearchPhaseExecutionException.class, - () -> prepareSearch("test").addSort("field2", SortOrder.DESC) + prepareSearch("test").addSort("field2", SortOrder.DESC) .addSort("field1", SortOrder.ASC) .setQuery(matchAllQuery()) .searchAfter(new Object[] { 1 }) - .get() ); assertTrue(e.shardFailures().length > 0); for (ShardSearchFailure failure : e.shardFailures()) { @@ -127,10 +121,7 @@ public void testsShouldFail() throws Exception { { SearchPhaseExecutionException e = expectThrows( SearchPhaseExecutionException.class, - () -> prepareSearch("test").setQuery(matchAllQuery()) - .addSort("field1", SortOrder.ASC) - .searchAfter(new Object[] { 1, 2 }) - .get() + prepareSearch("test").setQuery(matchAllQuery()).addSort("field1", SortOrder.ASC).searchAfter(new Object[] { 1, 2 }) ); for (ShardSearchFailure failure : e.shardFailures()) { assertTrue(e.shardFailures().length > 0); @@ -141,10 +132,7 @@ public void testsShouldFail() throws Exception { { SearchPhaseExecutionException e = expectThrows( SearchPhaseExecutionException.class, - () -> prepareSearch("test").setQuery(matchAllQuery()) - .addSort("field1", SortOrder.ASC) - .searchAfter(new Object[] { "toto" }) - .get() + prepareSearch("test").setQuery(matchAllQuery()).addSort("field1", SortOrder.ASC).searchAfter(new Object[] { "toto" }) ); assertTrue(e.shardFailures().length > 0); for (ShardSearchFailure failure : e.shardFailures()) { @@ -490,7 +478,7 @@ public void testScrollAndSearchAfterWithBigIndex() { { OpenPointInTimeRequest openPITRequest = new 
OpenPointInTimeRequest("test").keepAlive(TimeValue.timeValueMinutes(5)); pitID = client().execute(TransportOpenPointInTimeAction.TYPE, openPITRequest).actionGet().getPointInTimeId(); - SearchRequest searchRequest = new SearchRequest("test").source( + SearchRequest searchRequest = new SearchRequest().source( new SearchSourceBuilder().pointInTimeBuilder(new PointInTimeBuilder(pitID).setKeepAlive(TimeValue.timeValueMinutes(5))) .sort("timestamp") ); @@ -526,7 +514,7 @@ public void testScrollAndSearchAfterWithBigIndex() { { OpenPointInTimeRequest openPITRequest = new OpenPointInTimeRequest("test").keepAlive(TimeValue.timeValueMinutes(5)); pitID = client().execute(TransportOpenPointInTimeAction.TYPE, openPITRequest).actionGet().getPointInTimeId(); - SearchRequest searchRequest = new SearchRequest("test").source( + SearchRequest searchRequest = new SearchRequest().source( new SearchSourceBuilder().pointInTimeBuilder(new PointInTimeBuilder(pitID).setKeepAlive(TimeValue.timeValueMinutes(5))) .sort(SortBuilders.pitTiebreaker()) ); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/simple/SimpleSearchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/simple/SimpleSearchIT.java index 7f2558da272f1..e9683c5880bc8 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/simple/SimpleSearchIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/simple/SimpleSearchIT.java @@ -65,13 +65,8 @@ protected Collection> nodePlugins() { } public void testSearchNullIndex() { - expectThrows(NullPointerException.class, () -> prepareSearch((String) null).setQuery(QueryBuilders.termQuery("_id", "XXX1")).get()); - - expectThrows( - NullPointerException.class, - () -> prepareSearch((String[]) null).setQuery(QueryBuilders.termQuery("_id", "XXX1")).get() - ); - + expectThrows(NullPointerException.class, () -> prepareSearch((String) null)); + expectThrows(NullPointerException.class, () -> prepareSearch((String[]) null)); } public void testSearchRandomPreference() throws InterruptedException, ExecutionException { @@ -461,10 +456,11 @@ public void testTermQueryBigInt() throws Exception { indexRequestBuilder.request().decRef(); String queryJson = "{ \"field\" : { \"value\" : 80315953321748200608 } }"; - XContentParser parser = createParser(JsonXContent.jsonXContent, queryJson); - parser.nextToken(); - TermQueryBuilder query = TermQueryBuilder.fromXContent(parser); - assertHitCount(prepareSearch("idx").setQuery(query), 1); + try (XContentParser parser = createParser(JsonXContent.jsonXContent, queryJson)) { + parser.nextToken(); + TermQueryBuilder query = TermQueryBuilder.fromXContent(parser); + assertHitCount(prepareSearch("idx").setQuery(query), 1); + } } public void testTooLongRegexInRegexpQuery() throws Exception { @@ -478,7 +474,7 @@ public void testTooLongRegexInRegexpQuery() throws Exception { } SearchPhaseExecutionException e = expectThrows( SearchPhaseExecutionException.class, - () -> prepareSearch("idx").setQuery(QueryBuilders.regexpQuery("num", regexp.toString())).get() + prepareSearch("idx").setQuery(QueryBuilders.regexpQuery("num", regexp.toString())) ); assertThat( e.getRootCause().getMessage(), @@ -528,7 +524,7 @@ public void testStrictlyCountRequest() throws Exception { } private void assertWindowFails(SearchRequestBuilder search) { - SearchPhaseExecutionException e = expectThrows(SearchPhaseExecutionException.class, () -> search.get()); + SearchPhaseExecutionException e = expectThrows(SearchPhaseExecutionException.class, search); 
assertThat( e.toString(), containsString( @@ -541,7 +537,7 @@ private void assertWindowFails(SearchRequestBuilder search) { private void assertRescoreWindowFails(int windowSize) { SearchRequestBuilder search = prepareSearch("idx").addRescorer(new QueryRescorerBuilder(matchAllQuery()).windowSize(windowSize)); - SearchPhaseExecutionException e = expectThrows(SearchPhaseExecutionException.class, () -> search.get()); + SearchPhaseExecutionException e = expectThrows(SearchPhaseExecutionException.class, search); assertThat( e.toString(), containsString( diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/slice/SearchSliceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/slice/SearchSliceIT.java index 996ece48273f7..dc424dfd79318 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/slice/SearchSliceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/slice/SearchSliceIT.java @@ -225,7 +225,7 @@ private void assertSearchSlicesWithPointInTime(String sliceField, String sortFie for (int id = 0; id < numSlice; id++) { int numSliceResults = 0; - SearchRequestBuilder request = prepareSearch("test").slice(new SliceBuilder(sliceField, id, numSlice)) + SearchRequestBuilder request = prepareSearch().slice(new SliceBuilder(sliceField, id, numSlice)) .setPointInTime(new PointInTimeBuilder(pointInTimeId)) .addSort(SortBuilders.fieldSort(sortField)) .setSize(randomIntBetween(10, 100)); @@ -264,10 +264,10 @@ public void testInvalidFields() throws Exception { setupIndex(0, 1); SearchPhaseExecutionException exc = expectThrows( SearchPhaseExecutionException.class, - () -> prepareSearch("test").setQuery(matchAllQuery()) + prepareSearch("test").setQuery(matchAllQuery()) .setScroll(new Scroll(TimeValue.timeValueSeconds(10))) .slice(new SliceBuilder("invalid_random_int", 0, 10)) - .get() + ); Throwable rootCause = findRootCause(exc); assertThat(rootCause.getClass(), equalTo(IllegalArgumentException.class)); @@ -275,10 +275,9 @@ public void testInvalidFields() throws Exception { exc = expectThrows( SearchPhaseExecutionException.class, - () -> prepareSearch("test").setQuery(matchAllQuery()) + prepareSearch("test").setQuery(matchAllQuery()) .setScroll(new Scroll(TimeValue.timeValueSeconds(10))) .slice(new SliceBuilder("invalid_random_kw", 0, 10)) - .get() ); rootCause = findRootCause(exc); assertThat(rootCause.getClass(), equalTo(IllegalArgumentException.class)); @@ -289,7 +288,7 @@ public void testInvalidQuery() throws Exception { setupIndex(0, 1); SearchPhaseExecutionException exc = expectThrows( SearchPhaseExecutionException.class, - () -> prepareSearch().setQuery(matchAllQuery()).slice(new SliceBuilder("invalid_random_int", 0, 10)).get() + prepareSearch().setQuery(matchAllQuery()).slice(new SliceBuilder("invalid_random_int", 0, 10)) ); Throwable rootCause = findRootCause(exc); assertThat(rootCause.getClass(), equalTo(SearchException.class)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/sort/FieldSortIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/sort/FieldSortIT.java index 9ad0e9d192530..ddad063e4512d 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/sort/FieldSortIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/sort/FieldSortIT.java @@ -1646,7 +1646,7 @@ public void testNestedSort() throws IOException, InterruptedException, Execution { SearchPhaseExecutionException exc = expectThrows( SearchPhaseExecutionException.class, - () -> 
prepareSearch().setQuery(matchAllQuery()) + prepareSearch().setQuery(matchAllQuery()) .addSort( SortBuilders.fieldSort("nested.bar.foo") .setNestedSort( @@ -1654,7 +1654,6 @@ public void testNestedSort() throws IOException, InterruptedException, Execution ) .order(SortOrder.DESC) ) - .get() ); assertThat(exc.toString(), containsString("max_children is only supported on top level of nested sort")); } @@ -1676,7 +1675,7 @@ public void testNestedSort() throws IOException, InterruptedException, Execution // missing nested path SearchPhaseExecutionException exc = expectThrows( SearchPhaseExecutionException.class, - () -> prepareSearch().setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort("nested.foo")).get() + prepareSearch().setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort("nested.foo")) ); assertThat(exc.toString(), containsString("it is mandatory to set the [nested] context")); } @@ -2060,9 +2059,7 @@ public void testCastNumericTypeExceptions() throws Exception { for (String numericType : new String[] { "long", "double", "date", "date_nanos" }) { ElasticsearchException exc = expectThrows( ElasticsearchException.class, - () -> prepareSearch().setQuery(matchAllQuery()) - .addSort(SortBuilders.fieldSort(invalidField).setNumericType(numericType)) - .get() + prepareSearch().setQuery(matchAllQuery()).addSort(SortBuilders.fieldSort(invalidField).setNumericType(numericType)) ); assertThat(exc.status(), equalTo(RestStatus.BAD_REQUEST)); assertThat(exc.getDetailedMessage(), containsString("[numeric_type] option cannot be set on a non-numeric field")); @@ -2151,7 +2148,7 @@ public void testSortMixedFieldTypes() { { // mixing long and double types is not allowed SearchPhaseExecutionException exc = expectThrows( SearchPhaseExecutionException.class, - () -> prepareSearch("index_long", "index_double").addSort(new FieldSortBuilder("foo")).setSize(10).get() + prepareSearch("index_long", "index_double").addSort(new FieldSortBuilder("foo")).setSize(10) ); assertThat(exc.getCause().toString(), containsString(errMsg)); } @@ -2159,7 +2156,7 @@ public void testSortMixedFieldTypes() { { // mixing long and keyword types is not allowed SearchPhaseExecutionException exc = expectThrows( SearchPhaseExecutionException.class, - () -> prepareSearch("index_long", "index_keyword").addSort(new FieldSortBuilder("foo")).setSize(10).get() + prepareSearch("index_long", "index_keyword").addSort(new FieldSortBuilder("foo")).setSize(10) ); assertThat(exc.getCause().toString(), containsString(errMsg)); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/source/MetadataFetchingIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/source/MetadataFetchingIT.java index 04e4cac828a9b..98e1640266b89 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/source/MetadataFetchingIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/source/MetadataFetchingIT.java @@ -106,7 +106,7 @@ public void testInvalid() { { SearchPhaseExecutionException exc = expectThrows( SearchPhaseExecutionException.class, - () -> prepareSearch("test").setFetchSource(true).storedFields("_none_").get() + prepareSearch("test").setFetchSource(true).storedFields("_none_") ); Throwable rootCause = ExceptionsHelper.unwrap(exc, SearchException.class); assertNotNull(rootCause); @@ -116,7 +116,7 @@ public void testInvalid() { { SearchPhaseExecutionException exc = expectThrows( SearchPhaseExecutionException.class, - () -> 
prepareSearch("test").storedFields("_none_").addFetchField("field").get() + prepareSearch("test").storedFields("_none_").addFetchField("field") ); Throwable rootCause = ExceptionsHelper.unwrap(exc, SearchException.class); assertNotNull(rootCause); @@ -126,14 +126,14 @@ public void testInvalid() { { IllegalArgumentException exc = expectThrows( IllegalArgumentException.class, - () -> prepareSearch("test").storedFields("_none_", "field1").setVersion(true).get() + () -> prepareSearch("test").storedFields("_none_", "field1") ); assertThat(exc.getMessage(), equalTo("cannot combine _none_ with other fields")); } { IllegalArgumentException exc = expectThrows( IllegalArgumentException.class, - () -> prepareSearch("test").storedFields("_none_").storedFields("field1").setVersion(true).get() + () -> prepareSearch("test").storedFields("_none_").storedFields("field1") ); assertThat(exc.getMessage(), equalTo("cannot combine _none_ with other fields")); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java index 26b7b817859b7..c94fa0a37a260 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/suggest/CompletionSuggestSearchIT.java @@ -989,7 +989,7 @@ public void testThatSortingOnCompletionFieldReturnsUsefulException() throws Exce SearchPhaseExecutionException e = expectThrows( SearchPhaseExecutionException.class, - () -> prepareSearch(INDEX).addSort(new FieldSortBuilder(FIELD)).get() + prepareSearch(INDEX).addSort(new FieldSortBuilder(FIELD)) ); assertThat(e.status().getStatus(), is(400)); assertThat(e.toString(), containsString("Fielddata is not supported on field [" + FIELD + "] of type [completion]")); @@ -1364,9 +1364,9 @@ public void testIssue5930() throws IOException { SearchPhaseExecutionException e = expectThrows( SearchPhaseExecutionException.class, - () -> prepareSearch(INDEX).addAggregation( + prepareSearch(INDEX).addAggregation( AggregationBuilders.terms("suggest_agg").field(FIELD).collectMode(randomFrom(SubAggCollectionMode.values())) - ).get() + ) ); assertThat(e.toString(), containsString("Fielddata is not supported on field [" + FIELD + "] of type [completion]")); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/suggest/SuggestSearchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/suggest/SuggestSearchIT.java index 3631d0f7315c8..ac0c451bf409f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/suggest/SuggestSearchIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/suggest/SuggestSearchIT.java @@ -53,7 +53,6 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailuresAndResponse; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertRequestBuilderThrows; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSuggestion; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSuggestionPhraseCollateMatchExists; @@ -313,12 +312,12 @@ public void testUnmappedField() 
throws IOException, InterruptedException, Execut { SearchRequestBuilder searchBuilder = prepareSearch().setSize(0); searchBuilder.suggest(new SuggestBuilder().setGlobalText("tetsting sugestion").addSuggestion("did_you_mean", phraseSuggestion)); - assertRequestBuilderThrows(searchBuilder, SearchPhaseExecutionException.class); + expectThrows(SearchPhaseExecutionException.class, searchBuilder); } { SearchRequestBuilder searchBuilder = prepareSearch().setSize(0); searchBuilder.suggest(new SuggestBuilder().setGlobalText("tetsting sugestion").addSuggestion("did_you_mean", phraseSuggestion)); - assertRequestBuilderThrows(searchBuilder, SearchPhaseExecutionException.class); + expectThrows(SearchPhaseExecutionException.class, searchBuilder); } } @@ -838,7 +837,7 @@ public void testShardFailures() throws IOException, InterruptedException { new SuggestBuilder().setGlobalText("tetsting sugestion") .addSuggestion("did_you_mean", phraseSuggestion("fielddoesnotexist").maxErrors(5.0f)) ); - assertRequestBuilderThrows(request, SearchPhaseExecutionException.class); + expectThrows(SearchPhaseExecutionException.class, request); // When searching on a shard which does not hold yet any document of an existing type, we should not fail assertNoFailuresAndResponse( diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CloneSnapshotIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CloneSnapshotIT.java index 085fca2462984..ca06dcea88766 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CloneSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CloneSnapshotIT.java @@ -165,7 +165,7 @@ public void testClonePreventsSnapshotDelete() throws Exception { ConcurrentSnapshotExecutionException ex = expectThrows( ConcurrentSnapshotExecutionException.class, - () -> startDeleteSnapshot(repoName, sourceSnapshot).actionGet() + startDeleteSnapshot(repoName, sourceSnapshot) ); assertThat(ex.getMessage(), containsString("cannot delete snapshot while it is being cloned")); @@ -286,7 +286,7 @@ public void testDeletePreventsClone() throws Exception { ConcurrentSnapshotExecutionException ex = expectThrows( ConcurrentSnapshotExecutionException.class, - () -> startClone(repoName, sourceSnapshot, targetSnapshot, indexName).actionGet() + startClone(repoName, sourceSnapshot, targetSnapshot, indexName) ); assertThat(ex.getMessage(), containsString("cannot clone from snapshot that is being deleted")); @@ -401,10 +401,7 @@ public void testFailsOnCloneMissingIndices() { final String snapshotName = "snapshot"; createFullSnapshot(repoName, snapshotName); - expectThrows( - IndexNotFoundException.class, - () -> startClone(repoName, snapshotName, "target-snapshot", "does-not-exist").actionGet() - ); + expectThrows(IndexNotFoundException.class, startClone(repoName, snapshotName, "target-snapshot", "does-not-exist")); } public void testMasterFailoverDuringCloneStep2() throws Exception { @@ -426,7 +423,7 @@ public void testMasterFailoverDuringCloneStep2() throws Exception { final String masterNode = internalCluster().getMasterName(); waitForBlock(masterNode, repoName); internalCluster().restartNode(masterNode); - expectThrows(SnapshotException.class, cloneFuture::actionGet); + expectThrows(SnapshotException.class, cloneFuture); awaitNoMoreRunningOperations(); assertAllSnapshotsSuccessful(getRepositoryData(repoName), 2); @@ -451,7 +448,7 @@ public void testExceptionDuringShardClone() throws Exception { final String masterNode = 
internalCluster().getMasterName(); waitForBlock(masterNode, repoName); unblockNode(repoName, masterNode); - expectThrows(SnapshotException.class, cloneFuture::actionGet); + expectThrows(SnapshotException.class, cloneFuture); awaitNoMoreRunningOperations(); assertAllSnapshotsSuccessful(getRepositoryData(repoName), 1); assertAcked(startDeleteSnapshot(repoName, sourceSnapshot).get()); @@ -480,9 +477,7 @@ public void testDoesNotStartOnBrokenSourceSnapshot() throws Exception { final SnapshotException sne = expectThrows( SnapshotException.class, - () -> startClone(masterClient, repoName, sourceSnapshot, "target-snapshot", testIndex).actionGet( - TimeValue.timeValueSeconds(30L) - ) + startClone(masterClient, repoName, sourceSnapshot, "target-snapshot", testIndex) ); assertThat( sne.getMessage(), @@ -542,9 +537,7 @@ public void testSnapshotQueuedAfterCloneFromBrokenSourceSnapshot() throws Except ); final SnapshotException sne = expectThrows( SnapshotException.class, - () -> startClone(masterClient, repoName, sourceSnapshot, "target-snapshot", testIndex).actionGet( - TimeValue.timeValueSeconds(30L) - ) + startClone(masterClient, repoName, sourceSnapshot, "target-snapshot", testIndex) ); assertThat( sne.getMessage(), @@ -744,7 +737,7 @@ public void testRemoveFailedCloneFromCSWithoutIO() throws Exception { awaitNumberOfSnapshotsInProgress(1); waitForBlock(masterNode, repoName); unblockNode(repoName, masterNode); - expectThrows(SnapshotException.class, cloneFuture::actionGet); + expectThrows(SnapshotException.class, cloneFuture); awaitNoMoreRunningOperations(); assertAllSnapshotsSuccessful(getRepositoryData(repoName), 1); assertAcked(startDeleteSnapshot(repoName, sourceSnapshot).get()); @@ -787,7 +780,7 @@ public void testRemoveFailedCloneFromCSWithQueuedSnapshotInProgress() throws Exc waitForBlock(masterNode, repoName); unblockNode(repoName, masterNode); final ActionFuture fullSnapshotFuture2 = startFullSnapshot(repoName, "full-snapshot-2"); - expectThrows(SnapshotException.class, cloneFuture::actionGet); + expectThrows(SnapshotException.class, cloneFuture); unblockNode(repoName, dataNode); awaitNoMoreRunningOperations(); assertSuccessful(fullSnapshotFuture1); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java index df59ab18bef72..8d2e15f5027d5 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/ConcurrentSnapshotsIT.java @@ -143,11 +143,7 @@ public void testRecreateCorruptedRepositoryDuringSnapshotsFails() throws Excepti logger.info("--> trying to create another snapshot in order for repository to be marked as corrupt"); final SnapshotException snapshotException = expectThrows( SnapshotException.class, - () -> clusterAdmin().prepareCreateSnapshot(repoName, "fast-snapshot2") - .setIndices(indexFast) - .setWaitForCompletion(true) - .execute() - .actionGet() + clusterAdmin().prepareCreateSnapshot(repoName, "fast-snapshot2").setIndices(indexFast).setWaitForCompletion(true) ); assertThat(snapshotException.getMessage(), containsString("failed to update snapshot in repository")); assertEquals(RepositoryData.CORRUPTED_REPO_GEN, getRepositoryMetadata(repoName).generation()); @@ -243,7 +239,7 @@ public void testBlockedRepoDoesNotBlockOtherRepos() throws Exception { clusterAdmin().prepareCreateSnapshot(otherRepoName, 
"snapshot").setIndices("does-not-exist-*").setWaitForCompletion(false).get(); unblockNode(blockedRepoName, internalCluster().getMasterName()); - expectThrows(SnapshotException.class, createSlowFuture::actionGet); + expectThrows(SnapshotException.class, createSlowFuture); assertBusy(() -> assertThat(currentSnapshots(otherRepoName), empty()), 30L, TimeUnit.SECONDS); } @@ -337,7 +333,7 @@ public void testSnapshotRunsAfterInProgressDelete() throws Exception { final ActionFuture snapshotFuture = startFullSnapshot(repoName, "second-snapshot"); unblockNode(repoName, masterNode); - final UncategorizedExecutionException ex = expectThrows(UncategorizedExecutionException.class, deleteFuture::actionGet); + final UncategorizedExecutionException ex = expectThrows(UncategorizedExecutionException.class, deleteFuture); assertThat(ex.getRootCause(), instanceOf(IOException.class)); assertSuccessful(snapshotFuture); @@ -542,7 +538,7 @@ public void testMasterFailOverWithQueuedDeletes() throws Exception { assertThat(sme.getSnapshotName(), is(firstSnapshot)); } } - expectThrows(SnapshotException.class, snapshotThreeFuture::actionGet); + expectThrows(SnapshotException.class, snapshotThreeFuture); logger.info("--> verify that all snapshots are gone and no more work is left in the cluster state"); awaitNoMoreRunningOperations(); @@ -597,12 +593,12 @@ public void testQueuedDeletesWithFailures() throws Exception { awaitNDeletionsInProgress(2); unblockNode(repoName, masterNode); - expectThrows(UncategorizedExecutionException.class, firstDeleteFuture::actionGet); + expectThrows(UncategorizedExecutionException.class, firstDeleteFuture); // Second delete works out cleanly since the repo is unblocked now assertThat(secondDeleteFuture.get().isAcknowledged(), is(true)); // Snapshot should have been aborted - final SnapshotException snapshotException = expectThrows(SnapshotException.class, snapshotFuture::actionGet); + final SnapshotException snapshotException = expectThrows(SnapshotException.class, snapshotFuture); assertThat(snapshotException.getMessage(), containsString(SnapshotsInProgress.ABORTED_FAILURE_TEXT)); assertThat(clusterAdmin().prepareGetSnapshots(repoName).get().getSnapshots(), empty()); @@ -629,7 +625,7 @@ public void testQueuedDeletesWithOverlap() throws Exception { // Second delete works out cleanly since the repo is unblocked now assertThat(secondDeleteFuture.get().isAcknowledged(), is(true)); // Snapshot should have been aborted - final SnapshotException snapshotException = expectThrows(SnapshotException.class, snapshotFuture::actionGet); + final SnapshotException snapshotException = expectThrows(SnapshotException.class, snapshotFuture); assertThat(snapshotException.getMessage(), containsString(SnapshotsInProgress.ABORTED_FAILURE_TEXT)); assertThat(clusterAdmin().prepareGetSnapshots(repoName).get().getSnapshots(), empty()); @@ -696,7 +692,7 @@ public void testQueuedOperationsOnMasterDisconnect() throws Exception { logger.info("--> make sure all failing requests get a response"); assertAcked(firstDeleteFuture.get()); assertAcked(secondDeleteFuture.get()); - expectThrows(SnapshotException.class, createThirdSnapshot::actionGet); + expectThrows(SnapshotException.class, createThirdSnapshot); awaitNoMoreRunningOperations(); } @@ -737,8 +733,8 @@ public void testQueuedOperationsOnMasterDisconnectAndRepoFailure() throws Except networkDisruption.stopDisrupting(); logger.info("--> make sure all failing requests get a response"); - expectThrows(SnapshotException.class, firstFailedSnapshotFuture::actionGet); - 
expectThrows(SnapshotException.class, secondFailedSnapshotFuture::actionGet); + expectThrows(SnapshotException.class, firstFailedSnapshotFuture); + expectThrows(SnapshotException.class, secondFailedSnapshotFuture); assertAcked(deleteFuture.get()); awaitNoMoreRunningOperations(); @@ -808,8 +804,8 @@ public void testQueuedSnapshotOperationsAndBrokenRepoOnMasterFailOver() throws E ensureStableCluster(3); awaitNoMoreRunningOperations(); - expectThrows(ElasticsearchException.class, snapshotThree::actionGet); - expectThrows(ElasticsearchException.class, snapshotFour::actionGet); + expectThrows(ElasticsearchException.class, snapshotThree); + expectThrows(ElasticsearchException.class, snapshotFour); } public void testQueuedSnapshotOperationsAndBrokenRepoOnMasterFailOver2() throws Exception { @@ -841,8 +837,8 @@ public void testQueuedSnapshotOperationsAndBrokenRepoOnMasterFailOver2() throws unblockNode(repoName, masterNode); networkDisruption.stopDisrupting(); awaitNoMoreRunningOperations(); - expectThrows(ElasticsearchException.class, snapshotThree::actionGet); - expectThrows(ElasticsearchException.class, snapshotFour::actionGet); + expectThrows(ElasticsearchException.class, snapshotThree); + expectThrows(ElasticsearchException.class, snapshotFour); } public void testQueuedSnapshotOperationsAndBrokenRepoOnMasterFailOverMultipleRepos() throws Exception { @@ -885,8 +881,8 @@ public void testQueuedSnapshotOperationsAndBrokenRepoOnMasterFailOverMultipleRep ensureStableCluster(3); awaitNoMoreRunningOperations(); - expectThrows(ElasticsearchException.class, snapshotThree::actionGet); - expectThrows(ElasticsearchException.class, snapshotFour::actionGet); + expectThrows(ElasticsearchException.class, snapshotThree); + expectThrows(ElasticsearchException.class, snapshotFour); assertAcked(deleteFuture.get()); try { createBlockedSnapshot.actionGet(); @@ -1031,7 +1027,7 @@ public void testQueuedOperationsAfterFinalizationFailure() throws Exception { unblockNode(repoName, masterName); - expectThrows(SnapshotException.class, snapshotThree::actionGet); + expectThrows(SnapshotException.class, snapshotThree); assertAcked(deleteSnapshotOne.get()); } @@ -1317,7 +1313,7 @@ public void testConcurrentOperationsLimit() throws Exception { final ConcurrentSnapshotExecutionException cse = expectThrows( ConcurrentSnapshotExecutionException.class, - () -> clusterAdmin().prepareCreateSnapshot(repoName, "expected-to-fail").get() + clusterAdmin().prepareCreateSnapshot(repoName, "expected-to-fail") ); assertThat( cse.getMessage(), @@ -1407,7 +1403,7 @@ public void testQueuedDeleteAfterFinalizationFailure() throws Exception { awaitNDeletionsInProgress(1); unblockNode(repoName, masterNode); assertAcked(deleteFuture.get()); - final SnapshotException sne = expectThrows(SnapshotException.class, snapshotFuture::actionGet); + final SnapshotException sne = expectThrows(SnapshotException.class, snapshotFuture); assertThat(sne.getCause().getMessage(), containsString("exception after block")); } @@ -1430,7 +1426,7 @@ public void testAbortNotStartedSnapshotWithoutIO() throws Exception { awaitNumberOfSnapshotsInProgress(2); assertAcked(startDeleteSnapshot(repoName, snapshotTwo).get()); - final SnapshotException sne = expectThrows(SnapshotException.class, createSnapshot2Future::actionGet); + final SnapshotException sne = expectThrows(SnapshotException.class, createSnapshot2Future); assertFalse(createSnapshot1Future.isDone()); unblockNode(repoName, dataNode); diff --git 
a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CorruptedBlobStoreRepositoryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CorruptedBlobStoreRepositoryIT.java index 8dec05c177872..e30c219092dab 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CorruptedBlobStoreRepositoryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CorruptedBlobStoreRepositoryIT.java @@ -7,6 +7,7 @@ */ package org.elasticsearch.snapshots; +import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; @@ -48,7 +49,6 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFileExists; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertRequestBuilderThrows; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -94,7 +94,7 @@ public void testRecreateCorruptedRepositoryUnblocksIt() throws Exception { startDeleteSnapshot(repoName, snapshot).get(); logger.info("--> make sure snapshot doesn't exist"); - expectThrows(SnapshotMissingException.class, () -> clusterAdmin().prepareGetSnapshots(repoName).addSnapshots(snapshot).get()); + expectThrows(SnapshotMissingException.class, clusterAdmin().prepareGetSnapshots(repoName).addSnapshots(snapshot)); } public void testConcurrentlyChangeRepositoryContents() throws Exception { @@ -168,10 +168,7 @@ public void testConcurrentlyChangeRepositoryContents() throws Exception { startDeleteSnapshot(repoName, snapshot).get(); logger.info("--> make sure snapshot doesn't exist"); - expectThrows( - SnapshotMissingException.class, - () -> client.admin().cluster().prepareGetSnapshots(repoName).addSnapshots(snapshot).get() - ); + expectThrows(SnapshotMissingException.class, client.admin().cluster().prepareGetSnapshots(repoName).addSnapshots(snapshot)); } public void testFindDanglingLatestGeneration() throws Exception { @@ -242,7 +239,7 @@ public void testFindDanglingLatestGeneration() throws Exception { assertThat(getRepositoryData(repoName).getGenId(), is(beforeMoveGen + 2)); logger.info("--> make sure snapshot doesn't exist"); - expectThrows(SnapshotMissingException.class, () -> clusterAdmin().prepareGetSnapshots(repoName).addSnapshots(snapshot).get()); + expectThrows(SnapshotMissingException.class, clusterAdmin().prepareGetSnapshots(repoName).addSnapshots(snapshot)); } public void testHandlingMissingRootLevelSnapshotMetadata() throws Exception { @@ -598,10 +595,7 @@ public void testDeleteSnapshotWithMissingIndexAndShardMetadata() throws Exceptio logger.info("--> make sure snapshot doesn't exist"); - expectThrows( - SnapshotMissingException.class, - () -> client.admin().cluster().prepareGetSnapshots("test-repo").addSnapshots("test-snap-1").get() - ); + expectThrows(SnapshotMissingException.class, client.admin().cluster().prepareGetSnapshots("test-repo").addSnapshots("test-snap-1")); for (String index : indices) { assertTrue(Files.notExists(indicesPath.resolve(indexIds.get(index).getId()))); @@ -642,10 +636,7 @@ public void testDeleteSnapshotWithMissingMetadata() throws Exception { startDeleteSnapshot("test-repo", "test-snap-1").get(); logger.info("--> make sure 
snapshot doesn't exist"); - expectThrows( - SnapshotMissingException.class, - () -> client.admin().cluster().prepareGetSnapshots("test-repo").addSnapshots("test-snap-1").get() - ); + expectThrows(SnapshotMissingException.class, client.admin().cluster().prepareGetSnapshots("test-repo").addSnapshots("test-snap-1")); } public void testDeleteSnapshotWithCorruptedSnapshotFile() throws Exception { @@ -690,10 +681,7 @@ public void testDeleteSnapshotWithCorruptedSnapshotFile() throws Exception { startDeleteSnapshot("test-repo", "test-snap-1").get(); logger.info("--> make sure snapshot doesn't exist"); - expectThrows( - SnapshotMissingException.class, - () -> client.admin().cluster().prepareGetSnapshots("test-repo").addSnapshots("test-snap-1").get().getSnapshots() - ); + expectThrows(SnapshotMissingException.class, client.admin().cluster().prepareGetSnapshots("test-repo").addSnapshots("test-snap-1")); logger.info("--> make sure that we can create the snapshot again"); createSnapshotResponse = client.admin() @@ -752,11 +740,9 @@ public void testDeleteSnapshotWithCorruptedGlobalState() throws Exception { assertThat(snapshotStatusResponse.getSnapshots().get(0).getSnapshot().getSnapshotId().getName(), equalTo("test-snap")); assertAcked(startDeleteSnapshot("test-repo", "test-snap").get()); - expectThrows(SnapshotMissingException.class, () -> clusterAdmin().prepareGetSnapshots("test-repo").addSnapshots("test-snap").get()); - assertRequestBuilderThrows( - clusterAdmin().prepareSnapshotStatus("test-repo").addSnapshots("test-snap"), - SnapshotMissingException.class - ); + expectThrows(SnapshotMissingException.class, clusterAdmin().prepareGetSnapshots("test-repo").addSnapshots("test-snap")); + ActionRequestBuilder builder = clusterAdmin().prepareSnapshotStatus("test-repo").addSnapshots("test-snap"); + expectThrows(SnapshotMissingException.class, builder); createFullSnapshot("test-repo", "test-snap"); } @@ -830,14 +816,14 @@ private void assertRepositoryBlocked(String repo, String existingSnapshot) { logger.info("--> try to delete snapshot"); final RepositoryException ex = expectThrows( RepositoryException.class, - () -> clusterAdmin().prepareDeleteSnapshot(repo, existingSnapshot).get() + clusterAdmin().prepareDeleteSnapshot(repo, existingSnapshot) ); assertThat(ex.getMessage(), containsString("concurrent modification of the index-N file")); logger.info("--> try to create snapshot"); final RepositoryException ex2 = expectThrows( RepositoryException.class, - () -> clusterAdmin().prepareCreateSnapshot(repo, existingSnapshot).get() + clusterAdmin().prepareCreateSnapshot(repo, existingSnapshot) ); assertThat(ex2.getMessage(), containsString("The repository has been disabled to prevent data corruption")); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CustomMetadataContextIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CustomMetadataContextIT.java index 3a72ab792f571..ef8ae3cf1cffb 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CustomMetadataContextIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CustomMetadataContextIT.java @@ -8,6 +8,7 @@ package org.elasticsearch.snapshots; import org.elasticsearch.TransportVersion; +import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.cluster.NamedDiff; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; @@ -30,7 +31,6 @@ import java.util.function.Function; import static 
org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertRequestBuilderThrows; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.nullValue; @@ -65,7 +65,8 @@ public void testShouldNotRestoreRepositoryMetadata() { .get(); logger.info("make sure old repository wasn't restored"); - assertRequestBuilderThrows(clusterAdmin().prepareGetRepositories("test-repo-1"), RepositoryMissingException.class); + ActionRequestBuilder builder = clusterAdmin().prepareGetRepositories("test-repo-1"); + expectThrows(RepositoryMissingException.class, builder); assertThat(clusterAdmin().prepareGetRepositories("test-repo-2").get().repositories().size(), equalTo(1)); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java index 4290d80d4dc57..867c76e80b206 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java @@ -196,7 +196,7 @@ public void testSnapshotWithStuckNode() throws Exception { } logger.info("--> making sure that snapshot no longer exists"); - expectThrows(SnapshotMissingException.class, () -> clusterAdmin().prepareGetSnapshots("test-repo").setSnapshots("test-snap").get()); + expectThrows(SnapshotMissingException.class, clusterAdmin().prepareGetSnapshots("test-repo").setSnapshots("test-snap")); logger.info("--> trigger repository cleanup"); clusterAdmin().prepareCleanupRepository("test-repo").get(); @@ -263,10 +263,9 @@ public void testRestoreIndexWithMissingShards() throws Exception { logger.info("--> start snapshot with default settings without a closed index - should fail"); final SnapshotException sne = expectThrows( SnapshotException.class, - () -> clusterAdmin().prepareCreateSnapshot("test-repo", "test-snap-1") + clusterAdmin().prepareCreateSnapshot("test-repo", "test-snap-1") .setIndices("test-idx-all", "test-idx-none", "test-idx-some", "test-idx-closed") .setWaitForCompletion(true) - .get() ); assertThat(sne.getMessage(), containsString("Indices don't have primary shards")); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/FeatureStateResetApiIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/FeatureStateResetApiIT.java index 1f86d4cb39ea4..c3dbfd03cae38 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/FeatureStateResetApiIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/FeatureStateResetApiIT.java @@ -83,20 +83,16 @@ public void testResetSystemIndices() throws Exception { ); // verify that both indices are gone - Exception e1 = expectThrows(IndexNotFoundException.class, () -> indicesAdmin().prepareGetIndex().addIndices(systemIndex1).get()); - + Exception e1 = expectThrows(IndexNotFoundException.class, indicesAdmin().prepareGetIndex().addIndices(systemIndex1)); assertThat(e1.getMessage(), containsString("no such index")); - Exception e2 = expectThrows(IndexNotFoundException.class, () -> indicesAdmin().prepareGetIndex().addIndices(associatedIndex).get()); - + Exception e2 = expectThrows(IndexNotFoundException.class, indicesAdmin().prepareGetIndex().addIndices(associatedIndex)); assertThat(e2.getMessage(), containsString("no such index")); - 
Exception e3 = expectThrows(IndexNotFoundException.class, () -> indicesAdmin().prepareGetIndex().addIndices(systemIndex2).get()); - + Exception e3 = expectThrows(IndexNotFoundException.class, indicesAdmin().prepareGetIndex().addIndices(systemIndex2)); assertThat(e3.getMessage(), containsString("no such index")); GetIndexResponse response = indicesAdmin().prepareGetIndex().addIndices("my_index").get(); - assertThat(response.getIndices(), arrayContaining("my_index")); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/GetSnapshotsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/GetSnapshotsIT.java index a5fe09c68f862..6b5b3826272ce 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/GetSnapshotsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/GetSnapshotsIT.java @@ -226,21 +226,17 @@ public void testPaginationRequiresVerboseListing() throws Exception { createNSnapshots(repoName, randomIntBetween(1, 5)); expectThrows( ActionRequestValidationException.class, - () -> clusterAdmin().prepareGetSnapshots(repoName) + clusterAdmin().prepareGetSnapshots(repoName) .setVerbose(false) .setSort(GetSnapshotsRequest.SortBy.DURATION) .setSize(GetSnapshotsRequest.NO_LIMIT) - .execute() - .actionGet() ); expectThrows( ActionRequestValidationException.class, - () -> clusterAdmin().prepareGetSnapshots(repoName) + clusterAdmin().prepareGetSnapshots(repoName) .setVerbose(false) .setSort(GetSnapshotsRequest.SortBy.START_TIME) .setSize(randomIntBetween(1, 100)) - .execute() - .actionGet() ); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/MultiClusterRepoAccessIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/MultiClusterRepoAccessIT.java index e6bae861e1d04..8fc6e9e2aa3d8 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/MultiClusterRepoAccessIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/MultiClusterRepoAccessIT.java @@ -127,7 +127,7 @@ public void testConcurrentDeleteFromOtherCluster() throws InterruptedException { final SnapshotException sne = expectThrows( SnapshotException.class, - () -> clusterAdmin().prepareCreateSnapshot(repoNameOnFirstCluster, "snap-4").setWaitForCompletion(true).get() + clusterAdmin().prepareCreateSnapshot(repoNameOnFirstCluster, "snap-4").setWaitForCompletion(true) ); assertThat(sne.getMessage(), containsString("failed to update snapshot in repository")); final RepositoryException cause = (RepositoryException) sne.getCause(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoriesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoriesIT.java index a6c8e0b08c9ed..6d36ce6924826 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoriesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoriesIT.java @@ -9,6 +9,7 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionFuture; +import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesResponse; import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryResponse; import org.elasticsearch.action.admin.cluster.snapshots.delete.TransportDeleteSnapshotAction; @@ -48,7 +49,6 @@ import static org.elasticsearch.repositories.blobstore.BlobStoreRepository.READONLY_SETTING_KEY; import 
static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertRequestBuilderThrows; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.notNullValue; @@ -204,22 +204,27 @@ public void testRepositoryVerification() { Settings settings = Settings.builder().put("location", randomRepoPath()).put("random_control_io_exception_rate", 1.0).build(); Settings readonlySettings = Settings.builder().put(settings).put(READONLY_SETTING_KEY, true).build(); logger.info("--> creating repository that cannot write any files - should fail"); - assertRequestBuilderThrows( - client.admin().cluster().preparePutRepository("test-repo-1").setType("mock").setSettings(settings), - RepositoryVerificationException.class - ); + ActionRequestBuilder builder3 = client.admin() + .cluster() + .preparePutRepository("test-repo-1") + .setType("mock") + .setSettings(settings); + expectThrows(RepositoryVerificationException.class, builder3); logger.info("--> creating read-only repository that cannot read any files - should fail"); - assertRequestBuilderThrows( - client.admin().cluster().preparePutRepository("test-repo-2").setType("mock").setSettings(readonlySettings), - RepositoryVerificationException.class - ); + ActionRequestBuilder builder2 = client.admin() + .cluster() + .preparePutRepository("test-repo-2") + .setType("mock") + .setSettings(readonlySettings); + expectThrows(RepositoryVerificationException.class, builder2); logger.info("--> creating repository that cannot write any files, but suppress verification - should be acked"); assertAcked(client.admin().cluster().preparePutRepository("test-repo-1").setType("mock").setSettings(settings).setVerify(false)); logger.info("--> verifying repository"); - assertRequestBuilderThrows(client.admin().cluster().prepareVerifyRepository("test-repo-1"), RepositoryVerificationException.class); + ActionRequestBuilder builder1 = client.admin().cluster().prepareVerifyRepository("test-repo-1"); + expectThrows(RepositoryVerificationException.class, builder1); logger.info("--> creating read-only repository that cannot read any files, but suppress verification - should be acked"); assertAcked( @@ -227,7 +232,8 @@ public void testRepositoryVerification() { ); logger.info("--> verifying repository"); - assertRequestBuilderThrows(client.admin().cluster().prepareVerifyRepository("test-repo-2"), RepositoryVerificationException.class); + ActionRequestBuilder builder = client.admin().cluster().prepareVerifyRepository("test-repo-2"); + expectThrows(RepositoryVerificationException.class, builder); Path location = randomRepoPath(); @@ -286,20 +292,14 @@ public void testRepositoryConflict() throws Exception { ); logger.info("--> try deleting the repository, should fail because the deletion of the snapshot is in progress"); - RepositoryConflictException e1 = expectThrows( - RepositoryConflictException.class, - () -> clusterAdmin().prepareDeleteRepository(repo).get() - ); + RepositoryConflictException e1 = expectThrows(RepositoryConflictException.class, clusterAdmin().prepareDeleteRepository(repo)); assertThat(e1.status(), equalTo(RestStatus.CONFLICT)); assertThat(e1.getMessage(), containsString("trying to modify or unregister repository that is currently used")); logger.info("--> try updating the repository, should fail because the deletion of the snapshot is in progress"); RepositoryConflictException e2 = expectThrows( 
RepositoryConflictException.class, - () -> clusterAdmin().preparePutRepository(repo) - .setType("mock") - .setSettings(Settings.builder().put("location", randomRepoPath())) - .get() + clusterAdmin().preparePutRepository(repo).setType("mock").setSettings(Settings.builder().put("location", randomRepoPath())) ); assertThat(e2.status(), equalTo(RestStatus.CONFLICT)); assertThat(e2.getMessage(), containsString("trying to modify or unregister repository that is currently used")); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoryThrottlingStatsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoryThrottlingStatsIT.java index 0f0858982b4ad..d8bc9327a2edd 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoryThrottlingStatsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoryThrottlingStatsIT.java @@ -37,7 +37,7 @@ public void testRepositoryThrottlingStats() throws Exception { IndexStats indexStats = indicesStats.getIndex("test-idx"); long totalSizeInBytes = 0; for (ShardStats shard : indexStats.getShards()) { - totalSizeInBytes += shard.getStats().getStore().getSizeInBytes(); + totalSizeInBytes += shard.getStats().getStore().sizeInBytes(); } logger.info("--> total shards size: {} bytes", totalSizeInBytes); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RestoreSnapshotIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RestoreSnapshotIT.java index b0589d32ef7ad..fae2c437b427c 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RestoreSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RestoreSnapshotIT.java @@ -12,6 +12,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionFuture; +import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse; @@ -55,7 +56,6 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertIndexTemplateExists; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertIndexTemplateMissing; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertRequestBuilderThrows; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -697,30 +697,26 @@ public void testChangeSettingsOnRestore() throws Exception { .build(); logger.info("--> try restoring while changing the number of shards - should fail"); - assertRequestBuilderThrows( - client.admin() - .cluster() - .prepareRestoreSnapshot("test-repo", "test-snap") - .setIgnoreIndexSettings("index.analysis.*") - .setIndexSettings(newIncorrectIndexSettings) - .setWaitForCompletion(true), - SnapshotRestoreException.class - ); + ActionRequestBuilder builder1 = client.admin() + .cluster() + .prepareRestoreSnapshot("test-repo", "test-snap") + .setIgnoreIndexSettings("index.analysis.*") + .setIndexSettings(newIncorrectIndexSettings) + .setWaitForCompletion(true); + expectThrows(SnapshotRestoreException.class, builder1); logger.info("--> try restoring while 
changing the number of replicas to a negative number - should fail"); Settings newIncorrectReplicasIndexSettings = Settings.builder() .put(newIndexSettings) .put(SETTING_NUMBER_OF_REPLICAS.substring(IndexMetadata.INDEX_SETTING_PREFIX.length()), randomIntBetween(-10, -1)) .build(); - assertRequestBuilderThrows( - client.admin() - .cluster() - .prepareRestoreSnapshot("test-repo", "test-snap") - .setIgnoreIndexSettings("index.analysis.*") - .setIndexSettings(newIncorrectReplicasIndexSettings) - .setWaitForCompletion(true), - IllegalArgumentException.class - ); + ActionRequestBuilder builder = client.admin() + .cluster() + .prepareRestoreSnapshot("test-repo", "test-snap") + .setIgnoreIndexSettings("index.analysis.*") + .setIndexSettings(newIncorrectReplicasIndexSettings) + .setWaitForCompletion(true); + expectThrows(IllegalArgumentException.class, builder); logger.info("--> restore index with correct settings from the snapshot"); RestoreSnapshotResponse restoreSnapshotResponse = client.admin() @@ -875,11 +871,10 @@ public void testForbidDisableSoftDeletesDuringRestore() throws Exception { createSnapshot("test-repo", "snapshot-0", Collections.singletonList("test-index")); final SnapshotRestoreException restoreError = expectThrows( SnapshotRestoreException.class, - () -> clusterAdmin().prepareRestoreSnapshot("test-repo", "snapshot-0") + clusterAdmin().prepareRestoreSnapshot("test-repo", "snapshot-0") .setIndexSettings(Settings.builder().put(INDEX_SOFT_DELETES_SETTING.getKey(), false)) .setRenamePattern("test-index") .setRenameReplacement("new-index") - .get() ); assertThat(restoreError.getMessage(), containsString("cannot disable setting [index.soft_deletes.enabled] on restore")); } @@ -892,7 +887,7 @@ public void testFailOnAncientVersion() throws Exception { final String oldSnapshot = initWithSnapshotVersion(repoName, repoPath, oldVersion); final SnapshotRestoreException snapshotRestoreException = expectThrows( SnapshotRestoreException.class, - () -> clusterAdmin().prepareRestoreSnapshot(repoName, oldSnapshot).get() + clusterAdmin().prepareRestoreSnapshot(repoName, oldSnapshot) ); assertThat( snapshotRestoreException.getMessage(), diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java index 260a5b9ffce3f..f919194b6bb5d 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java @@ -767,7 +767,7 @@ public void testUnallocatedShards() { logger.info("--> snapshot"); final SnapshotException sne = expectThrows( SnapshotException.class, - () -> clusterAdmin().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx").get() + clusterAdmin().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx") ); assertThat(sne.getMessage(), containsString("Indices don't have primary shards")); assertThat(getRepositoryData("test-repo"), is(RepositoryData.EMPTY)); @@ -1180,7 +1180,7 @@ public void testSnapshotStatus() throws Exception { // test that getting an unavailable snapshot status throws an exception if ignoreUnavailable is false on the request SnapshotMissingException ex = expectThrows( SnapshotMissingException.class, - () -> 
client.admin().cluster().prepareSnapshotStatus("test-repo").addSnapshots("test-snap-doesnt-exist").get() + client.admin().cluster().prepareSnapshotStatus("test-repo").addSnapshots("test-snap-doesnt-exist") ); assertEquals("[test-repo:test-snap-doesnt-exist] is missing", ex.getMessage()); // test that getting an unavailable snapshot status does not throw an exception if ignoreUnavailable is true on the request @@ -1453,7 +1453,7 @@ public void testDeleteSnapshotWhileRestoringFails() throws Exception { logger.info("--> try deleting the snapshot while the restore is in progress (should throw an error)"); ConcurrentSnapshotExecutionException e = expectThrows( ConcurrentSnapshotExecutionException.class, - () -> clusterAdmin().prepareDeleteSnapshot(repoName, snapshotName).get() + clusterAdmin().prepareDeleteSnapshot(repoName, snapshotName) ); assertEquals(repoName, e.getRepositoryName()); assertEquals(snapshotName, e.getSnapshotName()); @@ -1483,16 +1483,10 @@ public void testSnapshotName() throws Exception { createRepository("test-repo", "fs"); - expectThrows(InvalidSnapshotNameException.class, () -> client.admin().cluster().prepareCreateSnapshot("test-repo", "_foo").get()); - expectThrows( - SnapshotMissingException.class, - () -> client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("_foo").get() - ); - expectThrows(SnapshotMissingException.class, () -> client.admin().cluster().prepareDeleteSnapshot("test-repo", "_foo").get()); - expectThrows( - SnapshotMissingException.class, - () -> client.admin().cluster().prepareSnapshotStatus("test-repo").setSnapshots("_foo").get() - ); + expectThrows(InvalidSnapshotNameException.class, client.admin().cluster().prepareCreateSnapshot("test-repo", "_foo")); + expectThrows(SnapshotMissingException.class, client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("_foo")); + expectThrows(SnapshotMissingException.class, client.admin().cluster().prepareDeleteSnapshot("test-repo", "_foo")); + expectThrows(SnapshotMissingException.class, client.admin().cluster().prepareSnapshotStatus("test-repo").setSnapshots("_foo")); } public void testListCorruptedSnapshot() throws Exception { @@ -1538,7 +1532,7 @@ public void testListCorruptedSnapshot() throws Exception { final SnapshotException ex = expectThrows( SnapshotException.class, - () -> client.admin().cluster().prepareGetSnapshots("test-repo").setIgnoreUnavailable(false).get() + client.admin().cluster().prepareGetSnapshots("test-repo").setIgnoreUnavailable(false) ); assertThat(ex.getRepositoryName(), equalTo("test-repo")); assertThat(ex.getSnapshotName(), equalTo("test-snap-2")); @@ -1580,7 +1574,7 @@ public void testRestoreSnapshotWithCorruptedGlobalState() throws Exception { SnapshotException ex = expectThrows( SnapshotException.class, - () -> clusterAdmin().prepareRestoreSnapshot(repoName, snapshotName).setRestoreGlobalState(true).setWaitForCompletion(true).get() + clusterAdmin().prepareRestoreSnapshot(repoName, snapshotName).setRestoreGlobalState(true).setWaitForCompletion(true) ); assertThat(ex.getRepositoryName(), equalTo(repoName)); assertThat(ex.getSnapshotName(), equalTo(snapshotName)); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotBrokenSettingsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotBrokenSettingsIT.java index df2cf31e37470..c31eafa8444ad 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotBrokenSettingsIT.java +++ 
b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotBrokenSettingsIT.java @@ -70,12 +70,7 @@ public void testExceptionWhenRestoringPersistentSettings() { logger.info("--> restore snapshot"); final IllegalArgumentException ex = expectThrows( IllegalArgumentException.class, - client.admin() - .cluster() - .prepareRestoreSnapshot("test-repo", "test-snap") - .setRestoreGlobalState(true) - .setWaitForCompletion(true) - .execute()::actionGet + client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setRestoreGlobalState(true).setWaitForCompletion(true) ); assertEquals(BrokenSettingPlugin.EXCEPTION.getMessage(), ex.getMessage()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotShutdownIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotShutdownIT.java new file mode 100644 index 0000000000000..746d2907f47dc --- /dev/null +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotShutdownIT.java @@ -0,0 +1,519 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.snapshots; + +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.admin.cluster.configuration.AddVotingConfigExclusionsRequest; +import org.elasticsearch.action.admin.cluster.configuration.ClearVotingConfigExclusionsRequest; +import org.elasticsearch.action.admin.cluster.configuration.TransportAddVotingConfigExclusionsAction; +import org.elasticsearch.action.admin.cluster.configuration.TransportClearVotingConfigExclusionsAction; +import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; +import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsRequest; +import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse; +import org.elasticsearch.action.support.ActionTestUtils; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.SubscribableListener; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.SnapshotDeletionsInProgress; +import org.elasticsearch.cluster.SnapshotsInProgress; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.NodesShutdownMetadata; +import org.elasticsearch.cluster.metadata.SingleNodeShutdownMetadata; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Priority; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.CollectionUtils; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.ClusterServiceUtils; +import org.elasticsearch.test.transport.MockTransportService; + +import java.util.Collection; +import java.util.Map; +import java.util.concurrent.CyclicBarrier; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.stream.Stream; + +import static org.hamcrest.Matchers.containsString; 
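+// Note on a pattern that recurs throughout this diff: expectThrows(...) now accepts the
+// not-yet-executed ActionRequestBuilder (or an ActionFuture) directly, instead of a lambda
+// that calls get(), which is what lets assertRequestBuilderThrows and the "() -> builder.get()"
+// wrappers be removed. A hedged sketch of what such an overload presumably looks like
+// (hypothetical; the actual ESTestCase signature is not shown in this diff):
+//
+//     public static <T extends Throwable> T expectThrows(Class<T> expectedType, ActionRequestBuilder<?, ?> builder) {
+//         return expectThrows(expectedType, () -> builder.get()); // delegate to the lambda-based overload
+//     }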
+import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.oneOf; + +public class SnapshotShutdownIT extends AbstractSnapshotIntegTestCase { + + private static final String REQUIRE_NODE_NAME_SETTING = IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_PREFIX + "._name"; + + @Override + protected Collection<Class<? extends Plugin>> nodePlugins() { + return CollectionUtils.appendToCopy(super.nodePlugins(), MockTransportService.TestPlugin.class); + } + + public void testRestartNodeDuringSnapshot() throws Exception { + // Marking a node for restart has no impact on snapshots (see #71333 for how to handle this case) + internalCluster().ensureAtLeastNumDataNodes(1); + final var originalNode = internalCluster().startDataOnlyNode(); + final var indexName = randomIdentifier(); + createIndexWithContent(indexName, indexSettings(1, 0).put(REQUIRE_NODE_NAME_SETTING, originalNode).build()); + + final var repoName = randomIdentifier(); + createRepository(repoName, "mock"); + + final var clusterService = internalCluster().getCurrentMasterNodeInstance(ClusterService.class); + final var snapshotFuture = startFullSnapshotBlockedOnDataNode(randomIdentifier(), repoName, originalNode); + final var snapshotCompletesWithoutPausingListener = ClusterServiceUtils.addTemporaryStateListener(clusterService, state -> { + final var entriesForRepo = SnapshotsInProgress.get(state).forRepo(repoName); + if (entriesForRepo.isEmpty()) { + return true; + } + assertThat(entriesForRepo, hasSize(1)); + final var shardSnapshotStatuses = entriesForRepo.iterator().next().shards().values(); + assertThat(shardSnapshotStatuses, hasSize(1)); + assertThat( + shardSnapshotStatuses.iterator().next().state(), + oneOf(SnapshotsInProgress.ShardState.INIT, SnapshotsInProgress.ShardState.SUCCESS) + ); + return false; + }); + + PlainActionFuture.<Void, RuntimeException>get( + fut -> putShutdownMetadata( + clusterService, + SingleNodeShutdownMetadata.builder() + .setType(SingleNodeShutdownMetadata.Type.RESTART) + .setStartedAtMillis(clusterService.threadPool().absoluteTimeInMillis()) + .setReason("test"), + originalNode, + fut + ), + 10, + TimeUnit.SECONDS + ); + assertFalse(snapshotCompletesWithoutPausingListener.isDone()); + unblockAllDataNodes(repoName); // lets the shard snapshot continue so the snapshot can succeed + assertEquals(SnapshotState.SUCCESS, snapshotFuture.get(10, TimeUnit.SECONDS).getSnapshotInfo().state()); + safeAwait(snapshotCompletesWithoutPausingListener); + clearShutdownMetadata(clusterService); + } + + public void testRemoveNodeDuringSnapshot() throws Exception { + internalCluster().ensureAtLeastNumDataNodes(1); + final var originalNode = internalCluster().startDataOnlyNode(); + final var indexName = randomIdentifier(); + createIndexWithContent(indexName, indexSettings(1, 0).put(REQUIRE_NODE_NAME_SETTING, originalNode).build()); + + final var repoName = randomIdentifier(); + createRepository(repoName, "mock"); + + final var clusterService = internalCluster().getCurrentMasterNodeInstance(ClusterService.class); + final var snapshotFuture = startFullSnapshotBlockedOnDataNode(randomIdentifier(), repoName, originalNode); + final var snapshotPausedListener = createSnapshotPausedListener(clusterService, repoName, indexName); + + updateIndexSettings(Settings.builder().putNull(REQUIRE_NODE_NAME_SETTING), indexName); + putShutdownForRemovalMetadata(originalNode, clusterService); + unblockAllDataNodes(repoName); // lets the shard snapshot abort, which frees up the shard so it can move + safeAwait(snapshotPausedListener); + + // snapshot completes when the node 
vacates even though it hasn't been removed yet + assertEquals(SnapshotState.SUCCESS, snapshotFuture.get(10, TimeUnit.SECONDS).getSnapshotInfo().state()); + + if (randomBoolean()) { + internalCluster().stopNode(originalNode); + ensureGreen(indexName); + } + + clearShutdownMetadata(clusterService); + } + + public void testRemoveNodeAndFailoverMasterDuringSnapshot() throws Exception { + internalCluster().ensureAtLeastNumDataNodes(1); + final var originalNode = internalCluster().startDataOnlyNode(); + final var indexName = randomIdentifier(); + createIndexWithContent(indexName, indexSettings(1, 0).put(REQUIRE_NODE_NAME_SETTING, originalNode).build()); + + final var repoName = randomIdentifier(); + createRepository(repoName, "mock"); + + final var clusterService = internalCluster().getCurrentMasterNodeInstance(ClusterService.class); + final var snapshotFuture = startFullSnapshotBlockedOnDataNode(randomIdentifier(), repoName, originalNode); + final var snapshotPausedListener = createSnapshotPausedListener(clusterService, repoName, indexName); + + final var snapshotStatusUpdateBarrier = new CyclicBarrier(2); + final var masterName = internalCluster().getMasterName(); + final var masterTransportService = MockTransportService.getInstance(masterName); + masterTransportService.addRequestHandlingBehavior( + SnapshotsService.UPDATE_SNAPSHOT_STATUS_ACTION_NAME, + (handler, request, channel, task) -> masterTransportService.getThreadPool().generic().execute(() -> { + safeAwait(snapshotStatusUpdateBarrier); + safeAwait(snapshotStatusUpdateBarrier); + try { + handler.messageReceived(request, channel, task); + } catch (Exception e) { + fail(e); + } + }) + ); + + updateIndexSettings(Settings.builder().putNull(REQUIRE_NODE_NAME_SETTING), indexName); + putShutdownForRemovalMetadata(originalNode, clusterService); + unblockAllDataNodes(repoName); // lets the shard snapshot abort, which frees up the shard so it can move + safeAwait(snapshotStatusUpdateBarrier); // wait for the data node to finish and then try and update the master + masterTransportService.clearAllRules(); // the shard might migrate to the old master, so let it process more updates + + if (internalCluster().numMasterNodes() == 1) { + internalCluster().startMasterOnlyNode(); + } + safeAwait( + SubscribableListener.newForked( + l -> client().execute( + TransportAddVotingConfigExclusionsAction.TYPE, + new AddVotingConfigExclusionsRequest(Strings.EMPTY_ARRAY, new String[] { masterName }, TimeValue.timeValueSeconds(10)), + l + ) + ) + ); + safeAwait( + ClusterServiceUtils.addTemporaryStateListener( + clusterService, + s -> s.nodes().getMasterNode() != null && s.nodes().getMasterNode().getName().equals(masterName) == false + ) + ); + + logger.info("--> new master elected, releasing blocked request"); + safeAwait(snapshotStatusUpdateBarrier); // let the old master try and update the state + logger.info("--> waiting for snapshot pause"); + safeAwait(snapshotPausedListener); + logger.info("--> snapshot was paused"); + + // snapshot API fails on master failover + assertThat( + asInstanceOf( + SnapshotException.class, + ExceptionsHelper.unwrapCause( + expectThrows(ExecutionException.class, RuntimeException.class, () -> snapshotFuture.get(10, TimeUnit.SECONDS)) + ) + ).getMessage(), + containsString("no longer master") + ); + + // but the snapshot itself completes + safeAwait(ClusterServiceUtils.addTemporaryStateListener(clusterService, state -> SnapshotsInProgress.get(state).isEmpty())); + + // flush master queue to ensure the completion is applied everywhere 
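+ // Note: a health request with setWaitForEvents(Priority.LANGUID) only completes once the master
+ // has processed every pending cluster-state task at LANGUID (the lowest) priority or above, so
+ // the call below effectively drains the master's queue before the snapshot list is inspected.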
+ safeAwait( + SubscribableListener.newForked( + l -> client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).execute(l) + ) + ); + + // and succeeds + final var snapshots = safeAwait( + SubscribableListener.newForked( + l -> client().admin().cluster().getSnapshots(new GetSnapshotsRequest(repoName), l) + ) + ).getSnapshots(); + assertThat(snapshots, hasSize(1)); + assertEquals(SnapshotState.SUCCESS, snapshots.get(0).state()); + + if (randomBoolean()) { + internalCluster().stopNode(originalNode); + } + + safeAwait(SubscribableListener.newForked(l -> { + final var clearVotingConfigExclusionsRequest = new ClearVotingConfigExclusionsRequest(); + clearVotingConfigExclusionsRequest.setWaitForRemoval(false); + client().execute(TransportClearVotingConfigExclusionsAction.TYPE, clearVotingConfigExclusionsRequest, l); + })); + + clearShutdownMetadata(internalCluster().getCurrentMasterNodeInstance(ClusterService.class)); + } + + public void testRemoveNodeDuringSnapshotWithOtherRunningShardSnapshots() throws Exception { + // SnapshotInProgressAllocationDecider only considers snapshots having shards in INIT state, so a single-shard snapshot such as the + // one in testRemoveNodeDuringSnapshot will be ignored when the shard is paused, permitting the shard movement. This test verifies + // that the shard is permitted to move even when the snapshot has other shards in INIT state. + + final var repoName = randomIdentifier(); + createRepository(repoName, "mock"); + + // create another index on another node which will be blocked (remain in state INIT) throughout + final var otherNode = internalCluster().startDataOnlyNode(); + final var otherIndex = randomIdentifier(); + createIndexWithContent(otherIndex, indexSettings(1, 0).put(REQUIRE_NODE_NAME_SETTING, otherNode).build()); + blockDataNode(repoName, otherNode); + + final var nodeForRemoval = internalCluster().startDataOnlyNode(); + final var indexName = randomIdentifier(); + createIndexWithContent(indexName, indexSettings(1, 0).put(REQUIRE_NODE_NAME_SETTING, nodeForRemoval).build()); + + final var clusterService = internalCluster().getCurrentMasterNodeInstance(ClusterService.class); + final var snapshotFuture = startFullSnapshotBlockedOnDataNode(randomIdentifier(), repoName, nodeForRemoval); + final var snapshotPausedListener = createSnapshotPausedListener(clusterService, repoName, indexName); + waitForBlock(otherNode, repoName); + + putShutdownForRemovalMetadata(nodeForRemoval, clusterService); + unblockNode(repoName, nodeForRemoval); // lets the shard snapshot abort, which frees up the shard to move + safeAwait(snapshotPausedListener); + + // adjust the allocation filter so that the shard moves + updateIndexSettings(Settings.builder().putNull(REQUIRE_NODE_NAME_SETTING), indexName); + + // wait for the target shard snapshot to succeed + safeAwait( + ClusterServiceUtils.addTemporaryStateListener( + clusterService, + state -> SnapshotsInProgress.get(state) + .asStream() + .allMatch( + e -> e.shards() + .entrySet() + .stream() + .anyMatch( + shardEntry -> shardEntry.getKey().getIndexName().equals(indexName) + && switch (shardEntry.getValue().state()) { + case INIT, PAUSED_FOR_NODE_REMOVAL -> false; + case SUCCESS -> true; + case FAILED, ABORTED, MISSING, QUEUED, WAITING -> throw new AssertionError(shardEntry.toString()); + } + ) + ) + ) + ); + + unblockAllDataNodes(repoName); + + // snapshot completes when the node vacates even though it hasn't been removed yet + assertEquals(SnapshotState.SUCCESS, snapshotFuture.get(10, 
TimeUnit.SECONDS).getSnapshotInfo().state()); + + if (randomBoolean()) { + internalCluster().stopNode(nodeForRemoval); + } + + clearShutdownMetadata(clusterService); + } + + public void testStartRemoveNodeButDoNotComplete() throws Exception { + final var primaryNode = internalCluster().startDataOnlyNode(); + final var indexName = randomIdentifier(); + createIndexWithContent(indexName, indexSettings(1, 0).put(REQUIRE_NODE_NAME_SETTING, primaryNode).build()); + + final var repoName = randomIdentifier(); + createRepository(repoName, "mock"); + + final var clusterService = internalCluster().getCurrentMasterNodeInstance(ClusterService.class); + final var snapshotFuture = startFullSnapshotBlockedOnDataNode(randomIdentifier(), repoName, primaryNode); + final var snapshotPausedListener = createSnapshotPausedListener(clusterService, repoName, indexName); + + putShutdownForRemovalMetadata(primaryNode, clusterService); + unblockAllDataNodes(repoName); // lets the shard snapshot abort, but allocation filtering stops it from moving + safeAwait(snapshotPausedListener); + assertFalse(snapshotFuture.isDone()); + + // give up on the node shutdown so the shard snapshot can restart + clearShutdownMetadata(clusterService); + + assertEquals(SnapshotState.SUCCESS, snapshotFuture.get(10, TimeUnit.SECONDS).getSnapshotInfo().state()); + } + + public void testAbortSnapshotWhileRemovingNode() throws Exception { + final var primaryNode = internalCluster().startDataOnlyNode(); + final var indexName = randomIdentifier(); + createIndexWithContent(indexName, indexSettings(1, 0).put(REQUIRE_NODE_NAME_SETTING, primaryNode).build()); + + final var repoName = randomIdentifier(); + createRepository(repoName, "mock"); + + final var snapshotName = randomIdentifier(); + final var snapshotFuture = startFullSnapshotBlockedOnDataNode(snapshotName, repoName, primaryNode); + + final var updateSnapshotStatusBarrier = new CyclicBarrier(2); + final var masterTransportService = MockTransportService.getInstance(internalCluster().getMasterName()); + masterTransportService.addRequestHandlingBehavior( + SnapshotsService.UPDATE_SNAPSHOT_STATUS_ACTION_NAME, + (handler, request, channel, task) -> masterTransportService.getThreadPool().generic().execute(() -> { + safeAwait(updateSnapshotStatusBarrier); + safeAwait(updateSnapshotStatusBarrier); + try { + handler.messageReceived(request, channel, task); + } catch (Exception e) { + fail(e); + } + }) + ); + + final var clusterService = internalCluster().getCurrentMasterNodeInstance(ClusterService.class); + putShutdownForRemovalMetadata(primaryNode, clusterService); + unblockAllDataNodes(repoName); // lets the shard snapshot abort, but allocation filtering stops it from moving + safeAwait(updateSnapshotStatusBarrier); // wait for data node to notify master that the shard snapshot is paused + + // abort snapshot (and wait for the abort to land in the cluster state) + final var deleteStartedListener = ClusterServiceUtils.addTemporaryStateListener(clusterService, state -> { + if (SnapshotDeletionsInProgress.get(state).getEntries().isEmpty()) { + return false; + } + + assertEquals(SnapshotsInProgress.State.ABORTED, SnapshotsInProgress.get(state).forRepo(repoName).get(0).state()); + return true; + }); + + final var deleteSnapshotFuture = startDeleteSnapshot(repoName, snapshotName); // abort the snapshot + safeAwait(deleteStartedListener); + + safeAwait(updateSnapshotStatusBarrier); // process pause notification now that the snapshot is ABORTED + + assertEquals(SnapshotState.FAILED, snapshotFuture.get(10, 
TimeUnit.SECONDS).getSnapshotInfo().state()); + assertTrue(deleteSnapshotFuture.get(10, TimeUnit.SECONDS).isAcknowledged()); + + clearShutdownMetadata(clusterService); + } + + public void testShutdownWhileSuccessInFlight() throws Exception { + final var primaryNode = internalCluster().startDataOnlyNode(); + final var indexName = randomIdentifier(); + createIndexWithContent(indexName, indexSettings(1, 0).put(REQUIRE_NODE_NAME_SETTING, primaryNode).build()); + + final var repoName = randomIdentifier(); + createRepository(repoName, "mock"); + + final var clusterService = internalCluster().getCurrentMasterNodeInstance(ClusterService.class); + final var masterTransportService = MockTransportService.getInstance(internalCluster().getMasterName()); + masterTransportService.addRequestHandlingBehavior( + SnapshotsService.UPDATE_SNAPSHOT_STATUS_ACTION_NAME, + (handler, request, channel, task) -> putShutdownForRemovalMetadata( + clusterService, + primaryNode, + ActionTestUtils.assertNoFailureListener(ignored -> handler.messageReceived(request, channel, task)) + ) + ); + + assertEquals( + SnapshotState.SUCCESS, + startFullSnapshot(repoName, randomIdentifier()).get(10, TimeUnit.SECONDS).getSnapshotInfo().state() + ); + clearShutdownMetadata(clusterService); + } + + private static SubscribableListener<Void> createSnapshotPausedListener( + ClusterService clusterService, + String repoName, + String indexName + ) { + return ClusterServiceUtils.addTemporaryStateListener(clusterService, state -> { + final var entriesForRepo = SnapshotsInProgress.get(state).forRepo(repoName); + assertThat(entriesForRepo, hasSize(1)); + final var shardSnapshotStatuses = entriesForRepo.iterator() + .next() + .shards() + .entrySet() + .stream() + .flatMap(e -> e.getKey().getIndexName().equals(indexName) ? 
Stream.of(e.getValue()) : Stream.of()) + .toList(); + assertThat(shardSnapshotStatuses, hasSize(1)); + final var shardState = shardSnapshotStatuses.iterator().next().state(); + assertThat(shardState, oneOf(SnapshotsInProgress.ShardState.INIT, SnapshotsInProgress.ShardState.PAUSED_FOR_NODE_REMOVAL)); + return shardState == SnapshotsInProgress.ShardState.PAUSED_FOR_NODE_REMOVAL; + }); + } + + private static void putShutdownForRemovalMetadata(String nodeName, ClusterService clusterService) { + PlainActionFuture.<Void, RuntimeException>get( + fut -> putShutdownForRemovalMetadata(clusterService, nodeName, fut), + 10, + TimeUnit.SECONDS + ); + } + + private static void flushMasterQueue(ClusterService clusterService, ActionListener<Void> listener) { + clusterService.submitUnbatchedStateUpdateTask("flush queue", new ClusterStateUpdateTask(Priority.LANGUID) { + @Override + public ClusterState execute(ClusterState currentState) { + return currentState; + } + + @Override + public void onFailure(Exception e) { + fail(e); + } + + @Override + public void clusterStateProcessed(ClusterState initialState, ClusterState newState) { + listener.onResponse(null); + } + }); + } + + private static void putShutdownForRemovalMetadata(ClusterService clusterService, String nodeName, ActionListener<Void> listener) { + // not testing REPLACE just because it requires us to specify the replacement node + final var shutdownType = randomFrom(SingleNodeShutdownMetadata.Type.REMOVE, SingleNodeShutdownMetadata.Type.SIGTERM); + final var shutdownMetadata = SingleNodeShutdownMetadata.builder() + .setType(shutdownType) + .setStartedAtMillis(clusterService.threadPool().absoluteTimeInMillis()) + .setReason("test"); + switch (shutdownType) { + case SIGTERM -> shutdownMetadata.setGracePeriod(TimeValue.timeValueSeconds(60)); + } + SubscribableListener + + .<Void>newForked(l -> putShutdownMetadata(clusterService, shutdownMetadata, nodeName, l)) + .andThen((l, ignored) -> flushMasterQueue(clusterService, l)) + .addListener(listener); + } + + private static void putShutdownMetadata( + ClusterService clusterService, + SingleNodeShutdownMetadata.Builder shutdownMetadataBuilder, + String nodeName, + ActionListener<Void> listener + ) { + clusterService.submitUnbatchedStateUpdateTask("mark node for removal", new ClusterStateUpdateTask() { + @Override + public ClusterState execute(ClusterState currentState) { + final var nodeId = currentState.nodes().resolveNode(nodeName).getId(); + return currentState.copyAndUpdateMetadata( + mdb -> mdb.putCustom( + NodesShutdownMetadata.TYPE, + new NodesShutdownMetadata(Map.of(nodeId, shutdownMetadataBuilder.setNodeId(nodeId).build())) + ) + ); + } + + @Override + public void onFailure(Exception e) { + fail(e); + } + + @Override + public void clusterStateProcessed(ClusterState initialState, ClusterState newState) { + listener.onResponse(null); + } + }); + } + + private static void clearShutdownMetadata(ClusterService clusterService) { + PlainActionFuture.<Void, RuntimeException>get(fut -> clusterService.submitUnbatchedStateUpdateTask("remove restart marker", new ClusterStateUpdateTask() { + @Override + public ClusterState execute(ClusterState currentState) { + return currentState.copyAndUpdateMetadata(mdb -> mdb.putCustom(NodesShutdownMetadata.TYPE, NodesShutdownMetadata.EMPTY)); + } + + @Override + public void onFailure(Exception e) { + fail(e); + } + + @Override + public void clusterStateProcessed(ClusterState initialState, ClusterState newState) { + fut.onResponse(null); + } + }), 10, TimeUnit.SECONDS); + } +} diff --git 
a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStatusApisIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStatusApisIT.java index cc7c7709075c0..5bd59c712caf0 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStatusApisIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStatusApisIT.java @@ -138,7 +138,7 @@ public void testExceptionOnMissingSnapBlob() throws IOException { logger.info("--> delete snap-${uuid}.dat file for this snapshot to simulate concurrent delete"); IOUtils.rm(repoPath.resolve(BlobStoreRepository.SNAPSHOT_PREFIX + snapshotInfo.snapshotId().getUUID() + ".dat")); - expectThrows(SnapshotMissingException.class, () -> clusterAdmin().prepareGetSnapshots("test-repo").setSnapshots("test-snap").get()); + expectThrows(SnapshotMissingException.class, clusterAdmin().prepareGetSnapshots("test-repo").setSnapshots("test-snap")); } public void testExceptionOnMissingShardLevelSnapBlob() throws IOException { @@ -167,10 +167,7 @@ public void testExceptionOnMissingShardLevelSnapBlob() throws IOException { .resolve(BlobStoreRepository.SNAPSHOT_PREFIX + snapshotInfo.snapshotId().getUUID() + ".dat") ); - expectThrows( - SnapshotMissingException.class, - () -> clusterAdmin().prepareSnapshotStatus("test-repo").setSnapshots("test-snap").get() - ); + expectThrows(SnapshotMissingException.class, clusterAdmin().prepareSnapshotStatus("test-repo").setSnapshots("test-snap")); } public void testGetSnapshotsWithoutIndices() throws Exception { @@ -456,7 +453,7 @@ public void testGetSnapshotsWithSnapshotInProgress() throws Exception { expectThrows( SnapshotMissingException.class, - () -> clusterAdmin().prepareGetSnapshots("test-repo").setSnapshots(notExistedSnapshotName).setIgnoreUnavailable(false).get() + clusterAdmin().prepareGetSnapshots("test-repo").setSnapshots(notExistedSnapshotName).setIgnoreUnavailable(false) ); logger.info("--> unblock all data nodes"); @@ -499,7 +496,7 @@ public void testGetSnapshotsRequest() throws Exception { logger.info("--> get snapshots on an empty repository"); expectThrows( SnapshotMissingException.class, - () -> client.admin().cluster().prepareGetSnapshots(repositoryName).addSnapshots("non-existent-snapshot").get() + client.admin().cluster().prepareGetSnapshots(repositoryName).addSnapshots("non-existent-snapshot") ); // with ignore unavailable set to true, should not throw an exception GetSnapshotsResponse getSnapshotsResponse = client.admin() diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStressTestsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStressTestsIT.java index cd1b64ea8287b..7350e0366a37f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStressTestsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStressTestsIT.java @@ -28,7 +28,11 @@ import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.internal.Client; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.NodesShutdownMetadata; +import org.elasticsearch.cluster.metadata.SingleNodeShutdownMetadata; import org.elasticsearch.cluster.node.DiscoveryNode; import 
org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; @@ -311,6 +315,10 @@ public void run() throws InterruptedException { startCleaner(); } + if (randomBoolean()) { + startNodeShutdownMarker(); + } + if (completedSnapshotLatch.await(30, TimeUnit.SECONDS)) { logger.info("--> completed target snapshot count, finishing test"); } else { @@ -1163,6 +1171,104 @@ private void startNodeRestarter() { }); } + private void startNodeShutdownMarker() { + enqueueAction(() -> { + boolean rerun = true; + if (usually()) { + return; + } + try (TransferableReleasables localReleasables = new TransferableReleasables()) { + if (localReleasables.add(blockFullClusterRestart()) == null) { + return; + } + + final var node = randomFrom(shuffledNodes); + + if (localReleasables.add(tryAcquirePermit(node.permits)) == null) { + return; + } + + final var clusterService = cluster.getCurrentMasterNodeInstance(ClusterService.class); + + SubscribableListener + + .newForked( + l -> clusterService.submitUnbatchedStateUpdateTask( + "mark [" + node + "] for removal", + new ClusterStateUpdateTask() { + @Override + public ClusterState execute(ClusterState currentState) { + assertTrue( + Strings.toString(currentState), + currentState.metadata().nodeShutdowns().getAll().isEmpty() + ); + final var nodeId = currentState.nodes().resolveNode(node.nodeName).getId(); + return currentState.copyAndUpdateMetadata( + mdb -> mdb.putCustom( + NodesShutdownMetadata.TYPE, + new NodesShutdownMetadata( + Map.of( + nodeId, + SingleNodeShutdownMetadata.builder() + .setNodeId(nodeId) + .setType(SingleNodeShutdownMetadata.Type.REMOVE) + .setStartedAtMillis(clusterService.threadPool().absoluteTimeInMillis()) + .setReason("test") + .build() + ) + ) + ) + ); + } + + @Override + public void onFailure(Exception e) { + l.onFailure(e); + } + + @Override + public void clusterStateProcessed(ClusterState initialState, ClusterState newState) { + l.onResponse(null); + } + } + ) + ) + + .andThen( + (l, ignored) -> clusterService.submitUnbatchedStateUpdateTask( + "unmark [" + node + "] for removal", + new ClusterStateUpdateTask() { + @Override + public ClusterState execute(ClusterState currentState) { + return currentState.copyAndUpdateMetadata( + mdb -> mdb.putCustom(NodesShutdownMetadata.TYPE, NodesShutdownMetadata.EMPTY) + ); + } + + @Override + public void onFailure(Exception e) { + l.onFailure(e); + } + + @Override + public void clusterStateProcessed(ClusterState initialState, ClusterState newState) { + l.onResponse(null); + } + } + ) + ) + + .addListener(mustSucceed(ignored -> startNodeShutdownMarker())); + + rerun = false; + } finally { + if (rerun) { + startNodeShutdownMarker(); + } + } + }); + } + @Nullable // if we couldn't block node restarts private Releasable blockNodeRestarts() { try (TransferableReleasables localReleasables = new TransferableReleasables()) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotsServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotsServiceIT.java index c7933d7065ec2..7ee993915ae24 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotsServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotsServiceIT.java @@ -68,7 +68,7 @@ public void testDeletingSnapshotsIsLoggedAfterClusterStateIsProcessed() throws E final SnapshotMissingException e = expectThrows( SnapshotMissingException.class, - () -> startDeleteSnapshot("test-repo", 
"does-not-exist").actionGet() + startDeleteSnapshot("test-repo", "does-not-exist") ); assertThat(e.getMessage(), containsString("[test-repo:does-not-exist] is missing")); assertThat(startDeleteSnapshot("test-repo", "test-snapshot").actionGet().isAcknowledged(), is(true)); @@ -106,7 +106,7 @@ public void testSnapshotDeletionFailureShouldBeLogged() throws Exception { // Failure when listing root blobs final MockRepository mockRepository = getRepositoryOnMaster("test-repo"); mockRepository.setRandomControlIOExceptionRate(1.0); - final Exception e = expectThrows(Exception.class, () -> startDeleteSnapshot("test-repo", "test-snapshot").actionGet()); + final Exception e = expectThrows(Exception.class, startDeleteSnapshot("test-repo", "test-snapshot")); assertThat(e.getCause().getMessage(), containsString("Random IOException")); } else { // Failure when finalizing on index-N file @@ -115,7 +115,7 @@ public void testSnapshotDeletionFailureShouldBeLogged() throws Exception { deleteFuture = startDeleteSnapshot("test-repo", "test-snapshot"); waitForBlock(internalCluster().getMasterName(), "test-repo"); unblockNode("test-repo", internalCluster().getMasterName()); - final Exception e = expectThrows(Exception.class, deleteFuture::actionGet); + final Exception e = expectThrows(Exception.class, deleteFuture); assertThat(e.getCause().getMessage(), containsString("exception after block")); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SystemIndicesSnapshotIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SystemIndicesSnapshotIT.java index aef0c2324f167..058d5af7d9c85 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SystemIndicesSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SystemIndicesSnapshotIT.java @@ -312,10 +312,9 @@ public void testRestoreFeatureNotInSnapshot() { final String fakeFeatureStateName = "NonExistentTestPlugin"; SnapshotRestoreException exception = expectThrows( SnapshotRestoreException.class, - () -> clusterAdmin().prepareRestoreSnapshot(REPO_NAME, "test-snap") + clusterAdmin().prepareRestoreSnapshot(REPO_NAME, "test-snap") .setWaitForCompletion(true) .setFeatureStates("SystemIndexTestPlugin", fakeFeatureStateName) - .get() ); assertThat( @@ -332,11 +331,10 @@ public void testSnapshottingSystemIndexByNameIsRejected() throws Exception { IllegalArgumentException error = expectThrows( IllegalArgumentException.class, - () -> clusterAdmin().prepareCreateSnapshot(REPO_NAME, "test-snap") + clusterAdmin().prepareCreateSnapshot(REPO_NAME, "test-snap") .setIndices(SystemIndexTestPlugin.SYSTEM_INDEX_NAME) .setWaitForCompletion(true) .setIncludeGlobalState(randomBoolean()) - .get() ); assertThat( error.getMessage(), @@ -376,10 +374,9 @@ public void testRestoringSystemIndexByNameIsRejected() throws IllegalAccessExcep IllegalArgumentException ex = expectThrows( IllegalArgumentException.class, - () -> clusterAdmin().prepareRestoreSnapshot(REPO_NAME, "test-snap") + clusterAdmin().prepareRestoreSnapshot(REPO_NAME, "test-snap") .setWaitForCompletion(true) .setIndices(SystemIndexTestPlugin.SYSTEM_INDEX_NAME) - .get() ); assertThat( ex.getMessage(), @@ -611,11 +608,10 @@ public void testNoneFeatureStateMustBeAlone() { // run a snapshot including global state IllegalArgumentException createEx = expectThrows( IllegalArgumentException.class, - () -> clusterAdmin().prepareCreateSnapshot(REPO_NAME, "test-snap") + clusterAdmin().prepareCreateSnapshot(REPO_NAME, "test-snap") 
.setWaitForCompletion(true) .setIncludeGlobalState(randomBoolean()) .setFeatureStates("SystemIndexTestPlugin", "none", "AnotherSystemIndexTestPlugin") - .get() ); assertThat( createEx.getMessage(), @@ -634,11 +630,10 @@ public void testNoneFeatureStateMustBeAlone() { SnapshotRestoreException restoreEx = expectThrows( SnapshotRestoreException.class, - () -> clusterAdmin().prepareRestoreSnapshot(REPO_NAME, "test-snap") + clusterAdmin().prepareRestoreSnapshot(REPO_NAME, "test-snap") .setWaitForCompletion(true) .setRestoreGlobalState(randomBoolean()) .setFeatureStates("SystemIndexTestPlugin", "none") - .get() ); assertThat( restoreEx.getMessage(), diff --git a/server/src/internalClusterTest/java/org/elasticsearch/timeseries/support/TimeSeriesDimensionsLimitIT.java b/server/src/internalClusterTest/java/org/elasticsearch/timeseries/support/TimeSeriesDimensionsLimitIT.java index 4cf5316d16242..105e360cd4020 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/timeseries/support/TimeSeriesDimensionsLimitIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/timeseries/support/TimeSeriesDimensionsLimitIT.java @@ -55,7 +55,7 @@ public void testDimensionFieldNameLimit() throws IOException { "@timestamp", Instant.now().toEpochMilli() ); - final Exception ex = expectThrows(DocumentParsingException.class, indexRequestBuilder::get); + final Exception ex = expectThrows(DocumentParsingException.class, indexRequestBuilder); indexRequestBuilder.request().decRef(); assertThat( ex.getCause().getMessage(), @@ -92,7 +92,7 @@ public void testDimensionFieldValueLimit() throws IOException { "@timestamp", startTime + 1 ); - final Exception ex = expectThrows(DocumentParsingException.class, indexRequestBuilder2::get); + final Exception ex = expectThrows(DocumentParsingException.class, indexRequestBuilder2); indexRequestBuilder2.request().decRef(); assertThat(ex.getCause().getMessage(), equalTo("Dimension fields must be less than [1024] bytes but was [1025].")); } @@ -179,7 +179,7 @@ public void testTotalDimensionFieldsSizeLuceneLimitPlusOne() throws IOException source.put(dimensionFieldNames.get(i), randomAlphaOfLength(1024)); } IndexRequestBuilder indexRequestBuilder = prepareIndex("test").setSource(source); - final Exception ex = expectThrows(DocumentParsingException.class, indexRequestBuilder::get); + final Exception ex = expectThrows(DocumentParsingException.class, indexRequestBuilder); indexRequestBuilder.request().decRef(); assertEquals("_tsid longer than [32766] bytes [33903].", ex.getCause().getMessage()); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/update/UpdateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/update/UpdateIT.java index 79cb37ce40f82..5caf14d1ab5d6 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/update/UpdateIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/update/UpdateIT.java @@ -307,11 +307,8 @@ public void testUpdate() throws Exception { Script fieldIncScript = new Script(ScriptType.INLINE, UPDATE_SCRIPTS, FIELD_INC_SCRIPT, Collections.singletonMap("field", "field")); { - UpdateRequestBuilder updateRequestBuilder = client().prepareUpdate(indexOrAlias(), "1"); - DocumentMissingException ex = expectThrows( - DocumentMissingException.class, - () -> updateRequestBuilder.setScript(fieldIncScript).get() - ); + UpdateRequestBuilder updateRequestBuilder = client().prepareUpdate(indexOrAlias(), "1").setScript(fieldIncScript); + DocumentMissingException ex = 
expectThrows(DocumentMissingException.class, updateRequestBuilder); updateRequestBuilder.request().decRef(); assertEquals("[1]: document missing", ex.getMessage()); } @@ -489,38 +486,29 @@ public void testUpdateWithIfSeqNo() throws Exception { DocWriteResponse result = indexRequestBuilder.get(); indexRequestBuilder.request().decRef(); { - UpdateRequestBuilder updateRequestBuilder = client().prepareUpdate(indexOrAlias(), "1"); - expectThrows( - VersionConflictEngineException.class, - () -> updateRequestBuilder.setDoc(XContentFactory.jsonBuilder().startObject().field("field", 2).endObject()) - .setIfSeqNo(result.getSeqNo() + 1) - .setIfPrimaryTerm(result.getPrimaryTerm()) - .get() - ); + UpdateRequestBuilder updateRequestBuilder = client().prepareUpdate(indexOrAlias(), "1") + .setDoc(XContentFactory.jsonBuilder().startObject().field("field", 2).endObject()) + .setIfSeqNo(result.getSeqNo() + 1) + .setIfPrimaryTerm(result.getPrimaryTerm()); + expectThrows(VersionConflictEngineException.class, updateRequestBuilder); updateRequestBuilder.request().decRef(); } { - UpdateRequestBuilder updateRequestBuilder = client().prepareUpdate(indexOrAlias(), "1"); - expectThrows( - VersionConflictEngineException.class, - () -> updateRequestBuilder.setDoc(XContentFactory.jsonBuilder().startObject().field("field", 2).endObject()) - .setIfSeqNo(result.getSeqNo()) - .setIfPrimaryTerm(result.getPrimaryTerm() + 1) - .get() - ); + UpdateRequestBuilder updateRequestBuilder = client().prepareUpdate(indexOrAlias(), "1") + .setDoc(XContentFactory.jsonBuilder().startObject().field("field", 2).endObject()) + .setIfSeqNo(result.getSeqNo()) + .setIfPrimaryTerm(result.getPrimaryTerm() + 1); + expectThrows(VersionConflictEngineException.class, updateRequestBuilder); updateRequestBuilder.request().decRef(); } { - UpdateRequestBuilder updateRequestBuilder = client().prepareUpdate(indexOrAlias(), "1"); - expectThrows( - VersionConflictEngineException.class, - () -> updateRequestBuilder.setDoc(XContentFactory.jsonBuilder().startObject().field("field", 2).endObject()) - .setIfSeqNo(result.getSeqNo() + 1) - .setIfPrimaryTerm(result.getPrimaryTerm() + 1) - .get() - ); + UpdateRequestBuilder updateRequestBuilder = client().prepareUpdate(indexOrAlias(), "1") + .setDoc(XContentFactory.jsonBuilder().startObject().field("field", 2).endObject()) + .setIfSeqNo(result.getSeqNo() + 1) + .setIfPrimaryTerm(result.getPrimaryTerm() + 1); + expectThrows(VersionConflictEngineException.class, updateRequestBuilder); updateRequestBuilder.request().decRef(); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/versioning/SimpleVersioningIT.java b/server/src/internalClusterTest/java/org/elasticsearch/versioning/SimpleVersioningIT.java index ebd5e8a084ed8..580bb23b9db07 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/versioning/SimpleVersioningIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/versioning/SimpleVersioningIT.java @@ -8,6 +8,7 @@ package org.elasticsearch.versioning; import org.apache.lucene.tests.util.TestUtil; +import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.DocWriteResponse; @@ -36,7 +37,6 @@ import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFutureThrows; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; -import static 
org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertRequestBuilderThrows; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.lessThanOrEqualTo; @@ -80,7 +80,7 @@ public void testExternalGTE() throws Exception { .setSource("field1", "value1_1") .setVersion(13) .setVersionType(VersionType.EXTERNAL_GTE); - assertRequestBuilderThrows(indexRequestBuilder, VersionConflictEngineException.class); + expectThrows(VersionConflictEngineException.class, indexRequestBuilder); indexRequestBuilder.request().decRef(); client().admin().indices().prepareRefresh().get(); @@ -92,10 +92,8 @@ public void testExternalGTE() throws Exception { } // deleting with a lower version fails. - assertRequestBuilderThrows( - client().prepareDelete("test", "1").setVersion(2).setVersionType(VersionType.EXTERNAL_GTE), - VersionConflictEngineException.class - ); + ActionRequestBuilder<?, ?> builder = client().prepareDelete("test", "1").setVersion(2).setVersionType(VersionType.EXTERNAL_GTE); + expectThrows(VersionConflictEngineException.class, builder); // Delete with a higher or equal version deletes all versions up to the given one. long v = randomIntBetween(14, 17); @@ -236,18 +234,12 @@ public void testCompareAndSet() { assertFutureThrows(indexRequestBuilder.execute(), VersionConflictEngineException.class); indexRequestBuilder.request().decRef(); - assertRequestBuilderThrows( - client().prepareDelete("test", "1").setIfSeqNo(10).setIfPrimaryTerm(1), - VersionConflictEngineException.class - ); - assertRequestBuilderThrows( - client().prepareDelete("test", "1").setIfSeqNo(10).setIfPrimaryTerm(2), - VersionConflictEngineException.class - ); - assertRequestBuilderThrows( - client().prepareDelete("test", "1").setIfSeqNo(1).setIfPrimaryTerm(2), - VersionConflictEngineException.class - ); + ActionRequestBuilder<?, ?> builder6 = client().prepareDelete("test", "1").setIfSeqNo(10).setIfPrimaryTerm(1); + expectThrows(VersionConflictEngineException.class, builder6); + ActionRequestBuilder<?, ?> builder5 = client().prepareDelete("test", "1").setIfSeqNo(10).setIfPrimaryTerm(2); + expectThrows(VersionConflictEngineException.class, builder5); + ActionRequestBuilder<?, ?> builder4 = client().prepareDelete("test", "1").setIfSeqNo(1).setIfPrimaryTerm(2); + expectThrows(VersionConflictEngineException.class, builder4); client().admin().indices().prepareRefresh().get(); for (int i = 0; i < 10; i++) { @@ -278,24 +270,16 @@ public void testCompareAndSet() { assertThat(deleteResponse.getSeqNo(), equalTo(2L)); assertThat(deleteResponse.getPrimaryTerm(), equalTo(1L)); - assertRequestBuilderThrows( - client().prepareDelete("test", "1").setIfSeqNo(1).setIfPrimaryTerm(1), - VersionConflictEngineException.class - ); - assertRequestBuilderThrows( - client().prepareDelete("test", "1").setIfSeqNo(3).setIfPrimaryTerm(12), - VersionConflictEngineException.class - ); - assertRequestBuilderThrows( - client().prepareDelete("test", "1").setIfSeqNo(1).setIfPrimaryTerm(2), - VersionConflictEngineException.class - ); + ActionRequestBuilder<?, ?> builder3 = client().prepareDelete("test", "1").setIfSeqNo(1).setIfPrimaryTerm(1); + expectThrows(VersionConflictEngineException.class, builder3); + ActionRequestBuilder<?, ?> builder2 = client().prepareDelete("test", "1").setIfSeqNo(3).setIfPrimaryTerm(12); + expectThrows(VersionConflictEngineException.class, builder2); + ActionRequestBuilder<?, ?> builder1 = client().prepareDelete("test", "1").setIfSeqNo(1).setIfPrimaryTerm(2); + 
expectThrows(VersionConflictEngineException.class, builder1); // the doc is deleted. Even when we hit the deleted seqNo, a conditional delete should fail. - assertRequestBuilderThrows( - client().prepareDelete("test", "1").setIfSeqNo(2).setIfPrimaryTerm(1), - VersionConflictEngineException.class - ); + ActionRequestBuilder<?, ?> builder = client().prepareDelete("test", "1").setIfSeqNo(2).setIfPrimaryTerm(1); + expectThrows(VersionConflictEngineException.class, builder); } public void testSimpleVersioningWithFlush() throws Exception { @@ -314,17 +298,15 @@ public void testSimpleVersioningWithFlush() throws Exception { .setSource("field1", "value1_1") .setIfSeqNo(0) .setIfPrimaryTerm(1); - assertRequestBuilderThrows(indexRequestBuilder, VersionConflictEngineException.class); + expectThrows(VersionConflictEngineException.class, indexRequestBuilder); indexRequestBuilder.request().decRef(); indexRequestBuilder = prepareIndex("test").setId("1").setCreate(true).setSource("field1", "value1_1"); - assertRequestBuilderThrows(indexRequestBuilder, VersionConflictEngineException.class); + expectThrows(VersionConflictEngineException.class, indexRequestBuilder); indexRequestBuilder.request().decRef(); - assertRequestBuilderThrows( - client().prepareDelete("test", "1").setIfSeqNo(0).setIfPrimaryTerm(1), - VersionConflictEngineException.class - ); + ActionRequestBuilder<?, ?> builder = client().prepareDelete("test", "1").setIfSeqNo(0).setIfPrimaryTerm(1); + expectThrows(VersionConflictEngineException.class, builder); for (int i = 0; i < 10; i++) { assertThat(client().prepareGet("test", "1").get().getVersion(), equalTo(2L)); diff --git a/server/src/main/java/module-info.java b/server/src/main/java/module-info.java index 613e6868b8e9f..e72cb6c53e8e5 100644 --- a/server/src/main/java/module-info.java +++ b/server/src/main/java/module-info.java @@ -419,7 +419,8 @@ provides org.apache.lucene.codecs.PostingsFormat with org.elasticsearch.index.codec.bloomfilter.ES85BloomFilterPostingsFormat, - org.elasticsearch.index.codec.bloomfilter.ES87BloomFilterPostingsFormat; + org.elasticsearch.index.codec.bloomfilter.ES87BloomFilterPostingsFormat, + org.elasticsearch.index.codec.postings.ES812PostingsFormat; provides org.apache.lucene.codecs.DocValuesFormat with ES87TSDBDocValuesFormat; exports org.elasticsearch.cluster.routing.allocation.shards diff --git a/server/src/main/java/org/elasticsearch/Build.java b/server/src/main/java/org/elasticsearch/Build.java index 24cd82d29614e..0b8cd149744e3 100644 --- a/server/src/main/java/org/elasticsearch/Build.java +++ b/server/src/main/java/org/elasticsearch/Build.java @@ -47,8 +47,9 @@ private static class CurrentHolder { // finds the pluggable current build, or uses the local build as a fallback private static Build findCurrent() { - var buildExtension = ExtensionLoader.loadSingleton(ServiceLoader.load(BuildExtension.class), () -> Build::findLocalBuild); - return buildExtension.getCurrentBuild(); + return ExtensionLoader.loadSingleton(ServiceLoader.load(BuildExtension.class)) + .map(BuildExtension::getCurrentBuild) + .orElseGet(Build::findLocalBuild); } } @@ -204,7 +205,7 @@ static URL getElasticsearchCodeSourceLocation() { public static Build readBuild(StreamInput in) throws IOException { final String flavor; if (in.getTransportVersion().before(TransportVersions.V_8_3_0) - || in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_040)) { + || in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { flavor = in.readString(); } else { flavor = "default"; } @@ -234,7 +235,7 @@ 
public static Build readBuild(StreamInput in) throws IOException { version = versionMatcher.group(1); qualifier = versionMatcher.group(2); } - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_041)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { minWireVersion = in.readString(); minIndexVersion = in.readString(); displayString = in.readString(); @@ -251,7 +252,7 @@ public static Build readBuild(StreamInput in) throws IOException { public static void writeBuild(Build build, StreamOutput out) throws IOException { if (out.getTransportVersion().before(TransportVersions.V_8_3_0) - || out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_040)) { + || out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { out.writeString(build.flavor()); } out.writeString(build.type().displayName()); @@ -265,7 +266,7 @@ public static void writeBuild(Build build, StreamOutput out) throws IOException out.writeBoolean(build.isSnapshot()); out.writeString(build.qualifiedVersion()); } - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_041)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { out.writeString(build.minWireCompatVersion()); out.writeString(build.minIndexCompatVersion()); out.writeString(build.displayString()); diff --git a/server/src/main/java/org/elasticsearch/TransportVersion.java b/server/src/main/java/org/elasticsearch/TransportVersion.java index 92bb88f16385d..d3224bb048393 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersion.java +++ b/server/src/main/java/org/elasticsearch/TransportVersion.java @@ -109,13 +109,11 @@ public String toString() { private static class CurrentHolder { private static final TransportVersion CURRENT = findCurrent(); - // finds the pluggable current version, or uses the given fallback + // finds the pluggable current version private static TransportVersion findCurrent() { - var versionExtension = ExtensionLoader.loadSingleton(ServiceLoader.load(VersionExtension.class), () -> null); - if (versionExtension == null) { - return TransportVersions.LATEST_DEFINED; - } - var version = versionExtension.getCurrentTransportVersion(TransportVersions.LATEST_DEFINED); + var version = ExtensionLoader.loadSingleton(ServiceLoader.load(VersionExtension.class)) + .map(e -> e.getCurrentTransportVersion(TransportVersions.LATEST_DEFINED)) + .orElse(TransportVersions.LATEST_DEFINED); assert version.onOrAfter(TransportVersions.LATEST_DEFINED); return version; } diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 625871d25734b..eb4738b6723de 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -93,27 +93,6 @@ static TransportVersion def(int id) { * Detached transport versions added below here. 
*/ public static final TransportVersion V_8_500_020 = def(8_500_020); - public static final TransportVersion V_8_500_040 = def(8_500_040); - public static final TransportVersion V_8_500_041 = def(8_500_041); - public static final TransportVersion V_8_500_042 = def(8_500_042); - public static final TransportVersion V_8_500_043 = def(8_500_043); - public static final TransportVersion V_8_500_044 = def(8_500_044); - public static final TransportVersion V_8_500_045 = def(8_500_045); - public static final TransportVersion V_8_500_046 = def(8_500_046); - public static final TransportVersion V_8_500_047 = def(8_500_047); - public static final TransportVersion V_8_500_048 = def(8_500_048); - public static final TransportVersion V_8_500_049 = def(8_500_049); - public static final TransportVersion V_8_500_050 = def(8_500_050); - public static final TransportVersion V_8_500_051 = def(8_500_051); - public static final TransportVersion V_8_500_052 = def(8_500_052); - public static final TransportVersion V_8_500_053 = def(8_500_053); - public static final TransportVersion V_8_500_054 = def(8_500_054); - public static final TransportVersion V_8_500_055 = def(8_500_055); - public static final TransportVersion V_8_500_056 = def(8_500_056); - public static final TransportVersion V_8_500_057 = def(8_500_057); - public static final TransportVersion V_8_500_058 = def(8_500_058); - public static final TransportVersion V_8_500_059 = def(8_500_059); - public static final TransportVersion V_8_500_060 = def(8_500_060); public static final TransportVersion V_8_500_061 = def(8_500_061); public static final TransportVersion V_8_500_062 = def(8_500_062); public static final TransportVersion V_8_500_063 = def(8_500_063); @@ -193,6 +172,10 @@ static TransportVersion def(int id) { public static final TransportVersion ENRICH_ELASTICSEARCH_VERSION_REMOVED = def(8_560_00_0); public static final TransportVersion NODE_STATS_REQUEST_SIMPLIFIED = def(8_561_00_0); public static final TransportVersion TEXT_EXPANSION_TOKEN_PRUNING_CONFIG_ADDED = def(8_562_00_0); + public static final TransportVersion ESQL_ASYNC_QUERY = def(8_563_00_0); + public static final TransportVersion ESQL_STATUS_INCLUDE_LUCENE_QUERIES = def(8_564_00_0); + public static final TransportVersion ESQL_CLUSTER_ALIAS = def(8_565_00_0); + public static final TransportVersion SNAPSHOTS_IN_PROGRESS_TRACKING_REMOVING_NODES_ADDED = def(8_566_00_0); /* * STOP! READ THIS FIRST! No, really, diff --git a/server/src/main/java/org/elasticsearch/action/ActionFuture.java b/server/src/main/java/org/elasticsearch/action/ActionFuture.java index e51e31f4c03ce..061875e42fec8 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionFuture.java +++ b/server/src/main/java/org/elasticsearch/action/ActionFuture.java @@ -27,22 +27,6 @@ public interface ActionFuture<T> extends Future<T> { */ T actionGet(); - /** - * Similar to {@link #get(long, java.util.concurrent.TimeUnit)}, just catching the {@link InterruptedException} and throwing - * an {@link IllegalStateException} instead. Also catches - * {@link java.util.concurrent.ExecutionException} and throws the actual cause instead. - */ - T actionGet(String timeout); - - /** - * Similar to {@link #get(long, java.util.concurrent.TimeUnit)}, just catching the {@link InterruptedException} and throwing - * an {@link IllegalStateException} instead. Also catches 
- * - * @param timeoutMillis Timeout in millis - */ - T actionGet(long timeoutMillis); - /** * Similar to {@link #get(long, java.util.concurrent.TimeUnit)}, just catching the {@link InterruptedException} and throwing * an {@link IllegalStateException} instead. Also catches diff --git a/server/src/main/java/org/elasticsearch/action/ActionModule.java b/server/src/main/java/org/elasticsearch/action/ActionModule.java index 2039acda89b8a..627eae485d59c 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionModule.java +++ b/server/src/main/java/org/elasticsearch/action/ActionModule.java @@ -30,7 +30,6 @@ import org.elasticsearch.action.admin.cluster.migration.TransportPostFeatureUpgradeAction; import org.elasticsearch.action.admin.cluster.node.hotthreads.TransportNodesHotThreadsAction; import org.elasticsearch.action.admin.cluster.node.info.TransportNodesInfoAction; -import org.elasticsearch.action.admin.cluster.node.reload.NodesReloadSecureSettingsAction; import org.elasticsearch.action.admin.cluster.node.reload.TransportNodesReloadSecureSettingsAction; import org.elasticsearch.action.admin.cluster.node.shutdown.PrevalidateNodeRemovalAction; import org.elasticsearch.action.admin.cluster.node.shutdown.TransportPrevalidateNodeRemovalAction; @@ -105,10 +104,8 @@ import org.elasticsearch.action.admin.indices.create.CreateIndexAction; import org.elasticsearch.action.admin.indices.create.TransportCreateIndexAction; import org.elasticsearch.action.admin.indices.dangling.delete.TransportDeleteDanglingIndexAction; -import org.elasticsearch.action.admin.indices.dangling.find.FindDanglingIndexAction; import org.elasticsearch.action.admin.indices.dangling.find.TransportFindDanglingIndexAction; import org.elasticsearch.action.admin.indices.dangling.import_index.TransportImportDanglingIndexAction; -import org.elasticsearch.action.admin.indices.dangling.list.ListDanglingIndicesAction; import org.elasticsearch.action.admin.indices.dangling.list.TransportListDanglingIndicesAction; import org.elasticsearch.action.admin.indices.delete.TransportDeleteIndexAction; import org.elasticsearch.action.admin.indices.diskusage.AnalyzeIndexDiskUsageAction; @@ -149,7 +146,6 @@ import org.elasticsearch.action.admin.indices.settings.get.TransportGetSettingsAction; import org.elasticsearch.action.admin.indices.settings.put.TransportUpdateSettingsAction; import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsAction; -import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresAction; import org.elasticsearch.action.admin.indices.shards.TransportIndicesShardStoresAction; import org.elasticsearch.action.admin.indices.shrink.ResizeAction; import org.elasticsearch.action.admin.indices.shrink.TransportResizeAction; @@ -682,7 +678,7 @@ public void reg actions.register(IndicesStatsAction.INSTANCE, TransportIndicesStatsAction.class); actions.register(IndicesSegmentsAction.INSTANCE, TransportIndicesSegmentsAction.class); - actions.register(IndicesShardStoresAction.INSTANCE, TransportIndicesShardStoresAction.class); + actions.register(TransportIndicesShardStoresAction.TYPE, TransportIndicesShardStoresAction.class); actions.register(CreateIndexAction.INSTANCE, TransportCreateIndexAction.class); actions.register(ResizeAction.INSTANCE, TransportResizeAction.class); actions.register(RolloverAction.INSTANCE, TransportRolloverAction.class); @@ -740,7 +736,7 @@ public void reg actions.register(TransportExplainAction.TYPE, TransportExplainAction.class); actions.register(TransportClearScrollAction.TYPE, 
TransportClearScrollAction.class); actions.register(RecoveryAction.INSTANCE, TransportRecoveryAction.class); - actions.register(NodesReloadSecureSettingsAction.INSTANCE, TransportNodesReloadSecureSettingsAction.class); + actions.register(TransportNodesReloadSecureSettingsAction.TYPE, TransportNodesReloadSecureSettingsAction.class); actions.register(AutoCreateAction.INSTANCE, AutoCreateAction.TransportAction.class); actions.register(ResolveIndexAction.INSTANCE, ResolveIndexAction.TransportAction.class); actions.register(AnalyzeIndexDiskUsageAction.INSTANCE, TransportAnalyzeIndexDiskUsageAction.class); @@ -776,10 +772,10 @@ public void reg actions.register(RetentionLeaseActions.REMOVE, RetentionLeaseActions.TransportRemoveAction.class); // Dangling indices - actions.register(ListDanglingIndicesAction.INSTANCE, TransportListDanglingIndicesAction.class); + actions.register(TransportListDanglingIndicesAction.TYPE, TransportListDanglingIndicesAction.class); actions.register(TransportImportDanglingIndexAction.TYPE, TransportImportDanglingIndexAction.class); actions.register(TransportDeleteDanglingIndexAction.TYPE, TransportDeleteDanglingIndexAction.class); - actions.register(FindDanglingIndexAction.INSTANCE, TransportFindDanglingIndexAction.class); + actions.register(TransportFindDanglingIndexAction.TYPE, TransportFindDanglingIndexAction.class); // internal actions actions.register(GlobalCheckpointSyncAction.TYPE, GlobalCheckpointSyncAction.class); diff --git a/server/src/main/java/org/elasticsearch/action/ActionRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/ActionRequestBuilder.java index 6209e9fce390e..32d65d743e6a6 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/ActionRequestBuilder.java @@ -48,13 +48,6 @@ public Response get(TimeValue timeout) { return execute().actionGet(timeout); } - /** - * Short version of execute().actionGet(). - */ - public Response get(String timeout) { - return execute().actionGet(timeout); - } - public void execute(ActionListener listener) { client.execute(action, request, listener); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsAction.java deleted file mode 100644 index 3b09694958dcd..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsAction.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. 
- */ - -package org.elasticsearch.action.admin.cluster.node.reload; - -import org.elasticsearch.action.ActionType; -import org.elasticsearch.common.io.stream.Writeable; - -public class NodesReloadSecureSettingsAction extends ActionType { - - public static final NodesReloadSecureSettingsAction INSTANCE = new NodesReloadSecureSettingsAction(); - public static final String NAME = "cluster:admin/nodes/reload_secure_settings"; - - private NodesReloadSecureSettingsAction() { - super(NAME, Writeable.Reader.localOnly()); - } -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequestBuilder.java deleted file mode 100644 index 95c5d53ad7fbc..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequestBuilder.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.action.admin.cluster.node.reload; - -import org.elasticsearch.action.support.nodes.NodesOperationRequestBuilder; -import org.elasticsearch.client.internal.ElasticsearchClient; -import org.elasticsearch.common.settings.SecureString; - -/** - * Builder for the reload secure settings nodes request - */ -public class NodesReloadSecureSettingsRequestBuilder extends NodesOperationRequestBuilder< - NodesReloadSecureSettingsRequest, - NodesReloadSecureSettingsResponse, - NodesReloadSecureSettingsRequestBuilder> { - - public NodesReloadSecureSettingsRequestBuilder(ElasticsearchClient client) { - super(client, NodesReloadSecureSettingsAction.INSTANCE, new NodesReloadSecureSettingsRequest()); - } - - public NodesReloadSecureSettingsRequestBuilder setSecureStorePassword(SecureString secureStorePassword) { - request.setSecureStorePassword(secureStorePassword); - return this; - } - -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java index ed63e6d1b4474..0165ccaeb8016 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java @@ -13,6 +13,7 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.nodes.TransportNodesAction; @@ -39,6 +40,10 @@ public class TransportNodesReloadSecureSettingsAction extends TransportNodesActi NodesReloadSecureSettingsRequest.NodeRequest, NodesReloadSecureSettingsResponse.NodeResponse> { + public static final ActionType TYPE = ActionType.localOnly( + "cluster:admin/nodes/reload_secure_settings" + ); + private static final Logger logger = 
LogManager.getLogger(TransportNodesReloadSecureSettingsAction.class); private final Environment environment; @@ -54,7 +59,7 @@ public TransportNodesReloadSecureSettingsAction( PluginsService pluginService ) { super( - NodesReloadSecureSettingsAction.NAME, + TYPE.name(), clusterService, transportService, actionFilters, diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java index 4c7c4ec8f15a2..2fc2f1cfde3b7 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java @@ -13,9 +13,9 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListenerResponseHandler; -import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresAction; import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresRequest; import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse; +import org.elasticsearch.action.admin.indices.shards.TransportIndicesShardStoresAction; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeAction; import org.elasticsearch.cluster.ClusterState; @@ -110,7 +110,7 @@ private void verifyThenSubmitUpdate( ) { transportService.sendRequest( transportService.getLocalNode(), - IndicesShardStoresAction.NAME, + TransportIndicesShardStoresAction.TYPE.name(), new IndicesShardStoresRequest().indices(stalePrimaryAllocations.keySet().toArray(Strings.EMPTY_ARRAY)), new ActionListenerResponseHandler<>(listener.delegateFailureAndWrap((delegate, response) -> { Map>> status = response.getStoreStatuses(); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java index e1f1636781a08..4be6c6af3d7db 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java @@ -227,7 +227,7 @@ private void buildResponse( // state and the repository contents in the below logic final SnapshotIndexShardStage stage = switch (shardEntry.getValue().state()) { case FAILED, ABORTED, MISSING -> SnapshotIndexShardStage.FAILURE; - case INIT, WAITING, QUEUED -> SnapshotIndexShardStage.STARTED; + case INIT, WAITING, PAUSED_FOR_NODE_REMOVAL, QUEUED -> SnapshotIndexShardStage.STARTED; case SUCCESS -> SnapshotIndexShardStage.DONE; }; final SnapshotIndexShardStatus shardStatus; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/AnalysisStats.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/AnalysisStats.java index f8d894e4de48b..81a26999d2907 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/AnalysisStats.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/AnalysisStats.java @@ -9,6 +9,7 @@ package org.elasticsearch.action.admin.cluster.stats; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import 
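// Recurring pattern in this changeset: the standalone ActionType singleton class is
// deleted and a static TYPE constant moves onto the transport action itself, so
// registration becomes actions.register(TransportFooAction.TYPE, TransportFooAction.class).
// A condensed sketch of that shape, with ActionType simplified and
// TransportFooAction/"cluster:admin/foo" as illustrative stand-ins:
final class ActionType<Response> {
    private final String name;
    private ActionType(String name) { this.name = name; }
    // mirrors the localOnly(...) factory used above for actions whose response
    // never crosses the wire
    static <Response> ActionType<Response> localOnly(String name) { return new ActionType<>(name); }
    String name() { return name; }
}

final class TransportFooAction {
    static final ActionType<Void> TYPE = ActionType.localOnly("cluster:admin/foo");
    private final String registeredName = TYPE.name();  // the real constructors pass TYPE.name() to super(...)
}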
org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.MappingMetadata; import org.elasticsearch.cluster.metadata.Metadata; @@ -36,14 +37,12 @@ import java.util.Set; import java.util.TreeMap; -import static org.elasticsearch.TransportVersions.V_8_500_045; - /** * Statistics about analysis usage. */ public final class AnalysisStats implements ToXContentFragment, Writeable { - private static final TransportVersion SYNONYM_SETS_VERSION = V_8_500_045; + private static final TransportVersion SYNONYM_SETS_VERSION = TransportVersions.V_8_500_061; private static final Set SYNONYM_FILTER_TYPES = Set.of("synonym", "synonym_graph"); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/ReloadAnalyzersRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/ReloadAnalyzersRequest.java index 31c5f57ab5eef..e2894f072011c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/ReloadAnalyzersRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/ReloadAnalyzersRequest.java @@ -25,7 +25,7 @@ public class ReloadAnalyzersRequest extends BroadcastRequest listener) { this.nodeClient.execute( - ListDanglingIndicesAction.INSTANCE, + TransportListDanglingIndicesAction.TYPE, new ListDanglingIndicesRequest(indexUUID), listener.delegateFailure((l, response) -> { if (response.hasFailures()) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/find/FindDanglingIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/find/FindDanglingIndexAction.java deleted file mode 100644 index 107d2d1734183..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/find/FindDanglingIndexAction.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.action.admin.indices.dangling.find; - -import org.elasticsearch.action.ActionType; -import org.elasticsearch.common.io.stream.Writeable; - -/** - * Represents a request to find a particular dangling index by UUID. 
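// SYNONYM_SETS_VERSION above is re-pinned to the consolidated V_8_500_061 constant.
// The version bumps throughout this diff all rely on the same gating idiom: a field is
// only read or written when the negotiated transport version is at or after the gate.
// Hedged sketch with plain ints standing in for TransportVersion:
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

final class GatedField {
    static void write(DataOutput out, int wireVersion, int gate, String field) throws IOException {
        if (wireVersion >= gate) {
            out.writeUTF(field);   // newer peers receive the extra field
        }                          // older peers never see it
    }

    static String read(DataInput in, int wireVersion, int gate, String fallback) throws IOException {
        return wireVersion >= gate ? in.readUTF() : fallback;
    }
}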
- */ -public class FindDanglingIndexAction extends ActionType { - - public static final FindDanglingIndexAction INSTANCE = new FindDanglingIndexAction(); - public static final String NAME = "cluster:admin/indices/dangling/find"; - - private FindDanglingIndexAction() { - super(NAME, Writeable.Reader.localOnly()); - } -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/find/TransportFindDanglingIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/find/TransportFindDanglingIndexAction.java index 553e3915b3e3f..e3178c4b7fc30 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/find/TransportFindDanglingIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/find/TransportFindDanglingIndexAction.java @@ -8,6 +8,7 @@ package org.elasticsearch.action.admin.indices.dangling.find; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.nodes.TransportNodesAction; @@ -34,6 +35,8 @@ public class TransportFindDanglingIndexAction extends TransportNodesAction< NodeFindDanglingIndexRequest, NodeFindDanglingIndexResponse> { + public static final ActionType TYPE = ActionType.localOnly("cluster:admin/indices/dangling/find"); + private final TransportService transportService; private final DanglingIndicesState danglingIndicesState; @@ -46,7 +49,7 @@ public TransportFindDanglingIndexAction( DanglingIndicesState danglingIndicesState ) { super( - FindDanglingIndexAction.NAME, + TYPE.name(), clusterService, transportService, actionFilters, diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/import_index/TransportImportDanglingIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/import_index/TransportImportDanglingIndexAction.java index 0348b46bedcae..d59e09928986c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/import_index/TransportImportDanglingIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/import_index/TransportImportDanglingIndexAction.java @@ -14,9 +14,9 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.FailedNodeException; -import org.elasticsearch.action.admin.indices.dangling.find.FindDanglingIndexAction; import org.elasticsearch.action.admin.indices.dangling.find.FindDanglingIndexRequest; import org.elasticsearch.action.admin.indices.dangling.find.NodeFindDanglingIndexResponse; +import org.elasticsearch.action.admin.indices.dangling.find.TransportFindDanglingIndexAction; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.action.support.master.AcknowledgedResponse; @@ -97,7 +97,7 @@ public void onFailure(Exception e) { private void findDanglingIndex(ImportDanglingIndexRequest request, ActionListener listener) { final String indexUUID = request.getIndexUUID(); this.nodeClient.execute( - FindDanglingIndexAction.INSTANCE, + TransportFindDanglingIndexAction.TYPE, new FindDanglingIndexRequest(indexUUID), listener.delegateFailure((l, response) -> { if (response.hasFailures()) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/list/ListDanglingIndicesAction.java 
b/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/list/ListDanglingIndicesAction.java deleted file mode 100644 index 3db80832f4959..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/list/ListDanglingIndicesAction.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.action.admin.indices.dangling.list; - -import org.elasticsearch.action.ActionType; -import org.elasticsearch.common.io.stream.Writeable; - -/** - * Represents a request to list all dangling indices known to the cluster. - */ -public class ListDanglingIndicesAction extends ActionType { - - public static final ListDanglingIndicesAction INSTANCE = new ListDanglingIndicesAction(); - public static final String NAME = "cluster:admin/indices/dangling/list"; - - private ListDanglingIndicesAction() { - super(NAME, Writeable.Reader.localOnly()); - } -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/list/TransportListDanglingIndicesAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/list/TransportListDanglingIndicesAction.java index 6e0a27f7fe822..7baa190e3899d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/list/TransportListDanglingIndicesAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/list/TransportListDanglingIndicesAction.java @@ -8,6 +8,7 @@ package org.elasticsearch.action.admin.indices.dangling.list; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.admin.indices.dangling.DanglingIndexInfo; import org.elasticsearch.action.support.ActionFilters; @@ -35,6 +36,9 @@ public class TransportListDanglingIndicesAction extends TransportNodesAction< ListDanglingIndicesResponse, NodeListDanglingIndicesRequest, NodeListDanglingIndicesResponse> { + + public static final ActionType TYPE = ActionType.localOnly("cluster:admin/indices/dangling/list"); + private final TransportService transportService; private final DanglingIndicesState danglingIndicesState; @@ -47,7 +51,7 @@ public TransportListDanglingIndicesAction( DanglingIndicesState danglingIndicesState ) { super( - ListDanglingIndicesAction.NAME, + TYPE.name(), clusterService, transportService, actionFilters, diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/diskusage/IndexDiskUsageAnalyzer.java b/server/src/main/java/org/elasticsearch/action/admin/indices/diskusage/IndexDiskUsageAnalyzer.java index 17b28ebbe3b4b..2f57b59c165e2 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/diskusage/IndexDiskUsageAnalyzer.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/diskusage/IndexDiskUsageAnalyzer.java @@ -55,6 +55,7 @@ import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.core.IOUtils; +import org.elasticsearch.index.codec.postings.ES812PostingsFormat; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.store.LuceneFilesExtensions; @@ -302,6 +303,9 @@ private static void readProximity(Terms terms, 
PostingsEnum postings) throws IOE private static BlockTermState getBlockTermState(TermsEnum termsEnum, BytesRef term) throws IOException { if (term != null && termsEnum.seekExact(term)) { final TermState termState = termsEnum.termState(); + if (termState instanceof final ES812PostingsFormat.IntBlockTermState blockTermState) { + return new BlockTermState(blockTermState.docStartFP, blockTermState.posStartFP, blockTermState.payStartFP); + } if (termState instanceof final Lucene99PostingsFormat.IntBlockTermState blockTermState) { return new BlockTermState(blockTermState.docStartFP, blockTermState.posStartFP, blockTermState.payStartFP); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestBuilder.java deleted file mode 100644 index 7165953fa85ed..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoreRequestBuilder.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.action.admin.indices.shards; - -import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder; -import org.elasticsearch.client.internal.ElasticsearchClient; -import org.elasticsearch.cluster.health.ClusterHealthStatus; - -/** - * Request builder for {@link IndicesShardStoresRequest} - */ -public class IndicesShardStoreRequestBuilder extends MasterNodeReadOperationRequestBuilder< - IndicesShardStoresRequest, - IndicesShardStoresResponse, - IndicesShardStoreRequestBuilder> { - - public IndicesShardStoreRequestBuilder(ElasticsearchClient client, String... indices) { - super(client, IndicesShardStoresAction.INSTANCE, new IndicesShardStoresRequest(indices)); - } - - /** - * Sets the indices for the shard stores request - */ - public IndicesShardStoreRequestBuilder setIndices(String... indices) { - request.indices(indices); - return this; - } - - /** - * Specifies what type of requested indices to ignore and wildcard indices expressions - * By default, expands wildcards to both open and closed indices - */ - public IndicesShardStoreRequestBuilder setIndicesOptions(IndicesOptions indicesOptions) { - request.indicesOptions(indicesOptions); - return this; - } - - /** - * Set statuses to filter shards to get stores info on. - * @param shardStatuses acceptable values are "green", "yellow", "red" and "all" - * see {@link ClusterHealthStatus} for details - */ - public IndicesShardStoreRequestBuilder setShardStatuses(String... shardStatuses) { - request.shardStatuses(shardStatuses); - return this; - } -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresAction.java deleted file mode 100644 index f170e14778504..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresAction.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
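// getBlockTermState() above now recognises the ES812 codec's term state as well as
// Lucene99's, normalising both into the analyzer's own BlockTermState. A sketch of that
// instanceof-pattern chain; EsState and LuceneState are illustrative stand-ins for the
// codec-specific IntBlockTermState classes:
record FilePointers(long docStartFP, long posStartFP, long payStartFP) {}
record EsState(long docStartFP, long posStartFP, long payStartFP) {}
record LuceneState(long docStartFP, long posStartFP, long payStartFP) {}

final class TermStates {
    static FilePointers normalise(Object termState) {
        if (termState instanceof EsState es) {
            return new FilePointers(es.docStartFP(), es.posStartFP(), es.payStartFP());
        }
        if (termState instanceof LuceneState lucene) {
            return new FilePointers(lucene.docStartFP(), lucene.posStartFP(), lucene.payStartFP());
        }
        return null;   // unknown codec: no block-level file pointers to report
    }
}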
under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.action.admin.indices.shards; - -import org.elasticsearch.action.ActionType; - -/** - * ActionType for {@link TransportIndicesShardStoresAction} - * - * Exposes shard store information for requested indices. - * Shard store information reports which nodes hold shard copies, how recent they are - * and any exceptions on opening the shard index or from previous engine failures - */ -public class IndicesShardStoresAction extends ActionType { - - public static final IndicesShardStoresAction INSTANCE = new IndicesShardStoresAction(); - public static final String NAME = "indices:monitor/shard_stores"; - - private IndicesShardStoresAction() { - super(NAME, IndicesShardStoresResponse::new); - } -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresRequest.java index a4a8a475ae8b7..0ff478365cb53 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresRequest.java @@ -25,14 +25,14 @@ import java.util.Map; /** - * Request for {@link IndicesShardStoresAction} + * Request for {@link TransportIndicesShardStoresAction} */ public class IndicesShardStoresRequest extends MasterNodeReadRequest implements IndicesRequest.Replaceable { static final int DEFAULT_MAX_CONCURRENT_SHARD_REQUESTS = 100; private String[] indices = Strings.EMPTY_ARRAY; - private IndicesOptions indicesOptions = IndicesOptions.strictExpand(); + private IndicesOptions indicesOptions = IndicesOptions.strictExpandHidden(); private EnumSet statuses = EnumSet.of(ClusterHealthStatus.YELLOW, ClusterHealthStatus.RED); private int maxConcurrentShardRequests = DEFAULT_MAX_CONCURRENT_SHARD_REQUESTS; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresResponse.java index e9b27629beebf..fc1ef9e011e62 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresResponse.java @@ -29,7 +29,7 @@ import java.util.Map; /** - * Response for {@link IndicesShardStoresAction} + * Response for {@link TransportIndicesShardStoresAction} * * Consists of {@link StoreStatus}s for requested indices grouped by * indices and shard ids and a list of encountered node {@link Failure}s diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java index 4f04414cff1ac..7d091d8278ab7 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java @@ -10,6 +10,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import 
org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionType; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse.Failure; import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse.StoreStatus; @@ -62,6 +63,8 @@ public class TransportIndicesShardStoresAction extends TransportMasterNodeReadAc IndicesShardStoresRequest, IndicesShardStoresResponse> { + public static final ActionType TYPE = ActionType.localOnly("indices:monitor/shard_stores"); + private static final Logger logger = LogManager.getLogger(TransportIndicesShardStoresAction.class); private final NodeClient client; @@ -76,7 +79,7 @@ public TransportIndicesShardStoresAction( NodeClient client ) { super( - IndicesShardStoresAction.NAME, + TYPE.name(), transportService, clusterService, threadPool, diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeNumberOfShardsCalculator.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeNumberOfShardsCalculator.java index 31807919fd9d9..076841e3efadc 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeNumberOfShardsCalculator.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeNumberOfShardsCalculator.java @@ -69,7 +69,7 @@ public int calculate(Integer numberOfShards, ByteSizeValue maxPrimaryShardSize, } } else if (maxPrimaryShardSize != null) { int sourceIndexShardsNum = sourceMetadata.getNumberOfShards(); - long sourceIndexStorageBytes = indexStoreStats.getSizeInBytes(); + long sourceIndexStorageBytes = indexStoreStats.sizeInBytes(); long maxPrimaryShardSizeBytes = maxPrimaryShardSize.getBytes(); long minShardsNum = sourceIndexStorageBytes / maxPrimaryShardSizeBytes; if (minShardsNum * maxPrimaryShardSizeBytes < sourceIndexStorageBytes) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java index 76500964be750..d0da715b17168 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java @@ -48,7 +48,7 @@ public class CommonStats implements Writeable, ToXContentFragment { private static final TransportVersion VERSION_SUPPORTING_NODE_MAPPINGS = TransportVersions.V_8_5_0; - private static final TransportVersion VERSION_SUPPORTING_DENSE_VECTOR_STATS = TransportVersions.V_8_500_058; + private static final TransportVersion VERSION_SUPPORTING_DENSE_VECTOR_STATS = TransportVersions.V_8_500_061; @Nullable public DocsStats docs; diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkPrimaryExecutionContext.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkPrimaryExecutionContext.java index 49c2822599be8..e74a163709a9d 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkPrimaryExecutionContext.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkPrimaryExecutionContext.java @@ -61,6 +61,7 @@ enum ItemProcessingState { private DocWriteRequest requestToExecute; private BulkItemResponse executionResult; private int updateRetryCounter; + private long noopMappingUpdateRetryForMappingVersion; BulkPrimaryExecutionContext(BulkShardRequest request, IndexShard primary) { this.request = request; @@ -89,6 +90,7 @@ private void advance() { updateRetryCounter = 0; 
requestToExecute = null; executionResult = null; + noopMappingUpdateRetryForMappingVersion = -1; assert assertInvariants(ItemProcessingState.INITIAL); } @@ -191,12 +193,39 @@ public void resetForMappingUpdateRetry() { resetForExecutionRetry(); } + /** + * Don't bother the master node if the mapping update is a noop. + * This may happen if there was a concurrent mapping update that added the same field. + * + * @param mappingVersion the current mapping version. This is used to guard against infinite loops. + * @throws IllegalStateException if retried multiple times with the same mapping version, to guard against infinite loops. + */ + public void resetForNoopMappingUpdateRetry(long mappingVersion) { + assert assertInvariants(ItemProcessingState.TRANSLATED); + if (noopMappingUpdateRetryForMappingVersion == mappingVersion) { + // this should never happen, if we end up here, there's probably a bug + // seems like we're in a live lock/infinite loop here + // we've already re-tried and are about to retry again + // as no state has changed in the meantime (the mapping version is still the same), + // we can't expect another retry would yield a different result + // a possible cause: + // maybe we added more dynamic mappers in DocumentParserContext.addDynamicMapper than possible according to the field limit + // the additional fields are then ignored by the mapping merge and the process repeats + String message = "On retry, this indexing request resulted in another noop mapping update. " + + "Failing the indexing operation to prevent an infinite retry loop."; + assert false : message; + throw new IllegalStateException(message); + } + resetForExecutionRetry(); + noopMappingUpdateRetryForMappingVersion = mappingVersion; + } + /** resets the current item state, prepare for a new execution */ private void resetForExecutionRetry() { - assert assertInvariants(ItemProcessingState.WAIT_FOR_MAPPING_UPDATE, ItemProcessingState.EXECUTED); currentItemState = ItemProcessingState.INITIAL; requestToExecute = null; executionResult = null; + noopMappingUpdateRetryForMappingVersion = -1; assert assertInvariants(ItemProcessingState.INITIAL); } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java index 80f36b601058b..ccfcf38cb8867 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java @@ -29,6 +29,7 @@ import org.elasticsearch.action.ingest.IngestActionForwarder; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.action.support.RefCountingRunnable; import org.elasticsearch.action.support.WriteResponse; import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.action.update.UpdateRequest; @@ -393,18 +394,22 @@ protected void createMissingIndicesAndIndexData( long startTime ) { final AtomicArray responses = new AtomicArray<>(bulkRequest.requests.size()); + // Optimizing when there are no prerequisite actions if (autoCreateIndices.isEmpty()) { executeBulk(task, bulkRequest, startTime, listener, executorName, responses, indicesThatCannotBeCreated); - } else { - final AtomicInteger counter = new AtomicInteger(autoCreateIndices.size()); + return; + } + Runnable executeBulkRunnable = () -> threadPool.executor(executorName).execute(new ActionRunnable<>(listener) { + 
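// resetForNoopMappingUpdateRetry(mappingVersion) above fails the operation if it is asked
// to retry twice at the same mapping version: nothing has changed in between, so another
// retry cannot yield a different result. The invariant, isolated from the bulk machinery
// as a minimal sketch:
final class NoopMappingRetryGuard {
    private long lastNoopVersion = -1;   // -1 means no noop retry recorded yet

    void beforeNoopRetry(long mappingVersion) {
        if (lastNoopVersion == mappingVersion) {
            // same mapping version as the previous noop retry: fail fast
            // instead of live-locking
            throw new IllegalStateException("repeated noop mapping update, failing to avoid an infinite retry loop");
        }
        lastNoopVersion = mappingVersion;
    }

    void reset() {
        lastNoopVersion = -1;            // cleared whenever a new item starts
    }
}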
@Override + protected void doRun() { + executeBulk(task, bulkRequest, startTime, listener, executorName, responses, indicesThatCannotBeCreated); + } + }); + try (RefCountingRunnable refs = new RefCountingRunnable(executeBulkRunnable)) { for (String index : autoCreateIndices) { - createIndex(index, bulkRequest.timeout(), new ActionListener<>() { + createIndex(index, bulkRequest.timeout(), ActionListener.releaseAfter(new ActionListener<>() { @Override - public void onResponse(CreateIndexResponse result) { - if (counter.decrementAndGet() == 0) { - forkExecuteBulk(listener); - } - } + public void onResponse(CreateIndexResponse createIndexResponse) {} @Override public void onFailure(Exception e) { @@ -425,23 +430,8 @@ public void onFailure(Exception e) { } } } - if (counter.decrementAndGet() == 0) { - forkExecuteBulk(ActionListener.wrap(listener::onResponse, inner -> { - inner.addSuppressed(e); - listener.onFailure(inner); - })); - } } - - private void forkExecuteBulk(ActionListener finalListener) { - threadPool.executor(executorName).execute(new ActionRunnable<>(finalListener) { - @Override - protected void doRun() { - executeBulk(task, bulkRequest, startTime, listener, executorName, responses, indicesThatCannotBeCreated); - } - }); - } - }); + }, refs.acquire())); } } } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java index c8026fa392d3c..79ae641d0afd5 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java @@ -48,6 +48,7 @@ import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.VersionConflictEngineException; import org.elasticsearch.index.get.GetResult; +import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.MapperException; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.SourceToParse; @@ -65,6 +66,7 @@ import java.io.IOException; import java.util.Map; +import java.util.Optional; import java.util.concurrent.Executor; import java.util.function.Consumer; import java.util.function.LongSupplier; @@ -382,12 +384,21 @@ static boolean executeBulkItemRequest( if (result.getResultType() == Engine.Result.Type.MAPPING_UPDATE_REQUIRED) { try { - primary.mapperService() - .merge( - MapperService.SINGLE_MAPPING_NAME, - new CompressedXContent(result.getRequiredMappingUpdate()), - MapperService.MergeReason.MAPPING_UPDATE_PREFLIGHT - ); + Optional mergedSource = Optional.ofNullable( + primary.mapperService() + .merge( + MapperService.SINGLE_MAPPING_NAME, + new CompressedXContent(result.getRequiredMappingUpdate()), + MapperService.MergeReason.MAPPING_UPDATE_PREFLIGHT + ) + ).map(DocumentMapper::mappingSource); + Optional previousSource = Optional.ofNullable(primary.mapperService().documentMapper()) + .map(DocumentMapper::mappingSource); + + if (mergedSource.equals(previousSource)) { + context.resetForNoopMappingUpdateRetry(primary.mapperService().mappingVersion()); + return true; + } } catch (Exception e) { logger.info(() -> format("%s mapping update rejected by primary", primary.shardId()), e); assert result.getId() != null; diff --git a/server/src/main/java/org/elasticsearch/action/downsample/DownsampleAction.java b/server/src/main/java/org/elasticsearch/action/downsample/DownsampleAction.java index a2f4d6408a3a4..61c979f9494b5 100644 --- 
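// The createMissingIndicesAndIndexData() rewrite above replaces the manual AtomicInteger
// countdown with a RefCountingRunnable: one ref is acquired per createIndex listener, and
// the bulk execution runs exactly once when the try-with-resources block and every
// acquired ref have been released. A hedged, self-contained sketch of that completion
// pattern (not the real RefCountingRunnable, which also tracks leaks):
import java.util.concurrent.atomic.AtomicInteger;

final class RefCountingSketch implements AutoCloseable {
    private final AtomicInteger refs = new AtomicInteger(1);  // the try block holds the initial ref
    private final Runnable onAllReleased;

    RefCountingSketch(Runnable onAllReleased) { this.onAllReleased = onAllReleased; }

    Runnable acquire() {
        refs.incrementAndGet();
        return this::release;            // hand one release to each async listener
    }

    private void release() {
        if (refs.decrementAndGet() == 0) {
            onAllReleased.run();         // fires once, after close() and all acquired refs
        }
    }

    @Override
    public void close() { release(); }
}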
a/server/src/main/java/org/elasticsearch/action/downsample/DownsampleAction.java +++ b/server/src/main/java/org/elasticsearch/action/downsample/DownsampleAction.java @@ -62,7 +62,7 @@ public Request(StreamInput in) throws IOException { super(in); sourceIndex = in.readString(); targetIndex = in.readString(); - waitTimeout = in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_054) + waitTimeout = in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061) ? TimeValue.parseTimeValue(in.readString(), "timeout") : DEFAULT_WAIT_TIMEOUT; downsampleConfig = new DownsampleConfig(in); @@ -89,7 +89,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(sourceIndex); out.writeString(targetIndex); out.writeString( - out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_054) + out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061) ? waitTimeout.getStringRep() : DEFAULT_WAIT_TIMEOUT.getStringRep() ); diff --git a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java index c3005de3de14b..2df2b0c8c7aee 100644 --- a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java @@ -29,6 +29,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.util.StringLiteralDeduplicator; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.AbstractRefCounted; import org.elasticsearch.core.Nullable; @@ -38,6 +39,7 @@ import org.elasticsearch.index.VersionType; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.ingest.IngestService; import org.elasticsearch.plugins.internal.DocumentParsingObserver; import org.elasticsearch.transport.LeakTracker; import org.elasticsearch.xcontent.XContentBuilder; @@ -79,7 +81,7 @@ public class IndexRequest extends ReplicatedWriteRequest Releasable { private static final long SHALLOW_SIZE = RamUsageEstimator.shallowSizeOfInstance(IndexRequest.class); - private static final TransportVersion PIPELINES_HAVE_RUN_FIELD_ADDED = TransportVersions.V_8_500_049; + private static final TransportVersion PIPELINES_HAVE_RUN_FIELD_ADDED = TransportVersions.V_8_500_061; /** * Max length of the source document to include into string() @@ -163,11 +165,9 @@ public IndexRequest(@Nullable ShardId shardId, StreamInput in) throws IOExceptio opType = OpType.fromId(in.readByte()); version = in.readLong(); versionType = VersionType.fromValue(in.readByte()); - pipeline = in.readOptionalString(); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_5_0)) { - finalPipeline = in.readOptionalString(); - } + pipeline = readPipelineName(in); if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_5_0)) { + finalPipeline = readPipelineName(in); isPipelineResolved = in.readBoolean(); } isRetry = in.readBoolean(); @@ -216,6 +216,22 @@ public IndexRequest(String index) { this.refCounted = LeakTracker.wrap(new IndexRequestRefCounted()); } + private static final StringLiteralDeduplicator pipelineNameDeduplicator = new StringLiteralDeduplicator(); + + // reads pipeline name from the stream and deduplicates it to save heap on large bulk requests + @Nullable + private static String readPipelineName(StreamInput in) throws IOException { + final 
String read = in.readOptionalString(); + if (read == null) { + return null; + } + if (IngestService.NOOP_PIPELINE_NAME.equals(read)) { + // common path of no pipeline set + return IngestService.NOOP_PIPELINE_NAME; + } + return pipelineNameDeduplicator.deduplicate(read); + } + @Override public ActionRequestValidationException validate() { ActionRequestValidationException validationException = super.validate(); diff --git a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java index d821764e788b7..c77a03824a75c 100644 --- a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java @@ -29,12 +29,12 @@ import org.elasticsearch.core.Releasables; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.search.SearchContextMissingException; +import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.builder.PointInTimeBuilder; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.internal.AliasFilter; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.internal.ShardSearchContextId; import org.elasticsearch.search.internal.ShardSearchRequest; @@ -212,7 +212,9 @@ public final void start() { // total hits is null in the response if the tracking of total hits is disabled boolean withTotalHits = trackTotalHitsUpTo != SearchContext.TRACK_TOTAL_HITS_DISABLED; sendSearchResponse( - withTotalHits ? InternalSearchResponse.EMPTY_WITH_TOTAL_HITS : InternalSearchResponse.EMPTY_WITHOUT_TOTAL_HITS, + withTotalHits + ? 
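// readPipelineName() above interns pipeline names so that thousands of items in one bulk
// request share a single String per distinct name, with a fast path for the "no pipeline"
// sentinel. Sketch of the idea; a plain concurrent map stands in for
// StringLiteralDeduplicator (which additionally bounds its size), and the sentinel value
// is assumed for illustration:
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

final class PipelineNames {
    static final String NONE = "_none";  // sentinel assumed for illustration
    private static final ConcurrentMap<String, String> CACHE = new ConcurrentHashMap<>();

    static String deduplicate(String name) {
        if (name == null) {
            return null;
        }
        if (NONE.equals(name)) {
            return NONE;                 // common case: no pipeline configured
        }
        return CACHE.computeIfAbsent(name, n -> n);  // return the canonical instance
    }
}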
new SearchResponseSections(SearchHits.EMPTY_WITH_TOTAL_HITS, null, null, false, null, null, 1) + : new SearchResponseSections(SearchHits.EMPTY_WITHOUT_TOTAL_HITS, null, null, false, null, null, 1), new AtomicArray<>(0) ); return; @@ -655,7 +657,7 @@ public boolean isPartOfPointInTime(ShardSearchContextId contextId) { } private SearchResponse buildSearchResponse( - InternalSearchResponse internalSearchResponse, + SearchResponseSections internalSearchResponse, ShardSearchFailure[] failures, String scrollId, String searchContextId @@ -682,7 +684,7 @@ boolean buildPointInTimeFromSearchResults() { } @Override - public void sendSearchResponse(InternalSearchResponse internalSearchResponse, AtomicArray queryResults) { + public void sendSearchResponse(SearchResponseSections internalSearchResponse, AtomicArray queryResults) { ShardSearchFailure[] failures = buildShardFailures(); Boolean allowPartialResults = request.allowPartialSearchResults(); assert allowPartialResults != null : "SearchRequest missing setting for allowPartialSearchResults"; diff --git a/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java b/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java index 2df8b60cd9728..00e2b41fde3da 100644 --- a/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java @@ -18,7 +18,6 @@ import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.collapse.CollapseBuilder; -import org.elasticsearch.search.internal.InternalSearchResponse; import java.util.Iterator; import java.util.List; @@ -31,13 +30,13 @@ */ final class ExpandSearchPhase extends SearchPhase { private final SearchPhaseContext context; - private final InternalSearchResponse searchResponse; + private final SearchHits searchHits; private final Supplier nextPhase; - ExpandSearchPhase(SearchPhaseContext context, InternalSearchResponse searchResponse, Supplier nextPhase) { + ExpandSearchPhase(SearchPhaseContext context, SearchHits searchHits, Supplier nextPhase) { super("expand"); this.context = context; - this.searchResponse = searchResponse; + this.searchHits = searchHits; this.nextPhase = nextPhase; } @@ -53,7 +52,7 @@ private boolean isCollapseRequest() { @Override public void run() { - if (isCollapseRequest() && searchResponse.hits().getHits().length > 0) { + if (isCollapseRequest() && searchHits.getHits().length > 0) { SearchRequest searchRequest = context.getRequest(); CollapseBuilder collapseBuilder = searchRequest.source().collapse(); final List innerHitBuilders = collapseBuilder.getInnerHits(); @@ -61,7 +60,7 @@ public void run() { if (collapseBuilder.getMaxConcurrentGroupRequests() > 0) { multiRequest.maxConcurrentSearchRequests(collapseBuilder.getMaxConcurrentGroupRequests()); } - for (SearchHit hit : searchResponse.hits().getHits()) { + for (SearchHit hit : searchHits.getHits()) { BoolQueryBuilder groupQuery = new BoolQueryBuilder(); Object collapseValue = hit.field(collapseBuilder.getField()).getValue(); if (collapseValue != null) { @@ -85,7 +84,7 @@ public void run() { } context.getSearchTransport().sendExecuteMultiSearch(multiRequest, context.getTask(), ActionListener.wrap(response -> { Iterator it = response.iterator(); - for (SearchHit hit : searchResponse.hits.getHits()) { + for (SearchHit hit : searchHits.getHits()) { for (InnerHitBuilder innerHitBuilder : innerHitBuilders) { MultiSearchResponse.Item item 
= it.next(); if (item.isFailure()) { diff --git a/server/src/main/java/org/elasticsearch/action/search/FetchLookupFieldsPhase.java b/server/src/main/java/org/elasticsearch/action/search/FetchLookupFieldsPhase.java index 9f1da9a7e2b03..9c50d534ac4ce 100644 --- a/server/src/main/java/org/elasticsearch/action/search/FetchLookupFieldsPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/FetchLookupFieldsPhase.java @@ -15,9 +15,9 @@ import org.elasticsearch.common.util.Maps; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.fetch.subphase.LookupField; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.transport.RemoteClusterService; import java.util.ArrayList; @@ -33,10 +33,10 @@ */ final class FetchLookupFieldsPhase extends SearchPhase { private final SearchPhaseContext context; - private final InternalSearchResponse searchResponse; + private final SearchResponseSections searchResponse; private final AtomicArray queryResults; - FetchLookupFieldsPhase(SearchPhaseContext context, InternalSearchResponse searchResponse, AtomicArray queryResults) { + FetchLookupFieldsPhase(SearchPhaseContext context, SearchResponseSections searchResponse, AtomicArray queryResults) { super("fetch_lookup_fields"); this.context = context; this.searchResponse = searchResponse; @@ -47,9 +47,9 @@ private record Cluster(String clusterAlias, List hitsWithLookupFields } - private static List groupLookupFieldsByClusterAlias(InternalSearchResponse response) { + private static List groupLookupFieldsByClusterAlias(SearchHits searchHits) { final Map> perClusters = new HashMap<>(); - for (SearchHit hit : response.hits.getHits()) { + for (SearchHit hit : searchHits.getHits()) { String clusterAlias = hit.getClusterAlias() != null ? 
hit.getClusterAlias() : RemoteClusterService.LOCAL_CLUSTER_GROUP_KEY; if (hit.hasLookupFields()) { perClusters.computeIfAbsent(clusterAlias, k -> new ArrayList<>()).add(hit); @@ -70,7 +70,7 @@ private static List groupLookupFieldsByClusterAlias(InternalSearchRespo @Override public void run() { - final List clusters = groupLookupFieldsByClusterAlias(searchResponse); + final List clusters = groupLookupFieldsByClusterAlias(searchResponse.hits); if (clusters.isEmpty()) { context.sendSearchResponse(searchResponse, queryResults); return; diff --git a/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java b/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java index e8d3ded154f55..27ff6a2ab8309 100644 --- a/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/FetchSearchPhase.java @@ -16,7 +16,6 @@ import org.elasticsearch.search.dfs.AggregatedDfs; import org.elasticsearch.search.fetch.FetchSearchResult; import org.elasticsearch.search.fetch.ShardFetchSearchRequest; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.internal.ShardSearchContextId; import org.elasticsearch.search.query.QuerySearchResult; import org.elasticsearch.transport.Transport; @@ -31,7 +30,7 @@ final class FetchSearchPhase extends SearchPhase { private final ArraySearchPhaseResults fetchResults; private final AtomicArray queryResults; - private final BiFunction, SearchPhase> nextPhaseFactory; + private final BiFunction, SearchPhase> nextPhaseFactory; private final SearchPhaseContext context; private final Logger logger; private final SearchPhaseResults resultConsumer; @@ -45,7 +44,7 @@ final class FetchSearchPhase extends SearchPhase { context, (response, queryPhaseResults) -> new ExpandSearchPhase( context, - response, + response.hits, () -> new FetchLookupFieldsPhase(context, response, queryPhaseResults) ) ); @@ -55,7 +54,7 @@ final class FetchSearchPhase extends SearchPhase { SearchPhaseResults resultConsumer, AggregatedDfs aggregatedDfs, SearchPhaseContext context, - BiFunction, SearchPhase> nextPhaseFactory + BiFunction, SearchPhase> nextPhaseFactory ) { super("fetch"); if (context.getNumShards() != resultConsumer.getNumShards()) { @@ -230,11 +229,12 @@ private void moveToNextPhase( SearchPhaseController.ReducedQueryPhase reducedQueryPhase, AtomicArray fetchResultsArr ) { - final InternalSearchResponse internalResponse = SearchPhaseController.merge( - context.getRequest().scroll() != null, - reducedQueryPhase, - fetchResultsArr + context.executeNextPhase( + this, + nextPhaseFactory.apply( + SearchPhaseController.merge(context.getRequest().scroll() != null, reducedQueryPhase, fetchResultsArr), + queryResults + ) ); - context.executeNextPhase(this, nextPhaseFactory.apply(internalResponse, queryResults)); } } diff --git a/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java b/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java index cadcd6ca57334..2e50667fc02b1 100644 --- a/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java +++ b/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java @@ -19,6 +19,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.common.xcontent.XContentHelper; import 
org.elasticsearch.core.RestApiVersion; import org.elasticsearch.rest.action.search.RestMultiSearchAction; import org.elasticsearch.rest.action.search.RestSearchAction; @@ -33,7 +34,6 @@ import java.io.ByteArrayOutputStream; import java.io.IOException; -import java.io.InputStream; import java.util.ArrayList; import java.util.List; import java.util.Locale; @@ -239,8 +239,11 @@ public static void readMultiLineFormat( // now parse the action if (nextMarker - from > 0) { try ( - InputStream stream = data.slice(from, nextMarker - from).streamInput(); - XContentParser parser = xContent.createParser(parserConfig, stream) + XContentParser parser = XContentHelper.createParserNotCompressed( + parserConfig, + data.slice(from, nextMarker - from), + xContent.type() + ) ) { Map source = parser.map(); Object expandWildcards = null; @@ -301,8 +304,13 @@ public static void readMultiLineFormat( if (nextMarker == -1) { break; } - BytesReference bytes = data.slice(from, nextMarker - from); - try (InputStream stream = bytes.streamInput(); XContentParser parser = xContent.createParser(parserConfig, stream)) { + try ( + XContentParser parser = XContentHelper.createParserNotCompressed( + parserConfig, + data.slice(from, nextMarker - from), + xContent.type() + ) + ) { consumer.accept(searchRequest, parser); } // move pointers diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchContextId.java b/server/src/main/java/org/elasticsearch/action/search/SearchContextId.java index f10650a6401d6..83a6870d72491 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchContextId.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchContextId.java @@ -32,10 +32,10 @@ import java.nio.charset.StandardCharsets; import java.util.Base64; import java.util.Collections; -import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; +import java.util.TreeSet; import java.util.stream.Collectors; public final class SearchContextId { @@ -110,12 +110,30 @@ public static SearchContextId decode(NamedWriteableRegistry namedWriteableRegist } } + public static String[] decodeIndices(String id) { + try ( + var decodedInputStream = Base64.getUrlDecoder().wrap(new ByteArrayInputStream(id.getBytes(StandardCharsets.ISO_8859_1))); + var in = new InputStreamStreamInput(decodedInputStream) + ) { + final TransportVersion version = TransportVersion.readVersion(in); + in.setTransportVersion(version); + final Map shards = Collections.unmodifiableMap( + in.readCollection(Maps::newHashMapWithExpectedSize, SearchContextId::readShardsMapEntry) + ); + return new SearchContextId(shards, Collections.emptyMap()).getActualIndices(); + } catch (IOException e) { + assert false : e; + throw new IllegalArgumentException(e); + } + } + private static void readShardsMapEntry(StreamInput in, Map shards) throws IOException { shards.put(new ShardId(in), new SearchContextIdForNode(in)); } public String[] getActualIndices() { - final Set indices = new HashSet<>(); + // ensure that the order is consistent + final Set indices = new TreeSet<>(); for (Map.Entry entry : shards().entrySet()) { final String indexName = entry.getKey().getIndexName(); final String clusterAlias = entry.getValue().getClusterAlias(); diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseContext.java b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseContext.java index d70b99fe46c00..af9bcac8e3a33 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseContext.java +++ 
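// getActualIndices() above swaps HashSet for TreeSet so the index names recovered from a
// point-in-time id come back deduplicated and in a stable, sorted order, which matters
// when the array is compared or re-encoded. Tiny illustration of the choice:
import java.util.Set;
import java.util.TreeSet;

final class StableIndices {
    static String[] of(Iterable<String> names) {
        Set<String> indices = new TreeSet<>();   // sorted and deduplicated
        for (String name : names) {
            indices.add(name);
        }
        return indices.toArray(new String[0]);
    }
}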
b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseContext.java @@ -14,7 +14,6 @@ import org.elasticsearch.core.Releasable; import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchShardTarget; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.internal.ShardSearchContextId; import org.elasticsearch.search.internal.ShardSearchRequest; import org.elasticsearch.transport.Transport; @@ -64,7 +63,7 @@ interface SearchPhaseContext extends Executor { * @param internalSearchResponse the internal search response * @param queryResults the results of the query phase */ - void sendSearchResponse(InternalSearchResponse internalSearchResponse, AtomicArray queryResults); + void sendSearchResponse(SearchResponseSections internalSearchResponse, AtomicArray queryResults); /** * Notifies the top-level listener of the provided exception diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java index d4808def29d1f..e425d9d66dd69 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java @@ -40,7 +40,6 @@ import org.elasticsearch.search.dfs.DfsKnnResults; import org.elasticsearch.search.dfs.DfsSearchResult; import org.elasticsearch.search.fetch.FetchSearchResult; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.profile.SearchProfileQueryPhaseResult; import org.elasticsearch.search.profile.SearchProfileResults; @@ -355,13 +354,13 @@ public static List[] fillDocIdsToLoad(int numShards, ScoreDoc[] shardDo * Expects sortedDocs to have top search docs across all shards, optionally followed by top suggest docs for each named * completion suggestion ordered by suggestion name */ - public static InternalSearchResponse merge( + public static SearchResponseSections merge( boolean ignoreFrom, ReducedQueryPhase reducedQueryPhase, AtomicArray fetchResultsArray ) { if (reducedQueryPhase.isEmptyResult) { - return InternalSearchResponse.EMPTY_WITH_TOTAL_HITS; + return new SearchResponseSections(SearchHits.EMPTY_WITH_TOTAL_HITS, null, null, false, null, null, 1); } ScoreDoc[] sortedDocs = reducedQueryPhase.sortedTopDocs.scoreDocs; var fetchResults = fetchResultsArray.asList(); @@ -753,14 +752,14 @@ public record ReducedQueryPhase( * Creates a new search response from the given merged hits. 
* @see #merge(boolean, ReducedQueryPhase, AtomicArray) */ - public InternalSearchResponse buildResponse(SearchHits hits, Collection fetchResults) { - return new InternalSearchResponse( + public SearchResponseSections buildResponse(SearchHits hits, Collection fetchResults) { + return new SearchResponseSections( hits, aggregations, suggest, - buildSearchProfileResults(fetchResults), timedOut, terminatedEarly, + buildSearchProfileResults(fetchResults), numReducePhases ); } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java b/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java index 7ac8c4d5299d4..456a574c6f6b2 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java @@ -407,6 +407,21 @@ public ActionRequestValidationException validate() { if (scroll) { validationException = addValidationError("using [point in time] is not allowed in a scroll context", validationException); } + if (indices().length > 0) { + validationException = addValidationError( + "[indices] cannot be used with point in time. Do not specify any index with point in time.", + validationException + ); + } + if (indicesOptions().equals(DEFAULT_INDICES_OPTIONS) == false) { + validationException = addValidationError("[indicesOptions] cannot be used with point in time", validationException); + } + if (routing() != null) { + validationException = addValidationError("[routing] cannot be used with point in time", validationException); + } + if (preference() != null) { + validationException = addValidationError("[preference] cannot be used with point in time", validationException); + } } else if (source != null && source.sorts() != null) { for (SortBuilder sortBuilder : source.sorts()) { if (sortBuilder instanceof FieldSortBuilder diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java b/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java index 9ff0f6273171b..d6a0153a235a2 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java @@ -28,7 +28,6 @@ import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.aggregations.Aggregations; import org.elasticsearch.search.aggregations.InternalAggregations; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.profile.SearchProfileResults; import org.elasticsearch.search.profile.SearchProfileShardResult; import org.elasticsearch.search.suggest.Suggest; @@ -67,7 +66,13 @@ public class SearchResponse extends ActionResponse implements ChunkedToXContentO private static final ParseField TERMINATED_EARLY = new ParseField("terminated_early"); private static final ParseField NUM_REDUCE_PHASES = new ParseField("num_reduce_phases"); - private final SearchResponseSections internalResponse; + private final SearchHits hits; + private final Aggregations aggregations; + private final Suggest suggest; + private final SearchProfileResults profileResults; + private final boolean timedOut; + private final Boolean terminatedEarly; + private final int numReducePhases; private final String scrollId; private final String pointInTimeId; private final int totalShards; @@ -79,7 +84,13 @@ public class SearchResponse extends ActionResponse implements ChunkedToXContentO public SearchResponse(StreamInput in) throws IOException { super(in); - 
internalResponse = new InternalSearchResponse(in); + this.hits = new SearchHits(in); + this.aggregations = in.readBoolean() ? InternalAggregations.readFrom(in) : null; + this.suggest = in.readBoolean() ? new Suggest(in) : null; + this.timedOut = in.readBoolean(); + this.terminatedEarly = in.readOptionalBoolean(); + this.profileResults = in.readOptionalWriteable(SearchProfileResults::new); + this.numReducePhases = in.readVInt(); totalShards = in.readVInt(); successfulShards = in.readVInt(); int size = in.readVInt(); @@ -99,7 +110,13 @@ public SearchResponse(StreamInput in) throws IOException { } public SearchResponse( - SearchResponseSections internalResponse, + SearchHits hits, + Aggregations aggregations, + Suggest suggest, + boolean timedOut, + Boolean terminatedEarly, + SearchProfileResults profileResults, + int numReducePhases, String scrollId, int totalShards, int successfulShards, @@ -108,11 +125,63 @@ public SearchResponse( ShardSearchFailure[] shardFailures, Clusters clusters ) { - this(internalResponse, scrollId, totalShards, successfulShards, skippedShards, tookInMillis, shardFailures, clusters, null); + this( + hits, + aggregations, + suggest, + timedOut, + terminatedEarly, + profileResults, + numReducePhases, + scrollId, + totalShards, + successfulShards, + skippedShards, + tookInMillis, + shardFailures, + clusters, + null + ); + } + + public SearchResponse( + SearchResponseSections searchResponseSections, + String scrollId, + int totalShards, + int successfulShards, + int skippedShards, + long tookInMillis, + ShardSearchFailure[] shardFailures, + Clusters clusters, + String pointInTimeId + ) { + this( + searchResponseSections.hits, + searchResponseSections.aggregations, + searchResponseSections.suggest, + searchResponseSections.timedOut, + searchResponseSections.terminatedEarly, + searchResponseSections.profileResults, + searchResponseSections.numReducePhases, + scrollId, + totalShards, + successfulShards, + skippedShards, + tookInMillis, + shardFailures, + clusters, + pointInTimeId + ); } public SearchResponse( - SearchResponseSections internalResponse, + SearchHits hits, + Aggregations aggregations, + Suggest suggest, + boolean timedOut, + Boolean terminatedEarly, + SearchProfileResults profileResults, + int numReducePhases, String scrollId, int totalShards, int successfulShards, @@ -122,7 +191,13 @@ public SearchResponse( Clusters clusters, String pointInTimeId ) { - this.internalResponse = internalResponse; + this.hits = hits; + this.aggregations = aggregations; + this.suggest = suggest; + this.profileResults = profileResults; + this.timedOut = timedOut; + this.terminatedEarly = terminatedEarly; + this.numReducePhases = numReducePhases; this.scrollId = scrollId; this.pointInTimeId = pointInTimeId; this.clusters = clusters; @@ -144,7 +219,7 @@ public RestStatus status() { * The search hits. */ public SearchHits getHits() { - return internalResponse.hits(); + return hits; } /** @@ -152,7 +227,7 @@ public SearchHits getHits() { * either {@code null} or {@link InternalAggregations#EMPTY}. */ public @Nullable Aggregations getAggregations() { - return internalResponse.aggregations(); + return aggregations; } /** @@ -163,14 +238,14 @@ public boolean hasAggregations() { } public Suggest getSuggest() { - return internalResponse.suggest(); + return suggest; } /** * Has the search operation timed out. 
*/ public boolean isTimedOut() { - return internalResponse.timedOut(); + return timedOut; } /** @@ -178,14 +253,14 @@ public boolean isTimedOut() { * terminateAfter */ public Boolean isTerminatedEarly() { - return internalResponse.terminatedEarly(); + return terminatedEarly; } /** * Returns the number of reduce phases applied to obtain this search response */ public int getNumReducePhases() { - return internalResponse.getNumReducePhases(); + return numReducePhases; } /** @@ -253,7 +328,10 @@ public String pointInTimeId() { */ @Nullable public Map getProfileResults() { - return internalResponse.profile(); + if (profileResults == null) { + return Collections.emptyMap(); + } + return profileResults.getShardResults(); } /** @@ -278,7 +356,27 @@ public Iterator innerToXContentChunked(ToXContent.Params p return Iterators.concat( ChunkedToXContentHelper.singleChunk(SearchResponse.this::headerToXContent), Iterators.single(clusters), - internalResponse.toXContentChunked(params) + Iterators.concat( + Iterators.flatMap(Iterators.single(hits), r -> r.toXContentChunked(params)), + Iterators.single((ToXContent) (b, p) -> { + if (aggregations != null) { + aggregations.toXContent(b, p); + } + return b; + }), + Iterators.single((b, p) -> { + if (suggest != null) { + suggest.toXContent(b, p); + } + return b; + }), + Iterators.single((b, p) -> { + if (profileResults != null) { + profileResults.toXContent(b, p); + } + return b; + }) + ) ); } @@ -396,17 +494,14 @@ public static SearchResponse innerFromXContent(XContentParser parser) throws IOE } } } - SearchResponseSections searchResponseSections = new SearchResponseSections( + return new SearchResponse( hits, aggs, suggest, timedOut, terminatedEarly, profile, - numReducePhases - ); - return new SearchResponse( - searchResponseSections, + numReducePhases, scrollId, totalShards, successfulShards, @@ -420,7 +515,13 @@ public static SearchResponse innerFromXContent(XContentParser parser) throws IOE @Override public void writeTo(StreamOutput out) throws IOException { - internalResponse.writeTo(out); + hits.writeTo(out); + out.writeOptionalWriteable((InternalAggregations) aggregations); + out.writeOptionalWriteable(suggest); + out.writeBoolean(timedOut); + out.writeOptionalBoolean(terminatedEarly); + out.writeOptionalWriteable(profileResults); + out.writeVInt(numReducePhases); out.writeVInt(totalShards); out.writeVInt(successfulShards); @@ -532,7 +633,7 @@ public Clusters(StreamInput in) throws IOException { this.total = in.readVInt(); int successfulTemp = in.readVInt(); int skippedTemp = in.readVInt(); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_053)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { List clusterList = in.readCollectionAsList(Cluster::new); if (clusterList.isEmpty()) { this.clusterInfo = Collections.emptyMap(); @@ -585,7 +686,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVInt(total); out.writeVInt(successful); out.writeVInt(skipped); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_053)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { if (clusterInfo != null) { List clusterList = clusterInfo.values().stream().toList(); out.writeCollection(clusterList); @@ -1268,17 +1369,14 @@ public String toString() { // public for tests public static SearchResponse empty(Supplier tookInMillisSupplier, Clusters clusters) { SearchHits searchHits = new SearchHits(new SearchHit[0], new TotalHits(0L, TotalHits.Relation.EQUAL_TO), Float.NaN); - 
InternalSearchResponse internalSearchResponse = new InternalSearchResponse( + return new SearchResponse( searchHits, InternalAggregations.EMPTY, null, - null, false, null, - 0 - ); - return new SearchResponse( - internalSearchResponse, + null, + 0, null, 0, 0, diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchResponseMerger.java b/server/src/main/java/org/elasticsearch/action/search/SearchResponseMerger.java index b6143cfc51c3a..1b616b9f3bc87 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchResponseMerger.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchResponseMerger.java @@ -27,7 +27,6 @@ import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.aggregations.AggregationReduceContext; import org.elasticsearch.search.aggregations.InternalAggregations; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.profile.SearchProfileResults; import org.elasticsearch.search.profile.SearchProfileShardResult; import org.elasticsearch.search.suggest.Suggest; @@ -211,18 +210,15 @@ SearchResponse getMergedResponse(Clusters clusters) { SearchProfileResults profileShardResults = profileResults.isEmpty() ? null : new SearchProfileResults(profileResults); // make failures ordering consistent between ordinary search and CCS by looking at the shard they come from Arrays.sort(shardFailures, FAILURES_COMPARATOR); - InternalSearchResponse response = new InternalSearchResponse( + long tookInMillis = searchTimeProvider.buildTookInMillis(); + return new SearchResponse( mergedSearchHits, reducedAggs, suggest, - profileShardResults, topDocsStats.timedOut, topDocsStats.terminatedEarly, - numReducePhases - ); - long tookInMillis = searchTimeProvider.buildTookInMillis(); - return new SearchResponse( - response, + profileShardResults, + numReducePhases, null, totalShards, successfulShards, diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchResponseSections.java b/server/src/main/java/org/elasticsearch/action/search/SearchResponseSections.java index b4de15f4cc413..6f382b9e5f8d6 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchResponseSections.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchResponseSections.java @@ -8,30 +8,20 @@ package org.elasticsearch.action.search; -import org.elasticsearch.common.collect.Iterators; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.ChunkedToXContent; import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.aggregations.Aggregations; import org.elasticsearch.search.profile.SearchProfileResults; import org.elasticsearch.search.profile.SearchProfileShardResult; import org.elasticsearch.search.suggest.Suggest; -import org.elasticsearch.xcontent.ToXContent; -import java.io.IOException; import java.util.Collections; -import java.util.Iterator; import java.util.Map; /** - * Base class that holds the various sections which a search response is - * composed of (hits, aggs, suggestions etc.) and allows to retrieve them. - * - * The reason why this class exists is that the high level REST client uses its own classes - * to parse aggregations into, which are not serializable. This is the common part that can be - * shared between core and client. + * Holds the sections that a search response is composed of (hits, aggs, suggestions etc.) during intermediate steps of building + * the search response.
*/ -public class SearchResponseSections implements ChunkedToXContent { +public class SearchResponseSections { protected final SearchHits hits; protected final Aggregations aggregations; @@ -98,33 +88,4 @@ public final Map profile() { } return profileResults.getShardResults(); } - - @Override - public Iterator toXContentChunked(ToXContent.Params params) { - return Iterators.concat( - Iterators.flatMap(Iterators.single(hits), r -> r.toXContentChunked(params)), - Iterators.single((ToXContent) (b, p) -> { - if (aggregations != null) { - aggregations.toXContent(b, p); - } - return b; - }), - Iterators.single((b, p) -> { - if (suggest != null) { - suggest.toXContent(b, p); - } - return b; - }), - Iterators.single((b, p) -> { - if (profileResults != null) { - profileResults.toXContent(b, p); - } - return b; - }) - ); - } - - protected void writeTo(StreamOutput out) throws IOException { - throw new UnsupportedOperationException(); - } } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchScrollAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/SearchScrollAsyncAction.java index 5681bda8b2741..885fd98fbdc15 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchScrollAsyncAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchScrollAsyncAction.java @@ -18,7 +18,6 @@ import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.internal.InternalScrollSearchRequest; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.internal.ShardSearchContextId; import org.elasticsearch.transport.RemoteClusterService; import org.elasticsearch.transport.Transport; @@ -240,7 +239,6 @@ protected final void sendResponse( final AtomicArray fetchResults ) { try { - final InternalSearchResponse internalResponse = SearchPhaseController.merge(true, queryPhase, fetchResults); // the scroll ID never changes we always return the same ID. This ID contains all the shards and their context ids // such that we can talk to them again in the next roundtrip. String scrollId = null; @@ -250,7 +248,7 @@ protected final void sendResponse( ActionListener.respondAndRelease( listener, new SearchResponse( - internalResponse, + SearchPhaseController.merge(true, queryPhase, fetchResults), scrollId, this.scrollId.getContext().length, successfulOps.get(), diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchTransportAPMMetrics.java b/server/src/main/java/org/elasticsearch/action/search/SearchTransportAPMMetrics.java new file mode 100644 index 0000000000000..93b8e22d0d7cd --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/search/SearchTransportAPMMetrics.java @@ -0,0 +1,49 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.action.search; + +import org.elasticsearch.telemetry.metric.LongHistogram; +import org.elasticsearch.telemetry.metric.MeterRegistry; + +public class SearchTransportAPMMetrics { + public static final String SEARCH_ACTION_LATENCY_BASE_METRIC = "es.search.nodes.transport_actions.latency.histogram"; + public static final String ACTION_ATTRIBUTE_NAME = "action"; + + public static final String QUERY_CAN_MATCH_NODE_METRIC = "shards_can_match"; + public static final String DFS_ACTION_METRIC = "dfs_query_then_fetch/shard_dfs_phase"; + public static final String QUERY_ID_ACTION_METRIC = "dfs_query_then_fetch/shard_query_phase"; + public static final String QUERY_ACTION_METRIC = "query_then_fetch/shard_query_phase"; + public static final String FREE_CONTEXT_ACTION_METRIC = "shard_release_context"; + public static final String FETCH_ID_ACTION_METRIC = "shard_fetch_phase"; + public static final String QUERY_SCROLL_ACTION_METRIC = "scroll/shard_query_phase"; + public static final String FETCH_ID_SCROLL_ACTION_METRIC = "scroll/shard_fetch_phase"; + public static final String QUERY_FETCH_SCROLL_ACTION_METRIC = "scroll/shard_query_and_fetch_phase"; + public static final String FREE_CONTEXT_SCROLL_ACTION_METRIC = "scroll/shard_release_context"; + public static final String CLEAR_SCROLL_CONTEXTS_ACTION_METRIC = "scroll/shard_release_contexts"; + + private final LongHistogram actionLatencies; + + public SearchTransportAPMMetrics(MeterRegistry meterRegistry) { + this( + meterRegistry.registerLongHistogram( + SEARCH_ACTION_LATENCY_BASE_METRIC, + "Transport action execution times at the node level, expressed as a histogram", + "millis" + ) + ); + } + + private SearchTransportAPMMetrics(LongHistogram actionLatencies) { + this.actionLatencies = actionLatencies; + } + + public LongHistogram getActionLatencies() { + return actionLatencies; + } +} diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java b/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java index e46d26c3532ad..b7cc61ad70e2f 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java @@ -59,6 +59,19 @@ import java.util.Objects; import java.util.function.BiFunction; +import static org.elasticsearch.action.search.SearchTransportAPMMetrics.ACTION_ATTRIBUTE_NAME; +import static org.elasticsearch.action.search.SearchTransportAPMMetrics.CLEAR_SCROLL_CONTEXTS_ACTION_METRIC; +import static org.elasticsearch.action.search.SearchTransportAPMMetrics.DFS_ACTION_METRIC; +import static org.elasticsearch.action.search.SearchTransportAPMMetrics.FETCH_ID_ACTION_METRIC; +import static org.elasticsearch.action.search.SearchTransportAPMMetrics.FETCH_ID_SCROLL_ACTION_METRIC; +import static org.elasticsearch.action.search.SearchTransportAPMMetrics.FREE_CONTEXT_ACTION_METRIC; +import static org.elasticsearch.action.search.SearchTransportAPMMetrics.FREE_CONTEXT_SCROLL_ACTION_METRIC; +import static org.elasticsearch.action.search.SearchTransportAPMMetrics.QUERY_ACTION_METRIC; +import static org.elasticsearch.action.search.SearchTransportAPMMetrics.QUERY_CAN_MATCH_NODE_METRIC; +import static org.elasticsearch.action.search.SearchTransportAPMMetrics.QUERY_FETCH_SCROLL_ACTION_METRIC; +import static org.elasticsearch.action.search.SearchTransportAPMMetrics.QUERY_ID_ACTION_METRIC; +import static 
org.elasticsearch.action.search.SearchTransportAPMMetrics.QUERY_SCROLL_ACTION_METRIC; + /** * An encapsulation of {@link org.elasticsearch.search.SearchService} operations exposed through * transport. @@ -68,13 +81,27 @@ public class SearchTransportService { public static final String FREE_CONTEXT_SCROLL_ACTION_NAME = "indices:data/read/search[free_context/scroll]"; public static final String FREE_CONTEXT_ACTION_NAME = "indices:data/read/search[free_context]"; public static final String CLEAR_SCROLL_CONTEXTS_ACTION_NAME = "indices:data/read/search[clear_scroll_contexts]"; + + /** + * Part of DFS_QUERY_THEN_FETCH, which fetches distributed term frequencies and executes KNN. + */ public static final String DFS_ACTION_NAME = "indices:data/read/search[phase/dfs]"; public static final String QUERY_ACTION_NAME = "indices:data/read/search[phase/query]"; + + /** + * Part of DFS_QUERY_THEN_FETCH, which executes the shard-level query phase using the distributed term frequencies gathered in the DFS phase. + */ public static final String QUERY_ID_ACTION_NAME = "indices:data/read/search[phase/query/id]"; public static final String QUERY_SCROLL_ACTION_NAME = "indices:data/read/search[phase/query/scroll]"; public static final String QUERY_FETCH_SCROLL_ACTION_NAME = "indices:data/read/search[phase/query+fetch/scroll]"; public static final String FETCH_ID_SCROLL_ACTION_NAME = "indices:data/read/search[phase/fetch/id/scroll]"; public static final String FETCH_ID_ACTION_NAME = "indices:data/read/search[phase/fetch/id]"; + + /** + * The Can-Match phase. It is executed to pre-filter shards that a search request hits. It rewrites the query on + * the shard and checks whether the result of the rewrite matches no documents, in which case the shard can be + * filtered out. + */ public static final String QUERY_CAN_MATCH_NODE_NAME = "indices:data/read/search[can_match][n]"; private final TransportService transportService; @@ -382,35 +409,41 @@ public void writeTo(StreamOutput out) throws IOException { } } - public static void registerRequestHandler(TransportService transportService, SearchService searchService) { + public static void registerRequestHandler( + TransportService transportService, + SearchService searchService, + SearchTransportAPMMetrics searchTransportMetrics + ) { transportService.registerRequestHandler( FREE_CONTEXT_SCROLL_ACTION_NAME, EsExecutors.DIRECT_EXECUTOR_SERVICE, ScrollFreeContextRequest::new, - (request, channel, task) -> { + instrumentedHandler(FREE_CONTEXT_SCROLL_ACTION_METRIC, transportService, searchTransportMetrics, (request, channel, task) -> { boolean freed = searchService.freeReaderContext(request.id()); channel.sendResponse(new SearchFreeContextResponse(freed)); - } + }) ); TransportActionProxy.registerProxyAction(transportService, FREE_CONTEXT_SCROLL_ACTION_NAME, false, SearchFreeContextResponse::new); + transportService.registerRequestHandler( FREE_CONTEXT_ACTION_NAME, EsExecutors.DIRECT_EXECUTOR_SERVICE, SearchFreeContextRequest::new, - (request, channel, task) -> { + instrumentedHandler(FREE_CONTEXT_ACTION_METRIC, transportService, searchTransportMetrics, (request, channel, task) -> { boolean freed = searchService.freeReaderContext(request.id()); channel.sendResponse(new SearchFreeContextResponse(freed)); - } + }) ); TransportActionProxy.registerProxyAction(transportService, FREE_CONTEXT_ACTION_NAME, false, SearchFreeContextResponse::new); + transportService.registerRequestHandler( CLEAR_SCROLL_CONTEXTS_ACTION_NAME, EsExecutors.DIRECT_EXECUTOR_SERVICE, TransportRequest.Empty::new, - (request, channel, task) -> { +
instrumentedHandler(CLEAR_SCROLL_CONTEXTS_ACTION_METRIC, transportService, searchTransportMetrics, (request, channel, task) -> { searchService.freeAllScrollContexts(); channel.sendResponse(TransportResponse.Empty.INSTANCE); - } + }) ); TransportActionProxy.registerProxyAction( transportService, @@ -423,19 +456,32 @@ public static void registerRequestHandler(TransportService transportService, Sea DFS_ACTION_NAME, EsExecutors.DIRECT_EXECUTOR_SERVICE, ShardSearchRequest::new, - (request, channel, task) -> searchService.executeDfsPhase(request, (SearchShardTask) task, new ChannelActionListener<>(channel)) + instrumentedHandler( + DFS_ACTION_METRIC, + transportService, + searchTransportMetrics, + (request, channel, task) -> searchService.executeDfsPhase( + request, + (SearchShardTask) task, + new ChannelActionListener<>(channel) + ) + ) ); - TransportActionProxy.registerProxyAction(transportService, DFS_ACTION_NAME, true, DfsSearchResult::new); transportService.registerRequestHandler( QUERY_ACTION_NAME, EsExecutors.DIRECT_EXECUTOR_SERVICE, ShardSearchRequest::new, - (request, channel, task) -> searchService.executeQueryPhase( - request, - (SearchShardTask) task, - new ChannelActionListener<>(channel) + instrumentedHandler( + QUERY_ACTION_METRIC, + transportService, + searchTransportMetrics, + (request, channel, task) -> searchService.executeQueryPhase( + request, + (SearchShardTask) task, + new ChannelActionListener<>(channel) + ) ) ); TransportActionProxy.registerProxyActionWithDynamicResponseType( @@ -449,9 +495,16 @@ public static void registerRequestHandler(TransportService transportService, Sea QUERY_ID_ACTION_NAME, EsExecutors.DIRECT_EXECUTOR_SERVICE, QuerySearchRequest::new, - (request, channel, task) -> { - searchService.executeQueryPhase(request, (SearchShardTask) task, new ChannelActionListener<>(channel)); - } + instrumentedHandler( + QUERY_ID_ACTION_METRIC, + transportService, + searchTransportMetrics, + (request, channel, task) -> searchService.executeQueryPhase( + request, + (SearchShardTask) task, + new ChannelActionListener<>(channel) + ) + ) ); TransportActionProxy.registerProxyAction(transportService, QUERY_ID_ACTION_NAME, true, QuerySearchResult::new); @@ -459,9 +512,16 @@ public static void registerRequestHandler(TransportService transportService, Sea QUERY_SCROLL_ACTION_NAME, EsExecutors.DIRECT_EXECUTOR_SERVICE, InternalScrollSearchRequest::new, - (request, channel, task) -> { - searchService.executeQueryPhase(request, (SearchShardTask) task, new ChannelActionListener<>(channel)); - } + instrumentedHandler( + QUERY_SCROLL_ACTION_METRIC, + transportService, + searchTransportMetrics, + (request, channel, task) -> searchService.executeQueryPhase( + request, + (SearchShardTask) task, + new ChannelActionListener<>(channel) + ) + ) ); TransportActionProxy.registerProxyAction(transportService, QUERY_SCROLL_ACTION_NAME, true, ScrollQuerySearchResult::new); @@ -469,22 +529,33 @@ public static void registerRequestHandler(TransportService transportService, Sea QUERY_FETCH_SCROLL_ACTION_NAME, EsExecutors.DIRECT_EXECUTOR_SERVICE, InternalScrollSearchRequest::new, - (request, channel, task) -> { - searchService.executeFetchPhase(request, (SearchShardTask) task, new ChannelActionListener<>(channel)); - } + instrumentedHandler( + QUERY_FETCH_SCROLL_ACTION_METRIC, + transportService, + searchTransportMetrics, + (request, channel, task) -> searchService.executeFetchPhase( + request, + (SearchShardTask) task, + new ChannelActionListener<>(channel) + ) + ) ); 
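// A minimal, self-contained sketch of the instrumentation pattern applied in the registrations above,
// under simplified assumptions: every transport request handler is wrapped so that the elapsed time of
// each call is recorded into a latency histogram, tagged with the action name. The Handler,
// LatencyHistogram, and Instrumentation names below are invented stand-ins for illustration, not the
// actual org.elasticsearch APIs (the real decorator, instrumentedHandler, appears in this diff).

import java.util.Map;
import java.util.function.LongSupplier;

interface Handler<R> {
    void messageReceived(R request) throws Exception;
}

interface LatencyHistogram {
    void record(long millis, Map<String, Object> attributes);
}

final class Instrumentation {
    // Decorates a handler so each invocation records its duration under the given action name,
    // mirroring the shape of SearchTransportService#instrumentedHandler in this change.
    static <R> Handler<R> instrumented(String action, LongSupplier relativeTimeMillis, LatencyHistogram latencies, Handler<R> delegate) {
        return request -> {
            long startTime = relativeTimeMillis.getAsLong();
            try {
                delegate.messageReceived(request);
            } finally {
                long elapsed = relativeTimeMillis.getAsLong() - startTime;
                latencies.record(elapsed, Map.of("action", action));
            }
        };
    }
}

// Note: as in the change itself, only the synchronous part of messageReceived is measured; handlers that
// complete asynchronously through a listener finish their work outside the recorded window.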
TransportActionProxy.registerProxyAction(transportService, QUERY_FETCH_SCROLL_ACTION_NAME, true, ScrollQueryFetchSearchResult::new); - TransportRequestHandler shardFetchHandler = (request, channel, task) -> searchService.executeFetchPhase( - request, - (SearchShardTask) task, - new ChannelActionListener<>(channel) - ); transportService.registerRequestHandler( FETCH_ID_SCROLL_ACTION_NAME, EsExecutors.DIRECT_EXECUTOR_SERVICE, ShardFetchRequest::new, - shardFetchHandler + instrumentedHandler( + FETCH_ID_SCROLL_ACTION_METRIC, + transportService, + searchTransportMetrics, + (request, channel, task) -> searchService.executeFetchPhase( + request, + (SearchShardTask) task, + new ChannelActionListener<>(channel) + ) + ) ); TransportActionProxy.registerProxyAction(transportService, FETCH_ID_SCROLL_ACTION_NAME, true, FetchSearchResult::new); @@ -494,7 +565,16 @@ public static void registerRequestHandler(TransportService transportService, Sea true, true, ShardFetchSearchRequest::new, - shardFetchHandler + instrumentedHandler( + FETCH_ID_ACTION_METRIC, + transportService, + searchTransportMetrics, + (request, channel, task) -> searchService.executeFetchPhase( + request, + (SearchShardTask) task, + new ChannelActionListener<>(channel) + ) + ) ); TransportActionProxy.registerProxyAction(transportService, FETCH_ID_ACTION_NAME, true, FetchSearchResult::new); @@ -502,16 +582,39 @@ public static void registerRequestHandler(TransportService transportService, Sea QUERY_CAN_MATCH_NODE_NAME, transportService.getThreadPool().executor(ThreadPool.Names.SEARCH_COORDINATION), CanMatchNodeRequest::new, - (request, channel, task) -> searchService.canMatch(request, new ChannelActionListener<>(channel)) + instrumentedHandler( + QUERY_CAN_MATCH_NODE_METRIC, + transportService, + searchTransportMetrics, + (request, channel, task) -> searchService.canMatch(request, new ChannelActionListener<>(channel)) + ) ); TransportActionProxy.registerProxyAction(transportService, QUERY_CAN_MATCH_NODE_NAME, true, CanMatchNodeResponse::new); } + private static TransportRequestHandler instrumentedHandler( + String actionQualifier, + TransportService transportService, + SearchTransportAPMMetrics searchTransportMetrics, + TransportRequestHandler transportRequestHandler + ) { + return (request, channel, task) -> { + var startTime = transportService.getThreadPool().relativeTimeInMillis(); + try { + transportRequestHandler.messageReceived(request, channel, task); + } finally { + var elapsedTime = transportService.getThreadPool().relativeTimeInMillis() - startTime; + searchTransportMetrics.getActionLatencies().record(elapsedTime, Map.of(ACTION_ATTRIBUTE_NAME, actionQualifier)); + } + }; + } + /** * Returns a connection to the given node on the provided cluster. If the cluster alias is null the node will be resolved * against the local cluster. + * * @param clusterAlias the cluster alias the node should be resolved against - * @param node the node to resolve + * @param node the node to resolve * @return a connection to the given node belonging to the cluster with the provided alias. 
*/ public Transport.Connection getConnection(@Nullable String clusterAlias, DiscoveryNode node) { diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java index 2bc642e6c0907..eb01cb2f3137b 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java @@ -28,12 +28,12 @@ import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchService; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.internal.AliasFilter; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.internal.ShardSearchContextId; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; @@ -252,7 +252,10 @@ public void onFailure(Exception e) { @Override protected void doRun() { - sendSearchResponse(InternalSearchResponse.EMPTY_WITH_TOTAL_HITS, results.getAtomicArray()); + sendSearchResponse( + new SearchResponseSections(SearchHits.EMPTY_WITH_TOTAL_HITS, null, null, false, null, null, 1), + results.getAtomicArray() + ); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java index 6045a9ff5efa3..4e9aed5f643f2 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java @@ -63,10 +63,8 @@ import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchService; import org.elasticsearch.search.aggregations.AggregationReduceContext; -import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.internal.AliasFilter; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.profile.SearchProfileResults; import org.elasticsearch.search.profile.SearchProfileShardResult; @@ -162,7 +160,8 @@ public TransportSearchAction( ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, NamedWriteableRegistry namedWriteableRegistry, - ExecutorSelector executorSelector + ExecutorSelector executorSelector, + SearchTransportAPMMetrics searchTransportMetrics ) { super(TYPE.name(), transportService, actionFilters, SearchRequest::new, EsExecutors.DIRECT_EXECUTOR_SERVICE); this.threadPool = threadPool; @@ -170,7 +169,7 @@ public TransportSearchAction( this.searchPhaseController = searchPhaseController; this.searchTransportService = searchTransportService; this.remoteClusterService = searchTransportService.getRemoteClusterService(); - SearchTransportService.registerRequestHandler(transportService, searchService); + SearchTransportService.registerRequestHandler(transportService, searchService, searchTransportMetrics); this.clusterService = clusterService; this.transportService = transportService; this.searchService = 
searchService; @@ -541,19 +540,16 @@ public void onResponse(SearchResponse searchResponse) { ? null : new SearchProfileResults(profileResults); - InternalSearchResponse internalSearchResponse = new InternalSearchResponse( - searchResponse.getHits(), - (InternalAggregations) searchResponse.getAggregations(), - searchResponse.getSuggest(), - profile, - searchResponse.isTimedOut(), - searchResponse.isTerminatedEarly(), - searchResponse.getNumReducePhases() - ); ActionListener.respondAndRelease( listener, new SearchResponse( - internalSearchResponse, + searchResponse.getHits(), + searchResponse.getAggregations(), + searchResponse.getSuggest(), + searchResponse.isTimedOut(), + searchResponse.isTerminatedEarly(), + profile, + searchResponse.getNumReducePhases(), searchResponse.getScrollId(), searchResponse.getTotalShards(), searchResponse.getSuccessfulShards(), diff --git a/server/src/main/java/org/elasticsearch/action/support/PlainActionFuture.java b/server/src/main/java/org/elasticsearch/action/support/PlainActionFuture.java index 721983b6af0e7..e2b8fcbf2825c 100644 --- a/server/src/main/java/org/elasticsearch/action/support/PlainActionFuture.java +++ b/server/src/main/java/org/elasticsearch/action/support/PlainActionFuture.java @@ -31,13 +31,15 @@ public class PlainActionFuture implements ActionFuture, ActionListener { @Override - public void onResponse(T result) { + public void onResponse(@Nullable T result) { set(result); } @Override public void onFailure(Exception e) { - setException(e); + if (sync.setException(Objects.requireNonNull(e))) { + done(false); + } } private static final String BLOCKING_OP_REASON = "Blocking operation"; @@ -115,23 +117,9 @@ public boolean cancel(boolean mayInterruptIfRunning) { return false; } done(false); - if (mayInterruptIfRunning) { - interruptTask(); - } return true; } - /** - * Subclasses can override this method to implement interruption of the - * future's computation. The method is invoked automatically by a successful - * call to {@link #cancel(boolean) cancel(true)}. - *
- * The default implementation does nothing. - * - * @since 10.0 - */ - protected void interruptTask() {} - /** * Subclasses should invoke this method to set the result of the computation * to {@code value}. This will set the state of the future to @@ -141,7 +129,7 @@ protected void interruptTask() {} * @param value the value that was the result of the task. * @return true if the state was successfully changed. */ - protected boolean set(@Nullable T value) { + protected final boolean set(@Nullable T value) { boolean result = sync.set(value); if (result) { done(true); @@ -149,33 +137,6 @@ protected boolean set(@Nullable T value) { return result; } - /** - * Subclasses should invoke this method to set the result of the computation - * to an error, {@code throwable}. This will set the state of the future to - * {@link PlainActionFuture.Sync#COMPLETED} and call {@link #done(boolean)} if the - * state was successfully changed. - * - * @param throwable the exception that the task failed with. - * @return true if the state was successfully changed. - * @throws Error if the throwable was an {@link Error}. - */ - protected boolean setException(Throwable throwable) { - boolean result = sync.setException(Objects.requireNonNull(throwable)); - if (result) { - done(false); - } - - // If it's an Error, we want to make sure it reaches the top of the - // call stack, so we rethrow it. - - // we want to notify the listeners we have with errors as well, as it breaks - // how we work in ES in terms of using assertions - // if (throwable instanceof Error) { - // throw (Error) throwable; - // } - return result; - } - /** * Called when the {@link PlainActionFuture} is completed. The {@code success} boolean indicates if the {@link * PlainActionFuture} was successfully completed (the value is {@code true}). In the cases the {@link PlainActionFuture} @@ -194,16 +155,6 @@ public T actionGet() { } } - @Override - public T actionGet(String timeout) { - return actionGet(TimeValue.parseTimeValue(timeout, null, getClass().getSimpleName() + ".actionGet.timeout")); - } - - @Override - public T actionGet(long timeoutMillis) { - return actionGet(timeoutMillis, TimeUnit.MILLISECONDS); - } - @Override public T actionGet(TimeValue timeout) { return actionGet(timeout.millis(), TimeUnit.MILLISECONDS); @@ -272,7 +223,7 @@ static final class Sync extends AbstractQueuedSynchronizer { static final int CANCELLED = 4; private V value; - private Throwable exception; + private Exception exception; /* * Acquisition succeeds if the future is done, otherwise it fails. @@ -311,7 +262,7 @@ V get(long nanos) throws TimeoutException, CancellationException, ExecutionExcep } /** - * Blocks until {@link #complete(Object, Throwable, int)} has been + * Blocks until {@link #complete(Object, Exception, int)} has been * successfully called. Throws a {@link CancellationException} if the task * was cancelled, or a {@link ExecutionException} if the task completed with * an error. @@ -390,8 +341,8 @@ boolean set(@Nullable V v) { /** * Transition to the COMPLETED state and set the exception. */ - boolean setException(Throwable t) { - return complete(null, t, COMPLETED); + boolean setException(Exception e) { + return complete(null, e, COMPLETED); } /** @@ -409,16 +360,16 @@ boolean cancel() { * final state ({@link #COMPLETED} or {@link #CANCELLED}). * * @param v the value to set as the result of the computation. - * @param t the exception to set as the result of the computation. + * @param e the exception to set as the result of the computation. 
* @param finalState the state to transition to. */ - private boolean complete(@Nullable V v, @Nullable Throwable t, int finalState) { + private boolean complete(@Nullable V v, @Nullable Exception e, int finalState) { boolean doCompletion = compareAndSetState(RUNNING, COMPLETING); if (doCompletion) { // If this thread successfully transitioned to COMPLETING, set the value // and exception and then release to the final state. this.value = v; - this.exception = t; + this.exception = e; releaseShared(finalState); } else if (getState() == COMPLETING) { // If some other thread is currently completing the future, block until diff --git a/server/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java b/server/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java index e37f248246920..4fb243891709b 100644 --- a/server/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesAction.java @@ -25,6 +25,8 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.CheckedConsumer; +import org.elasticsearch.core.Releasables; +import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportChannel; import org.elasticsearch.transport.TransportRequest; @@ -96,6 +98,23 @@ protected void doExecute(Task task, NodesRequest request, ActionListener { + final List drainedResponses; + synchronized (responses) { + drainedResponses = List.copyOf(responses); + responses.clear(); + } + Releasables.wrap(Iterators.map(drainedResponses.iterator(), r -> r::decRef)).close(); + }); + } + } + @Override protected void sendItemRequest(DiscoveryNode discoveryNode, ActionListener listener) { final var nodeRequest = newNodeRequest(request); @@ -118,9 +137,14 @@ protected void sendItemRequest(DiscoveryNode discoveryNode, ActionListener, Exception> onCompletion() { // ref releases all happen-before here so no need to be synchronized - return l -> newResponseAsync(task, request, responses, exceptions, l); + return l -> { + try (var ignored = Releasables.wrap(Iterators.map(responses.iterator(), r -> r::decRef))) { + newResponseAsync(task, request, responses, exceptions, l); + } + }; } @Override @@ -154,9 +182,11 @@ private Writeable.Reader nodeResponseReader(DiscoveryNode discover } /** - * Create a new {@link NodesResponse} (multi-node response). + * Create a new {@link NodesResponse}. This method is executed on {@link #finalExecutor}. * - * @param request The associated request. + * @param request The request whose response we are constructing. {@link TransportNodesAction} may have already released all its + * references to this object before calling this method, so it's up to individual implementations to retain their own + * reference to the request if still needed here. * @param responses All successful node-level responses. * @param failures All node-level failures. * @return Never {@code null}. @@ -166,7 +196,11 @@ private Writeable.Reader nodeResponseReader(DiscoveryNode discover /** * Create a new {@link NodesResponse}, possibly asynchronously. The default implementation is synchronous and calls - * {@link #newResponse(BaseNodesRequest, List, List)} + * {@link #newResponse(BaseNodesRequest, List, List)}. This method is executed on {@link #finalExecutor}. + * + * @param request The request whose response we are constructing. 
{@link TransportNodesAction} may have already released all its + * references to this object before calling this method, so it's up to individual implementations to retain their own + * reference to the request if still needed here. */ protected void newResponseAsync( Task task, @@ -175,7 +209,7 @@ protected void newResponseAsync( List failures, ActionListener listener ) { - ActionListener.completeWith(listener, () -> newResponse(request, responses, failures)); + ActionListener.run(listener, l -> ActionListener.respondAndRelease(l, newResponse(request, responses, failures))); } protected abstract NodeRequest newNodeRequest(NodesRequest request); diff --git a/server/src/main/java/org/elasticsearch/client/internal/ClusterAdminClient.java b/server/src/main/java/org/elasticsearch/client/internal/ClusterAdminClient.java index 9e3bed8cef09a..a8365a62c9e58 100644 --- a/server/src/main/java/org/elasticsearch/client/internal/ClusterAdminClient.java +++ b/server/src/main/java/org/elasticsearch/client/internal/ClusterAdminClient.java @@ -19,7 +19,6 @@ import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequestBuilder; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; -import org.elasticsearch.action.admin.cluster.node.reload.NodesReloadSecureSettingsRequestBuilder; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequestBuilder; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; @@ -85,10 +84,6 @@ import org.elasticsearch.action.admin.cluster.storedscripts.GetStoredScriptResponse; import org.elasticsearch.action.admin.cluster.storedscripts.PutStoredScriptRequest; import org.elasticsearch.action.admin.cluster.storedscripts.PutStoredScriptRequestBuilder; -import org.elasticsearch.action.admin.indices.dangling.delete.DeleteDanglingIndexRequest; -import org.elasticsearch.action.admin.indices.dangling.import_index.ImportDanglingIndexRequest; -import org.elasticsearch.action.admin.indices.dangling.list.ListDanglingIndicesRequest; -import org.elasticsearch.action.admin.indices.dangling.list.ListDanglingIndicesResponse; import org.elasticsearch.action.ingest.DeletePipelineRequest; import org.elasticsearch.action.ingest.DeletePipelineRequestBuilder; import org.elasticsearch.action.ingest.GetPipelineRequest; @@ -168,11 +163,6 @@ public interface ClusterAdminClient extends ElasticsearchClient { */ ClusterUpdateSettingsRequestBuilder prepareUpdateSettings(); - /** - * Re initialize each cluster node and pass them the secret store password. - */ - NodesReloadSecureSettingsRequestBuilder prepareReloadSecureSettings(); - /** * Reroutes allocation of shards. Advance API. */ @@ -552,34 +542,4 @@ public interface ClusterAdminClient extends ElasticsearchClient { * Get a script from the cluster state */ void getStoredScript(GetStoredScriptRequest request, ActionListener listener); - - /** - * List dangling indices on all nodes. - */ - void listDanglingIndices(ListDanglingIndicesRequest request, ActionListener listener); - - /** - * List dangling indices on all nodes. - */ - ActionFuture listDanglingIndices(ListDanglingIndicesRequest request); - - /** - * Restore specified dangling indices. - */ - void importDanglingIndex(ImportDanglingIndexRequest request, ActionListener listener); - - /** - * Restore specified dangling indices. 
- */ - ActionFuture importDanglingIndex(ImportDanglingIndexRequest request); - - /** - * Delete specified dangling indices. - */ - void deleteDanglingIndex(DeleteDanglingIndexRequest request, ActionListener listener); - - /** - * Delete specified dangling indices. - */ - ActionFuture deleteDanglingIndex(DeleteDanglingIndexRequest request); } diff --git a/server/src/main/java/org/elasticsearch/client/internal/IndicesAdminClient.java b/server/src/main/java/org/elasticsearch/client/internal/IndicesAdminClient.java index b16eba26f3594..9ba26b95244ab 100644 --- a/server/src/main/java/org/elasticsearch/client/internal/IndicesAdminClient.java +++ b/server/src/main/java/org/elasticsearch/client/internal/IndicesAdminClient.java @@ -69,9 +69,6 @@ import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest; import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequestBuilder; -import org.elasticsearch.action.admin.indices.shards.IndicesShardStoreRequestBuilder; -import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresRequest; -import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse; import org.elasticsearch.action.admin.indices.shrink.ResizeRequest; import org.elasticsearch.action.admin.indices.shrink.ResizeRequestBuilder; import org.elasticsearch.action.admin.indices.shrink.ResizeResponse; @@ -150,27 +147,6 @@ public interface IndicesAdminClient extends ElasticsearchClient { */ IndicesSegmentsRequestBuilder prepareSegments(String... indices); - /** - * The shard stores info of one or more indices. - * - * @param request The indices shard stores request - * @return The result future - */ - ActionFuture shardStores(IndicesShardStoresRequest request); - - /** - * The shard stores info of one or more indices. - * - * @param request The indices shard stores request - * @param listener A listener to be notified with a result - */ - void shardStores(IndicesShardStoresRequest request, ActionListener listener); - - /** - * The shard stores info of one or more indices. - */ - IndicesShardStoreRequestBuilder prepareShardStores(String... indices); - /** * Creates an index using an explicit request allowing to specify the settings of the index. 
* diff --git a/server/src/main/java/org/elasticsearch/client/internal/support/AbstractClient.java b/server/src/main/java/org/elasticsearch/client/internal/support/AbstractClient.java index 075d1a4bb1e66..67f9daa94fc2e 100644 --- a/server/src/main/java/org/elasticsearch/client/internal/support/AbstractClient.java +++ b/server/src/main/java/org/elasticsearch/client/internal/support/AbstractClient.java @@ -28,7 +28,6 @@ import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequestBuilder; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; import org.elasticsearch.action.admin.cluster.node.info.TransportNodesInfoAction; -import org.elasticsearch.action.admin.cluster.node.reload.NodesReloadSecureSettingsRequestBuilder; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequestBuilder; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; @@ -139,13 +138,6 @@ import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; -import org.elasticsearch.action.admin.indices.dangling.delete.DeleteDanglingIndexRequest; -import org.elasticsearch.action.admin.indices.dangling.delete.TransportDeleteDanglingIndexAction; -import org.elasticsearch.action.admin.indices.dangling.import_index.ImportDanglingIndexRequest; -import org.elasticsearch.action.admin.indices.dangling.import_index.TransportImportDanglingIndexAction; -import org.elasticsearch.action.admin.indices.dangling.list.ListDanglingIndicesAction; -import org.elasticsearch.action.admin.indices.dangling.list.ListDanglingIndicesRequest; -import org.elasticsearch.action.admin.indices.dangling.list.ListDanglingIndicesResponse; import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequestBuilder; import org.elasticsearch.action.admin.indices.delete.TransportDeleteIndexAction; @@ -204,10 +196,6 @@ import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsAction; import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest; import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequestBuilder; -import org.elasticsearch.action.admin.indices.shards.IndicesShardStoreRequestBuilder; -import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresAction; -import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresRequest; -import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse; import org.elasticsearch.action.admin.indices.shrink.ResizeAction; import org.elasticsearch.action.admin.indices.shrink.ResizeRequest; import org.elasticsearch.action.admin.indices.shrink.ResizeRequestBuilder; @@ -737,11 +725,6 @@ public ClusterUpdateSettingsRequestBuilder prepareUpdateSettings() { return new ClusterUpdateSettingsRequestBuilder(this); } - @Override - public NodesReloadSecureSettingsRequestBuilder prepareReloadSecureSettings() { - return new NodesReloadSecureSettingsRequestBuilder(this); - } - @Override public ActionFuture nodesInfo(final NodesInfoRequest request) { return execute(TransportNodesInfoAction.TYPE, request); @@ -1047,36 +1030,6 @@ public void getStoredScript(final GetStoredScriptRequest request, final ActionLi execute(GetStoredScriptAction.INSTANCE, request, listener); } - 
@Override - public ActionFuture listDanglingIndices(ListDanglingIndicesRequest request) { - return execute(ListDanglingIndicesAction.INSTANCE, request); - } - - @Override - public void listDanglingIndices(ListDanglingIndicesRequest request, ActionListener listener) { - execute(ListDanglingIndicesAction.INSTANCE, request, listener); - } - - @Override - public ActionFuture importDanglingIndex(ImportDanglingIndexRequest request) { - return execute(TransportImportDanglingIndexAction.TYPE, request); - } - - @Override - public void importDanglingIndex(ImportDanglingIndexRequest request, ActionListener listener) { - execute(TransportImportDanglingIndexAction.TYPE, request, listener); - } - - @Override - public ActionFuture deleteDanglingIndex(DeleteDanglingIndexRequest request) { - return execute(TransportDeleteDanglingIndexAction.TYPE, request); - } - - @Override - public void deleteDanglingIndex(DeleteDanglingIndexRequest request, ActionListener listener) { - execute(TransportDeleteDanglingIndexAction.TYPE, request, listener); - } - @Override public GetStoredScriptRequestBuilder prepareGetStoredScript(String id) { return new GetStoredScriptRequestBuilder(this).setId(id); @@ -1399,21 +1352,6 @@ public IndicesSegmentsRequestBuilder prepareSegments(String... indices) { return new IndicesSegmentsRequestBuilder(this).setIndices(indices); } - @Override - public ActionFuture shardStores(IndicesShardStoresRequest request) { - return execute(IndicesShardStoresAction.INSTANCE, request); - } - - @Override - public void shardStores(IndicesShardStoresRequest request, ActionListener listener) { - execute(IndicesShardStoresAction.INSTANCE, request, listener); - } - - @Override - public IndicesShardStoreRequestBuilder prepareShardStores(String... indices) { - return new IndicesShardStoreRequestBuilder(this, indices); - } - @Override public ActionFuture updateSettings(final UpdateSettingsRequest request) { return execute(UpdateSettingsAction.INSTANCE, request); diff --git a/server/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java b/server/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java index 1744bcc91b834..26c453d419f4c 100644 --- a/server/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java +++ b/server/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java @@ -26,7 +26,6 @@ import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.DiskThresholdSettings; import org.elasticsearch.cluster.service.ClusterService; @@ -97,7 +96,6 @@ public class InternalClusterInfoService implements ClusterInfoService, ClusterSt private final Object mutex = new Object(); private final List> nextRefreshListeners = new ArrayList<>(); - private final ClusterService clusterService; private AsyncRefresh currentRefresh; private RefreshScheduler refreshScheduler; @@ -108,7 +106,6 @@ public InternalClusterInfoService(Settings settings, ClusterService clusterServi this.indicesStatsSummary = IndicesStatsSummary.EMPTY; this.threadPool = threadPool; this.client = client; - this.clusterService = clusterService; this.updateFrequency = INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING.get(settings); this.fetchTimeout = INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING.get(settings); this.enabled = 
DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING.get(settings); @@ -250,7 +247,6 @@ public void onResponse(IndicesStatsResponse indicesStatsResponse) { final Map reservedSpaceBuilders = new HashMap<>(); buildShardLevelInfo( - clusterService.state().routingTable(), adjustShardStats(stats), shardSizeByIdentifierBuilder, shardDataSetSizeBuilder, @@ -445,7 +441,6 @@ public void addListener(Consumer clusterInfoConsumer) { } static void buildShardLevelInfo( - RoutingTable routingTable, ShardStats[] stats, Map shardSizes, Map shardDataSetSizeBuilder, @@ -453,7 +448,7 @@ static void buildShardLevelInfo( Map reservedSpaceByShard ) { for (ShardStats s : stats) { - final ShardRouting shardRouting = routingTable.deduplicate(s.getShardRouting()); + final ShardRouting shardRouting = s.getShardRouting(); dataPathByShard.put(ClusterInfo.NodeAndShard.from(shardRouting), s.getDataPath()); final StoreStats storeStats = s.getStats().getStore(); @@ -462,7 +457,7 @@ static void buildShardLevelInfo( } final long size = storeStats.sizeInBytes(); final long dataSetSize = storeStats.totalDataSetSizeInBytes(); - final long reserved = storeStats.getReservedSize().getBytes(); + final long reserved = storeStats.reservedSizeInBytes(); final String shardIdentifier = ClusterInfo.shardIdentifierFromRouting(shardRouting); logger.trace("shard: {} size: {} reserved: {}", shardIdentifier, size, reserved); diff --git a/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java b/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java index 470f175deb247..987fe0c4f5ed7 100644 --- a/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java +++ b/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java @@ -11,6 +11,7 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; import org.elasticsearch.cluster.ClusterState.Custom; +import org.elasticsearch.cluster.metadata.SingleNodeShutdownMetadata; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Iterators; @@ -18,6 +19,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.util.Maps; +import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.common.xcontent.ChunkedToXContentHelper; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.SuppressForbidden; @@ -41,6 +43,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Collection; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; @@ -50,12 +53,14 @@ import java.util.Set; import java.util.stream.Stream; +import static org.elasticsearch.TransportVersions.SNAPSHOTS_IN_PROGRESS_TRACKING_REMOVING_NODES_ADDED; + /** * Meta data about snapshots that are currently executing */ public class SnapshotsInProgress extends AbstractNamedDiffable implements Custom { - public static final SnapshotsInProgress EMPTY = new SnapshotsInProgress(Map.of()); + public static final SnapshotsInProgress EMPTY = new SnapshotsInProgress(Map.of(), Set.of()); public static final String TYPE = "snapshots"; @@ -64,12 +69,33 @@ public class SnapshotsInProgress extends AbstractNamedDiffable implement // keyed by repository name private final Map entries; + /** + * IDs of nodes which are marked for removal, or which were previously marked for removal and still have 
running shard snapshots. + */ + // When a node is marked for removal it pauses all its shard snapshots as promptly as possible. When each shard snapshot pauses it + // enters state PAUSED_FOR_NODE_REMOVAL to allow the shard to move to a different node where its snapshot can resume. However, if the + // removal marker is deleted before the node shuts down then we need to make sure to resume the snapshots of any remaining shards, which + // we do by moving all those PAUSED_FOR_NODE_REMOVAL shards back to state INIT. The problem is that the data node needs to be able to + // distinguish an INIT shard whose snapshot was successfully paused and now needs to be resumed from an INIT shard whose move to state + // PAUSED_FOR_NODE_REMOVAL has not yet been processed on the master: the latter kind of shard will move back to PAUSED_FOR_NODE_REMOVAL + // in a subsequent update and so shouldn't be resumed. The solution is to wait for all the shards on the previously-shutting-down node + // to finish pausing before resuming any of them. We do this by tracking the nodes in this field, avoiding moving any shards back to + // state INIT while the node appears in this set and, conversely, we only remove nodes from this set when none of their shards are in + // INIT state. + private final Set nodesIdsForRemoval; + public static SnapshotsInProgress get(ClusterState state) { return state.custom(TYPE, EMPTY); } public SnapshotsInProgress(StreamInput in) throws IOException { - this(collectByRepo(in)); + this(collectByRepo(in), readNodeIdsForRemoval(in)); + } + + private static Set readNodeIdsForRemoval(StreamInput in) throws IOException { + return in.getTransportVersion().onOrAfter(SNAPSHOTS_IN_PROGRESS_TRACKING_REMOVING_NODES_ADDED) + ? in.readCollectionAsImmutableSet(StreamInput::readString) + : Set.of(); } private static Map collectByRepo(StreamInput in) throws IOException { @@ -89,8 +115,9 @@ private static Map collectByRepo(StreamInput in) throws IOExcept return res; } - private SnapshotsInProgress(Map entries) { + private SnapshotsInProgress(Map entries, Set nodesIdsForRemoval) { this.entries = Map.copyOf(entries); + this.nodesIdsForRemoval = nodesIdsForRemoval; assert assertConsistentEntries(this.entries); } @@ -107,7 +134,7 @@ public SnapshotsInProgress withUpdatedEntriesForRepo(String repository, List toXContentChunked(ToXContent.Params ignored) { - return Iterators.concat(ChunkedToXContentHelper.startArray("snapshots"), asStream().iterator(), ChunkedToXContentHelper.endArray()); + return Iterators.concat( + ChunkedToXContentHelper.startArray("snapshots"), + asStream().iterator(), + ChunkedToXContentHelper.endArray(), + ChunkedToXContentHelper.startArray("node_ids_for_removal"), + Iterators.map(nodesIdsForRemoval.iterator(), s -> (builder, params) -> builder.value(s)), + ChunkedToXContentHelper.endArray() + ); } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; - return entries.equals(((SnapshotsInProgress) o).entries); + final var other = (SnapshotsInProgress) o; + return nodesIdsForRemoval.equals(other.nodesIdsForRemoval) && entries.equals(other.entries); } @Override public int hashCode() { - return entries.hashCode(); + return Objects.hash(entries, nodesIdsForRemoval); } @Override public String toString() { - StringBuilder builder = new StringBuilder("SnapshotsInProgress["); + StringBuilder builder = new StringBuilder("SnapshotsInProgress[entries=["); final Iterator entryList = asStream().iterator(); boolean firstEntry = 
true; while (entryList.hasNext()) { @@ -250,7 +290,7 @@ public String toString() { builder.append(entryList.next().snapshot().getSnapshotId().getName()); firstEntry = false; } - return builder.append("]").toString(); + return builder.append("],nodeIdsForRemoval=").append(nodesIdsForRemoval).append("]").toString(); } /** @@ -324,6 +364,10 @@ public static boolean completed(Collection shards) { return true; } + public boolean isNodeIdForRemoval(String nodeId) { + return nodeId != null && nodesIdsForRemoval.contains(nodeId); + } + private static boolean hasFailures(Map clones) { for (ShardSnapshotStatus value : clones.values()) { if (value.state().failed()) { @@ -384,6 +428,76 @@ private static boolean assertShardStateConsistent( return true; } + /** + * Adds any new node IDs to {@link #nodesIdsForRemoval}, and removes any node IDs that are no longer marked for shutdown if they have no + * running shard snapshots. + */ + public SnapshotsInProgress withUpdatedNodeIdsForRemoval(ClusterState clusterState) { + assert clusterState.getMinTransportVersion().onOrAfter(TransportVersions.SNAPSHOTS_IN_PROGRESS_TRACKING_REMOVING_NODES_ADDED); + + final var updatedNodeIdsForRemoval = new HashSet<>(nodesIdsForRemoval); + + final var nodeIdsMarkedForRemoval = getNodesIdsMarkedForRemoval(clusterState); + + // add any nodes newly marked for removal + updatedNodeIdsForRemoval.addAll(nodeIdsMarkedForRemoval); + + // remove any nodes which are no longer marked for shutdown if they have no running shard snapshots + updatedNodeIdsForRemoval.removeAll(getObsoleteNodeIdsForRemoval(nodeIdsMarkedForRemoval)); + + if (updatedNodeIdsForRemoval.equals(nodesIdsForRemoval)) { + return this; + } else { + return new SnapshotsInProgress(entries, Collections.unmodifiableSet(updatedNodeIdsForRemoval)); + } + } + + private static Set getNodesIdsMarkedForRemoval(ClusterState clusterState) { + final var nodesShutdownMetadata = clusterState.metadata().nodeShutdowns(); + final var shutdownMetadataCount = nodesShutdownMetadata.getAllNodeIds().size(); + if (shutdownMetadataCount == 0) { + return Set.of(); + } + + final Set result = Sets.newHashSetWithExpectedSize(shutdownMetadataCount); + for (final var entry : nodesShutdownMetadata.getAll().entrySet()) { + if (entry.getValue().getType() != SingleNodeShutdownMetadata.Type.RESTART) { + // Only pause the snapshot when the node is being removed (to let shards vacate) and not when it is restarting in place. If + // it is restarting and there are replicas to promote then we need #71333 to move the shard snapshot over; if there are no + // replicas then we do not expect the restart to be graceful so a PARTIAL or FAILED snapshot is ok. 
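+ // (all other shutdown types, i.e. REMOVE, REPLACE and SIGTERM, mean the node is expected to leave the cluster for good,
+ // so any non-RESTART entry lands in this set and its shard snapshots are paused)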
+ result.add(entry.getKey()); + } + } + return result; + } + + private Set getObsoleteNodeIdsForRemoval(Set latestNodeIdsMarkedForRemoval) { + final var obsoleteNodeIdsForRemoval = new HashSet<>(nodesIdsForRemoval); + obsoleteNodeIdsForRemoval.removeIf(latestNodeIdsMarkedForRemoval::contains); + if (obsoleteNodeIdsForRemoval.isEmpty()) { + return Set.of(); + } + for (final var byRepo : entries.values()) { + for (final var entry : byRepo.entries()) { + if (entry.state() == State.STARTED && entry.hasShardsInInitState()) { + for (final var shardSnapshotStatus : entry.shards().values()) { + if (shardSnapshotStatus.state() == ShardState.INIT) { + obsoleteNodeIdsForRemoval.remove(shardSnapshotStatus.nodeId()); + if (obsoleteNodeIdsForRemoval.isEmpty()) { + return Set.of(); + } + } + } + } + } + } + return obsoleteNodeIdsForRemoval; + } + + public boolean nodeIdsForRemovalChanged(SnapshotsInProgress other) { + return nodesIdsForRemoval.equals(other.nodesIdsForRemoval) == false; + } + public enum ShardState { INIT((byte) 0, false, false), SUCCESS((byte) 2, true, false), @@ -397,7 +511,12 @@ public enum ShardState { /** * Shard snapshot is waiting for another shard snapshot for the same shard and to the same repository to finish. */ - QUEUED((byte) 7, false, false); + QUEUED((byte) 7, false, false), + /** + * Primary shard is assigned to a node which is marked for removal from the cluster (or which was previously marked for removal and + * we're still waiting for its other shards to pause). + */ + PAUSED_FOR_NODE_REMOVAL((byte) 8, false, false); private final byte value; @@ -428,6 +547,7 @@ public static ShardState fromValue(byte value) { case 5 -> MISSING; case 6 -> WAITING; case 7 -> QUEUED; + case 8 -> PAUSED_FOR_NODE_REMOVAL; default -> throw new IllegalArgumentException("No shard snapshot state for value [" + value + "]"); }; } @@ -539,7 +659,8 @@ public ShardSnapshotStatus( private boolean assertConsistent() { // If the state is failed we have to have a reason for this failure assert state.failed() == false || reason != null; - assert (state != ShardState.INIT && state != ShardState.WAITING) || nodeId != null : "Null node id for state [" + state + "]"; + assert (state != ShardState.INIT && state != ShardState.WAITING && state != ShardState.PAUSED_FOR_NODE_REMOVAL) + || nodeId != null : "Null node id for state [" + state + "]"; assert state != ShardState.QUEUED || (nodeId == null && generation == null && reason == null) : "Found unexpected non-null values for queued state shard nodeId[" + nodeId + "][" + generation + "][" + reason + "]"; assert state == ShardState.SUCCESS || shardSnapshotResult == null; @@ -584,10 +705,14 @@ public ShardSnapshotResult shardSnapshotResult() { /** * Checks if this shard snapshot is actively executing. * A shard is defined as actively executing if it either is in a state that may write to the repository - * ({@link ShardState#INIT} or {@link ShardState#ABORTED}) or about to write to it in state {@link ShardState#WAITING}. + * ({@link ShardState#INIT} or {@link ShardState#ABORTED}) or about to write to it in state {@link ShardState#WAITING} or + * {@link ShardState#PAUSED_FOR_NODE_REMOVAL}. 
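+ * For example, a shard in {@link ShardState#PAUSED_FOR_NODE_REMOVAL} may already have written data to the repository and
+ * has not completed, so the snapshot as a whole must still be treated as running until the shard resumes elsewhere.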
*/ public boolean isActive() { - return state == ShardState.INIT || state == ShardState.ABORTED || state == ShardState.WAITING; + return switch (state) { + case INIT, ABORTED, WAITING, PAUSED_FOR_NODE_REMOVAL -> true; + case SUCCESS, FAILED, MISSING, QUEUED -> false; + }; } @Override @@ -824,6 +949,9 @@ private static boolean assertShardsConsistent( if ((state == State.INIT || state == State.ABORTED) && shards.isEmpty()) { return true; } + if (hasInitStateShards) { + assert state == State.STARTED : "shouldn't have INIT-state shards in state " + state; + } final Set indexNames = indices.keySet(); final Set indexNamesInShards = new HashSet<>(); shards.entrySet().forEach(s -> { @@ -1535,9 +1663,11 @@ private static final class SnapshotInProgressDiff implements NamedDiff { private final SnapshotsInProgress after; private final DiffableUtils.MapDiff> mapDiff; + private final Set nodeIdsForRemoval; SnapshotInProgressDiff(SnapshotsInProgress before, SnapshotsInProgress after) { this.mapDiff = DiffableUtils.diff(before.entries, after.entries, DiffableUtils.getStringKeySerializer()); + this.nodeIdsForRemoval = after.nodesIdsForRemoval; this.after = after; } @@ -1551,12 +1681,14 @@ private static final class SnapshotInProgressDiff implements NamedDiff { DiffableUtils.readJdkMapDiff(i, DiffableUtils.getStringKeySerializer(), ByRepo.INT_DIFF_VALUE_SERIALIZER) ) ); + this.nodeIdsForRemoval = readNodeIdsForRemoval(in); this.after = null; } @Override public SnapshotsInProgress apply(Custom part) { - return new SnapshotsInProgress(mapDiff.apply(((SnapshotsInProgress) part).entries)); + final var snapshotsInProgress = (SnapshotsInProgress) part; + return new SnapshotsInProgress(mapDiff.apply(snapshotsInProgress.entries), this.nodeIdsForRemoval); } @Override @@ -1577,6 +1709,11 @@ public void writeTo(StreamOutput out) throws IOException { } else { new SimpleDiffable.CompleteDiff<>(after).writeTo(out); } + if (out.getTransportVersion().onOrAfter(SNAPSHOTS_IN_PROGRESS_TRACKING_REMOVING_NODES_ADDED)) { + out.writeStringCollection(nodeIdsForRemoval); + } else { + assert nodeIdsForRemoval.isEmpty() : nodeIdsForRemoval; + } } } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/AliasValidator.java b/server/src/main/java/org/elasticsearch/cluster/metadata/AliasValidator.java index cc0ff0b26f4d7..6147712a5d70a 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/AliasValidator.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/AliasValidator.java @@ -24,7 +24,6 @@ import org.elasticsearch.xcontent.XContentParserConfiguration; import java.io.IOException; -import java.io.InputStream; import java.util.Optional; import java.util.function.Function; @@ -142,14 +141,11 @@ public static void validateAliasFilter( assert searchExecutionContext != null; try ( - InputStream inputStream = filter.streamInput(); - XContentParser parser = XContentFactory.xContentType(inputStream) - .xContent() - .createParser( - XContentParserConfiguration.EMPTY.withRegistry(xContentRegistry) - .withDeprecationHandler(LoggingDeprecationHandler.INSTANCE), - filter.streamInput() - ) + XContentParser parser = XContentHelper.createParserNotCompressed( + XContentParserConfiguration.EMPTY.withRegistry(xContentRegistry).withDeprecationHandler(LoggingDeprecationHandler.INSTANCE), + filter, + XContentHelper.xContentType(filter) + ) ) { validateAliasFilter(parser, searchExecutionContext); } catch (Exception e) { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java 
b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java index 34d8515d2dfdd..35f66f2848f5f 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.common.time.DateFormatters; import org.elasticsearch.common.util.FeatureFlag; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; @@ -38,7 +39,6 @@ import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContentObject; -import org.elasticsearch.xcontent.XContent; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentParserConfiguration; @@ -1129,8 +1129,7 @@ private static Instant getTimeStampFromRaw(Object rawTimestamp) { } private static Instant getTimestampFromParser(BytesReference source, XContentType xContentType) { - XContent xContent = xContentType.xContent(); - try (XContentParser parser = xContent.createParser(TS_EXTRACT_CONFIG, source.streamInput())) { + try (XContentParser parser = XContentHelper.createParserNotCompressed(TS_EXTRACT_CONFIG, source, xContentType)) { ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser); ensureExpectedToken(XContentParser.Token.FIELD_NAME, parser.nextToken(), parser); return switch (parser.nextToken()) { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamLifecycle.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamLifecycle.java index 8d7ce0525e943..83a5d99c8f348 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamLifecycle.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamLifecycle.java @@ -48,7 +48,7 @@ public class DataStreamLifecycle implements SimpleDiffable, ToXContentObject { // Versions over the wire - public static final TransportVersion ADDED_ENABLED_FLAG_VERSION = TransportVersions.V_8_500_057; + public static final TransportVersion ADDED_ENABLED_FLAG_VERSION = TransportVersions.V_8_500_061; public static final String DATA_STREAMS_LIFECYCLE_ONLY_SETTING_NAME = "data_streams.lifecycle_only.mode"; @@ -190,10 +190,8 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { out.writeOptionalWriteable(dataRetention); } - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_040)) { - out.writeOptionalWriteable(downsampling); - } if (out.getTransportVersion().onOrAfter(ADDED_ENABLED_FLAG_VERSION)) { + out.writeOptionalWriteable(downsampling); out.writeBoolean(enabled); } } @@ -204,14 +202,11 @@ public DataStreamLifecycle(StreamInput in) throws IOException { } else { dataRetention = null; } - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_040)) { - downsampling = in.readOptionalWriteable(Downsampling::read); - } else { - downsampling = null; - } if (in.getTransportVersion().onOrAfter(ADDED_ENABLED_FLAG_VERSION)) { + downsampling = in.readOptionalWriteable(Downsampling::read); enabled = in.readBoolean(); } else { + downsampling = null; enabled = true; } } diff --git a/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java 
b/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java index 43f117acbd9fe..e77a7b27e1a2c 100644 --- a/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java +++ b/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java @@ -337,7 +337,7 @@ public DiscoveryNode(StreamInput in) throws IOException { } } this.roles = Collections.unmodifiableSortedSet(roles); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_040)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { versionInfo = new VersionInformation(Version.readVersion(in), IndexVersion.readVersion(in), IndexVersion.readVersion(in)); } else { versionInfo = inferVersionInformation(Version.readVersion(in)); @@ -374,7 +374,7 @@ public void writeTo(StreamOutput out) throws IOException { o.writeString(role.roleNameAbbreviation()); o.writeBoolean(role.canContainData()); }); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_040)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { Version.writeVersion(versionInfo.nodeVersion(), out); IndexVersion.writeVersion(versionInfo.minIndexVersion(), out); IndexVersion.writeVersion(versionInfo.maxIndexVersion(), out); diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/IndexRouting.java b/server/src/main/java/org/elasticsearch/cluster/routing/IndexRouting.java index cd05ca3d523d8..9003e7720c747 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/IndexRouting.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/IndexRouting.java @@ -281,16 +281,14 @@ public Builder builder() { private Builder hashSource(XContentType sourceType, BytesReference source) { Builder b = builder(); - try { - try (XContentParser parser = sourceType.xContent().createParser(parserConfig, source.streamInput())) { - parser.nextToken(); // Move to first token - if (parser.currentToken() == null) { - throw new IllegalArgumentException("Error extracting routing: source didn't contain any routing fields"); - } - parser.nextToken(); - b.extractObject(null, parser); - ensureExpectedToken(null, parser.nextToken(), parser); + try (XContentParser parser = sourceType.xContent().createParser(parserConfig, source.streamInput())) { + parser.nextToken(); // Move to first token + if (parser.currentToken() == null) { + throw new IllegalArgumentException("Error extracting routing: source didn't contain any routing fields"); } + parser.nextToken(); + b.extractObject(null, parser); + ensureExpectedToken(null, parser.nextToken(), parser); } catch (IOException | ParsingException e) { throw new IllegalArgumentException("Error extracting routing: " + e.getMessage(), e); } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java index 986a6bd0385e8..723d65fbc2a3f 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java @@ -148,33 +148,6 @@ public IndexShardRoutingTable shardRoutingTable(ShardId shardId) { return shard; } - /** - * Try to deduplicate the given shard routing with an equal instance found in this routing table. 
This is used by the logic of the - * {@link org.elasticsearch.cluster.routing.allocation.decider.DiskThresholdDecider} and - * {@link org.elasticsearch.cluster.InternalClusterInfoService} to deduplicate instances created by a master node and those read from - * the network to speed up the use of {@link ShardRouting} as a map key in {@link org.elasticsearch.cluster.ClusterInfo#getDataPath}. - * - * @param shardRouting shard routing to deduplicate - * @return deduplicated shard routing from this routing table if an equivalent shard routing was found or the given instance otherwise - */ - public ShardRouting deduplicate(ShardRouting shardRouting) { - final IndexRoutingTable indexShardRoutingTable = indicesRouting.get(shardRouting.index().getName()); - if (indexShardRoutingTable == null) { - return shardRouting; - } - final IndexShardRoutingTable shardRoutingTable = indexShardRoutingTable.shard(shardRouting.id()); - if (shardRoutingTable == null) { - return shardRouting; - } - for (int i = 0; i < shardRoutingTable.size(); i++) { - ShardRouting found = shardRoutingTable.shard(i); - if (shardRouting.equals(found)) { - return found; - } - } - return shardRouting; - } - @Nullable public ShardRouting getByAllocationId(ShardId shardId, String allocationId) { final IndexRoutingTable indexRoutingTable = index(shardId.getIndex()); diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java index ed6b2af2fb55d..4e674648bc3a4 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java @@ -109,7 +109,7 @@ public RoutingAllocation( AllocationDeciders deciders, @Nullable RoutingNodes routingNodes, ClusterState clusterState, - @Nullable ClusterInfo clusterInfo, + ClusterInfo clusterInfo, SnapshotShardSizeInfo shardSizeInfo, long currentNanoTime ) { diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java index 64f88ac1e2417..56d0966e0594f 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java @@ -371,6 +371,11 @@ private static long getTotalDiskUsageInBytes(ClusterInfo clusterInfo, Metadata m // Visible for testing static long getIndexDiskUsageInBytes(ClusterInfo clusterInfo, IndexMetadata indexMetadata) { + if (indexMetadata.ignoreDiskWatermarks()) { + // disk watermarks are ignored for partial searchable snapshots + // and is equivalent to indexMetadata.isPartialSearchableSnapshot() + return 0; + } final long forecastedShardSize = indexMetadata.getForecastedShardSizeInBytes().orElse(-1L); long totalSizeInBytes = 0; int shardCount = 0; @@ -394,6 +399,11 @@ static long getIndexDiskUsageInBytes(ClusterInfo clusterInfo, IndexMetadata inde } private static long getShardDiskUsageInBytes(ShardRouting shardRouting, IndexMetadata indexMetadata, ClusterInfo clusterInfo) { + if (indexMetadata.ignoreDiskWatermarks()) { + // disk watermarks are ignored for partial searchable snapshots + // and is equivalent to indexMetadata.isPartialSearchableSnapshot() + return 0; + } return 
Math.max(indexMetadata.getForecastedShardSizeInBytes().orElse(0L), clusterInfo.getShardSize(shardRouting, 0L)); } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java index b562ba8e9482d..ac88374e74b34 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java @@ -9,6 +9,7 @@ package org.elasticsearch.cluster.routing.allocation.decider; import org.elasticsearch.cluster.SnapshotsInProgress; +import org.elasticsearch.cluster.metadata.SingleNodeShutdownMetadata; import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; @@ -101,6 +102,18 @@ private static Decision canMove(ShardRouting shardRouting, RoutingAllocation all continue; } + if (shardSnapshotStatus.state() == SnapshotsInProgress.ShardState.PAUSED_FOR_NODE_REMOVAL) { + // this shard snapshot is paused pending the removal of its assigned node + final var nodeShutdown = allocation.metadata().nodeShutdowns().get(shardRouting.currentNodeId()); + if (nodeShutdown != null && nodeShutdown.getType() != SingleNodeShutdownMetadata.Type.RESTART) { + // NB we check metadata().nodeShutdowns() too because if the node was marked for removal and then that mark was + // removed then the shard can still be PAUSED_FOR_NODE_REMOVAL while there are other shards on the node which + // haven't finished pausing yet. In that case the shard is about to go back into INIT state again, so we should keep + // it where it is. 
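+ // (net effect: if the removal marker is still present, the `continue` below lets this paused shard vacate the node;
+ // if the marker is gone, we fall through to the THROTTLE decision so the shard stays put until its snapshot resumes)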
+ continue; + } + } + return allocation.decision( Decision.THROTTLE, NAME, diff --git a/server/src/main/java/org/elasticsearch/common/collect/ImmutableOpenMap.java b/server/src/main/java/org/elasticsearch/common/collect/ImmutableOpenMap.java index 9f852f01397da..33d8fbf99f31f 100644 --- a/server/src/main/java/org/elasticsearch/common/collect/ImmutableOpenMap.java +++ b/server/src/main/java/org/elasticsearch/common/collect/ImmutableOpenMap.java @@ -10,9 +10,9 @@ import com.carrotsearch.hppc.ObjectCollection; import com.carrotsearch.hppc.ObjectObjectHashMap; -import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import com.carrotsearch.hppc.procedures.ObjectObjectProcedure; +import com.carrotsearch.hppc.procedures.ObjectProcedure; import org.elasticsearch.common.util.Maps; @@ -84,18 +84,9 @@ public boolean containsKey(Object key) { } @Override + @SuppressWarnings("unchecked") public boolean containsValue(Object value) { - for (ObjectCursor cursor : map.values()) { - if (Objects.equals(cursor.value, value)) { - return true; - } - } - return false; - } - - @Override - public VType put(KType key, VType value) { - throw new UnsupportedOperationException("modification is not supported"); + return map.values().contains((VType) value); } @Override @@ -103,16 +94,6 @@ public VType remove(Object key) { throw new UnsupportedOperationException("modification is not supported"); } - @Override - public void putAll(Map m) { - throw new UnsupportedOperationException("modification is not supported"); - } - - @Override - public void clear() { - throw new UnsupportedOperationException("modification is not supported"); - } - @Override public int size() { return map.size(); @@ -146,35 +127,7 @@ public int hashCode() { return super.hashCode(); } - private static final class ConversionIterator implements Iterator> { - - private final Iterator> original; - - ConversionIterator(Iterator> original) { - this.original = original; - } - - @Override - public boolean hasNext() { - return original.hasNext(); - } - - @Override - public Map.Entry next() { - final ObjectObjectCursor obj = original.next(); - if (obj == null) { - return null; - } - return new Maps.ImmutableEntry<>(obj.key, obj.value); - } - - @Override - public void remove() { - throw new UnsupportedOperationException("removal is unsupported"); - } - } - - private static final class EntrySet extends AbstractSet> { + private static class EntrySet extends AbstractSet> { private final ObjectObjectHashMap map; private EntrySet(ObjectObjectHashMap map) { @@ -187,13 +140,23 @@ public int size() { } @Override - public void clear() { - throw new UnsupportedOperationException("removal is unsupported"); + public boolean isEmpty() { + return map.isEmpty(); } @Override public Iterator> iterator() { - return new ConversionIterator<>(map.iterator()); + return Iterators.map(map.iterator(), c -> new Maps.ImmutableEntry<>(c.key, c.value)); + } + + @Override + public Spliterator> spliterator() { + return Spliterators.spliterator(iterator(), size(), Spliterator.IMMUTABLE); + } + + @Override + public void forEach(Consumer> action) { + map.forEach((Consumer>) c -> action.accept(new Maps.ImmutableEntry<>(c.key, c.value))); } @SuppressWarnings("unchecked") @@ -204,70 +167,87 @@ public boolean contains(Object o) { } Map.Entry e = (Map.Entry) o; Object key = e.getKey(); - if (map.containsKey((KType) key) == false) { + Object v = map.get((KType) key); + if (v == null && map.containsKey((KType) key) == false) { return false; } - 
Object val = map.get((KType) key); - return Objects.equals(val, e.getValue()); + return Objects.equals(v, e.getValue()); } @Override - public boolean remove(Object o) { - throw new UnsupportedOperationException("removal is not supported"); + public String toString() { + return map.toString(); + } + } + + private static class MapObjectCollection extends AbstractCollection { + private final ObjectCollection collection; + + private MapObjectCollection(ObjectCollection collection) { + this.collection = collection; } @Override - public Spliterator> spliterator() { - return Spliterators.spliterator(iterator(), size(), Spliterator.SIZED); + public int size() { + return collection.size(); } @Override - public void forEach(Consumer> action) { - map.forEach((Consumer>) ooCursor -> { - Maps.ImmutableEntry entry = new Maps.ImmutableEntry<>(ooCursor.key, ooCursor.value); - action.accept(entry); - }); + public boolean isEmpty() { + return collection.isEmpty(); } - } - private static final class KeySet extends AbstractSet { + @Override + public Iterator iterator() { + return Iterators.map(collection.iterator(), c -> c.value); + } + + @Override + public Spliterator spliterator() { + return Spliterators.spliterator(iterator(), size(), Spliterator.IMMUTABLE); + } - private final ObjectObjectHashMap.KeysContainer keys; + @Override + public void forEach(Consumer action) { + collection.forEach((ObjectProcedure) action::accept); + } - private KeySet(ObjectObjectHashMap.KeysContainer keys) { - this.keys = keys; + @Override + @SuppressWarnings("unchecked") + public boolean contains(Object o) { + return collection.contains((Type) o); } @Override - public Iterator iterator() { - final Iterator> iterator = keys.iterator(); - return new Iterator<>() { - @Override - public boolean hasNext() { - return iterator.hasNext(); - } - - @Override - public KType next() { - return iterator.next().value; - } - - @Override - public void remove() { - throw new UnsupportedOperationException(); - } - }; + public boolean equals(Object obj) { + return collection.equals(obj); } @Override - public int size() { - return keys.size(); + public int hashCode() { + return collection.hashCode(); + } + + @Override + public String toString() { + return collection.toString(); + } + + @Override + public Object[] toArray() { + return collection.toArray(); } @Override @SuppressWarnings("unchecked") - public boolean contains(Object o) { - return keys.contains((KType) o); + public T[] toArray(T[] a) { + return a.length == 0 ? 
(T[]) collection.toArray(a.getClass().getComponentType()) : super.toArray(a); + } + } + + private static class KeySet extends MapObjectCollection implements Set { + private KeySet(ObjectObjectHashMap.KeysContainer keys) { + super(keys); } }; @@ -278,17 +258,7 @@ public Set keySet() { @Override public Collection values() { - return new AbstractCollection() { - @Override - public Iterator iterator() { - return ImmutableOpenMap.iterator(map.values()); - } - - @Override - public int size() { - return map.size(); - } - }; + return new MapObjectCollection<>(map.values()); } @Override @@ -296,26 +266,6 @@ public void forEach(BiConsumer action) { map.forEach((ObjectObjectProcedure) action::accept); } - static Iterator iterator(ObjectCollection collection) { - final Iterator> iterator = collection.iterator(); - return new Iterator<>() { - @Override - public boolean hasNext() { - return iterator.hasNext(); - } - - @Override - public T next() { - return iterator.next().value; - } - - @Override - public void remove() { - throw new UnsupportedOperationException(); - } - }; - } - @Override public String toString() { return map.toString(); @@ -407,9 +357,7 @@ public ImmutableOpenMap build() { */ public Builder putAllFromMap(Map map) { maybeCloneMap(); - for (Map.Entry entry : map.entrySet()) { - this.mutableMap.put(entry.getKey(), entry.getValue()); - } + map.forEach(mutableMap::put); return this; } diff --git a/server/src/main/java/org/elasticsearch/common/collect/Iterators.java b/server/src/main/java/org/elasticsearch/common/collect/Iterators.java index d7c63edac2c94..4b5cef4bbbd45 100644 --- a/server/src/main/java/org/elasticsearch/common/collect/Iterators.java +++ b/server/src/main/java/org/elasticsearch/common/collect/Iterators.java @@ -10,11 +10,13 @@ import org.elasticsearch.core.Nullable; +import java.util.Arrays; import java.util.Collections; import java.util.Iterator; import java.util.NoSuchElementException; import java.util.Objects; import java.util.function.BiPredicate; +import java.util.function.Consumer; import java.util.function.Function; import java.util.function.IntFunction; import java.util.function.ToIntFunction; @@ -90,35 +92,19 @@ public T next() { } return value; } - } - - public static Iterator forArray(T[] array) { - return new ArrayIterator<>(array); - } - - private static final class ArrayIterator implements Iterator { - - private final T[] array; - private int index; - - private ArrayIterator(T[] array) { - this.array = Objects.requireNonNull(array, "Unable to iterate over a null array"); - } @Override - public boolean hasNext() { - return index < array.length; - } - - @Override - public T next() { - if (index >= array.length) { - throw new NoSuchElementException(); + public void forEachRemaining(Consumer action) { + while (index < iterators.length) { + iterators[index++].forEachRemaining(action); } - return array[index++]; } } + public static Iterator forArray(T[] array) { + return Arrays.asList(array).iterator(); + } + public static Iterator forRange(int lowerBoundInclusive, int upperBoundExclusive, IntFunction fn) { assert lowerBoundInclusive <= upperBoundExclusive : lowerBoundInclusive + " vs " + upperBoundExclusive; if (upperBoundExclusive <= lowerBoundInclusive) { @@ -183,6 +169,11 @@ public boolean hasNext() { public U next() { return fn.apply(input.next()); } + + @Override + public void forEachRemaining(Consumer action) { + input.forEachRemaining(t -> action.accept(fn.apply(t))); + } } public static Iterator flatMap(Iterator input, Function> fn) { diff --git 
a/server/src/main/java/org/elasticsearch/common/compress/CompressedXContent.java b/server/src/main/java/org/elasticsearch/common/compress/CompressedXContent.java index bda33e28fa315..5ebcca93889ff 100644 --- a/server/src/main/java/org/elasticsearch/common/compress/CompressedXContent.java +++ b/server/src/main/java/org/elasticsearch/common/compress/CompressedXContent.java @@ -164,11 +164,9 @@ public CompressedXContent(byte[] data) throws IOException { * @return compressed x-content normalized to not contain any whitespaces */ public static CompressedXContent fromJSON(String json) throws IOException { - return new CompressedXContent( - (ToXContentObject) (builder, params) -> builder.copyCurrentStructure( - JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, json) - ) - ); + try (var parser = JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, json)) { + return new CompressedXContent((ToXContentObject) (builder, params) -> builder.copyCurrentStructure(parser)); + } } public CompressedXContent(String str) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/common/geo/GeometryFormatterFactory.java b/server/src/main/java/org/elasticsearch/common/geo/GeometryFormatterFactory.java index 1f4ca454b9c8c..1201bab887861 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/GeometryFormatterFactory.java +++ b/server/src/main/java/org/elasticsearch/common/geo/GeometryFormatterFactory.java @@ -9,8 +9,10 @@ package org.elasticsearch.common.geo; import org.elasticsearch.geometry.Geometry; +import org.elasticsearch.geometry.utils.WellKnownBinary; import org.elasticsearch.geometry.utils.WellKnownText; +import java.nio.ByteOrder; import java.util.ArrayList; import java.util.List; import java.util.function.Function; @@ -22,6 +24,7 @@ public class GeometryFormatterFactory { public static final String GEOJSON = "geojson"; public static final String WKT = "wkt"; + public static final String WKB = "wkb"; /** * Returns a formatter by name @@ -38,6 +41,11 @@ public static Function, List> getFormatter(String name, Func geometries.forEach((shape) -> objects.add(WellKnownText.toWKT(toGeometry.apply(shape)))); return objects; }; + case WKB -> geometries -> { + final List objects = new ArrayList<>(geometries.size()); + geometries.forEach((shape) -> objects.add(WellKnownBinary.toWKB(toGeometry.apply(shape), ByteOrder.LITTLE_ENDIAN))); + return objects; + }; default -> throw new IllegalArgumentException("Unrecognized geometry format [" + name + "]."); }; } diff --git a/server/src/main/java/org/elasticsearch/common/util/BitArray.java b/server/src/main/java/org/elasticsearch/common/util/BitArray.java index 696e81b3beec9..96c00538f07d4 100644 --- a/server/src/main/java/org/elasticsearch/common/util/BitArray.java +++ b/server/src/main/java/org/elasticsearch/common/util/BitArray.java @@ -129,6 +129,10 @@ public boolean get(long index) { return (bits.get(wordNum) & bitmask) != 0; } + public long size() { + return bits.size() * (long) Long.BYTES * Byte.SIZE; + } + private static long wordNum(long index) { return index >> 6; } diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java index 7bfba1ebdb176..7caf570806c0e 100644 --- a/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java +++ b/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java @@ -23,6 +23,7 @@ import 
org.elasticsearch.core.Tuple; import org.elasticsearch.http.HttpTransportSettings; import org.elasticsearch.tasks.Task; +import org.elasticsearch.telemetry.tracing.TraceContext; import java.io.IOException; import java.nio.charset.StandardCharsets; @@ -69,7 +70,7 @@ * * */ -public final class ThreadContext implements Writeable { +public final class ThreadContext implements Writeable, TraceContext { public static final String PREFIX = "request.headers"; public static final Setting<Settings> DEFAULT_HEADERS_SETTING = Setting.groupSetting(PREFIX + ".", Property.NodeScope); diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java b/server/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java index 3bfe5078a3487..6723a24cfdf33 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java +++ b/server/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java @@ -74,10 +74,26 @@ public static XContentParser createParser(XContentParserConfiguration config, By final XContentType contentType = XContentFactory.xContentType(compressedInput); return XContentFactory.xContent(contentType).createParser(config, compressedInput); } else { - return XContentFactory.xContent(xContentType(bytes)).createParser(config, bytes.streamInput()); + return createParserNotCompressed(config, bytes, xContentType(bytes)); } } + /** + * Same as {@link #createParser(XContentParserConfiguration, BytesReference, XContentType)} but only supports uncompressed + * {@code bytes}. + */ + public static XContentParser createParserNotCompressed( + XContentParserConfiguration config, + BytesReference bytes, + XContentType xContentType + ) throws IOException { + XContent xContent = xContentType.xContent(); + if (bytes.hasArray()) { + return xContent.createParser(config, bytes.array(), bytes.arrayOffset(), bytes.length()); + } + return xContent.createParser(config, bytes.streamInput()); + } + /** * Creates a parser for the bytes provided * @deprecated use {@link #createParser(XContentParserConfiguration, BytesReference, XContentType)} @@ -111,10 +127,7 @@ public static XContentParser createParser(XContentParserConfiguration config, By return XContentFactory.xContent(xContentType).createParser(config, compressedInput); } else { // TODO now that we have config we should add a method on bytes to do this building without needing this check everywhere - if (bytes.hasArray()) { - return xContentType.xContent().createParser(config, bytes.array(), bytes.arrayOffset(), bytes.length()); - } - return xContentType.xContent().createParser(config, bytes.streamInput()); + return createParserNotCompressed(config, bytes, xContentType); } } @@ -310,7 +323,7 @@ public static String convertToJson(BytesReference bytes, boolean reformatJson) t @Deprecated public static String convertToJson(BytesReference bytes, boolean reformatJson, boolean prettyPrint) throws IOException { - return convertToJson(bytes, reformatJson, prettyPrint, XContentFactory.xContentType(bytes.toBytesRef().bytes)); + return convertToJson(bytes, reformatJson, prettyPrint, xContentType(bytes)); } public static String convertToJson(BytesReference bytes, boolean reformatJson, XContentType xContentType) throws IOException { @@ -337,20 +350,8 @@ public static String convertToJson(BytesReference bytes, boolean reformatJson, b return bytes.utf8ToString(); } - if (bytes.hasArray()) { - try ( - XContentParser parser = XContentFactory.xContent(xContentType) .createParser(XContentParserConfiguration.EMPTY, bytes.array(),
bytes.arrayOffset(), bytes.length()) - ) { - return toJsonString(prettyPrint, parser); - } - } else { - try ( - InputStream stream = bytes.streamInput(); - XContentParser parser = XContentFactory.xContent(xContentType).createParser(XContentParserConfiguration.EMPTY, stream) - ) { - return toJsonString(prettyPrint, parser); - } + try (var parser = createParserNotCompressed(XContentParserConfiguration.EMPTY, bytes, xContentType)) { + return toJsonString(prettyPrint, parser); } } @@ -746,7 +747,7 @@ public static void writeTo(StreamOutput out, XContentType xContentType) throws I public static XContentParser mapToXContentParser(XContentParserConfiguration config, Map source) { try (XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON)) { builder.map(source); - return XContentFactory.xContent(builder.contentType()).createParser(config, Strings.toString(builder)); + return createParserNotCompressed(config, BytesReference.bytes(builder), builder.contentType()); } catch (IOException e) { throw new ElasticsearchGenerationException("Failed to generate [" + source + "]", e); } diff --git a/server/src/main/java/org/elasticsearch/gateway/BaseGatewayShardAllocator.java b/server/src/main/java/org/elasticsearch/gateway/BaseGatewayShardAllocator.java index c7145ed444d38..b9d267b922c91 100644 --- a/server/src/main/java/org/elasticsearch/gateway/BaseGatewayShardAllocator.java +++ b/server/src/main/java/org/elasticsearch/gateway/BaseGatewayShardAllocator.java @@ -10,7 +10,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.elasticsearch.cluster.routing.RecoverySource; import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.AllocateUnassignedDecision; @@ -23,6 +22,8 @@ import java.util.ArrayList; import java.util.List; +import static org.elasticsearch.cluster.routing.ExpectedShardSizeEstimator.getExpectedShardSize; + /** * An abstract class that implements basic functionality for allocating * shards to nodes based on shard copies that already exist in the cluster. @@ -58,7 +59,7 @@ public void allocateUnassigned( unassignedAllocationHandler.initialize( allocateUnassignedDecision.getTargetNode().getId(), allocateUnassignedDecision.getAllocationId(), - getExpectedShardSize(shardRouting, allocation), + getExpectedShardSize(shardRouting, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE, allocation), allocation.changes() ); } else { @@ -66,18 +67,6 @@ public void allocateUnassigned( } } - protected static long getExpectedShardSize(ShardRouting shardRouting, RoutingAllocation allocation) { - if (shardRouting.primary()) { - if (shardRouting.recoverySource().getType() == RecoverySource.Type.SNAPSHOT) { - return allocation.snapshotShardSizeInfo().getShardSize(shardRouting, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE); - } else { - return ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE; - } - } else { - return allocation.clusterInfo().getShardSize(shardRouting, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE); - } - } - /** * Make a decision on the allocation of an unassigned shard. 
This method is used by * {@link #allocateUnassigned(ShardRouting, RoutingAllocation, ExistingShardsAllocator.UnassignedAllocationHandler)} to make decisions diff --git a/server/src/main/java/org/elasticsearch/gateway/PersistedClusterStateService.java b/server/src/main/java/org/elasticsearch/gateway/PersistedClusterStateService.java index fcf50ba3a8a44..5e515291d1fea 100644 --- a/server/src/main/java/org/elasticsearch/gateway/PersistedClusterStateService.java +++ b/server/src/main/java/org/elasticsearch/gateway/PersistedClusterStateService.java @@ -61,6 +61,7 @@ import org.elasticsearch.common.util.Maps; import org.elasticsearch.common.xcontent.ChunkedToXContent; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.Assertions; import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.core.CheckedFunction; @@ -668,8 +669,7 @@ public OnDiskStateMetadata loadOnDiskStateMetadataFromUserData(Map T readXContent(BytesReference bytes, CheckedFunction reader) throws IOException { - final XContentParser parser = XContentFactory.xContent(XContentType.SMILE).createParser(parserConfig, bytes.streamInput()); - try { + try (XContentParser parser = XContentHelper.createParserNotCompressed(parserConfig, bytes, XContentType.SMILE)) { return reader.apply(parser); } catch (Exception e) { throw new CorruptStateException(e); diff --git a/server/src/main/java/org/elasticsearch/health/RestGetHealthAction.java b/server/src/main/java/org/elasticsearch/health/RestGetHealthAction.java index 8dcea1bb0e7e2..5dd7930c77d68 100644 --- a/server/src/main/java/org/elasticsearch/health/RestGetHealthAction.java +++ b/server/src/main/java/org/elasticsearch/health/RestGetHealthAction.java @@ -14,7 +14,7 @@ import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestCancellableNodeClient; -import org.elasticsearch.rest.action.RestChunkedToXContentListener; +import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener; import java.io.IOException; import java.util.List; @@ -48,7 +48,7 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli return channel -> new RestCancellableNodeClient(client, request.getHttpChannel()).execute( GetHealthAction.INSTANCE, getHealthRequest, - new RestChunkedToXContentListener<>(channel) + new RestRefCountedChunkedToXContentListener<>(channel) ); } diff --git a/server/src/main/java/org/elasticsearch/http/DefaultRestChannel.java b/server/src/main/java/org/elasticsearch/http/DefaultRestChannel.java index 24df7875f7e3d..f4dbf8115da33 100644 --- a/server/src/main/java/org/elasticsearch/http/DefaultRestChannel.java +++ b/server/src/main/java/org/elasticsearch/http/DefaultRestChannel.java @@ -26,7 +26,6 @@ import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.telemetry.tracing.SpanId; import org.elasticsearch.telemetry.tracing.Tracer; import java.util.ArrayList; @@ -91,8 +90,6 @@ public void sendResponse(RestResponse restResponse) { // We're sending a response so we know we won't be needing the request content again and release it httpRequest.release(); - final SpanId spanId = SpanId.forRestRequest(request); - final ArrayList toClose = new ArrayList<>(4); if (HttpUtils.shouldCloseConnection(httpRequest)) { toClose.add(() -> CloseableChannel.closeChannel(httpChannel)); @@ -174,9 +171,9 @@ 
public void sendResponse(RestResponse restResponse) { addCookies(httpResponse); - tracer.setAttribute(spanId, "http.status_code", restResponse.status().getStatus()); + tracer.setAttribute(request, "http.status_code", restResponse.status().getStatus()); restResponse.getHeaders() - .forEach((key, values) -> tracer.setAttribute(spanId, "http.response.headers." + key, String.join("; ", values))); + .forEach((key, values) -> tracer.setAttribute(request, "http.response.headers." + key, String.join("; ", values))); ActionListener<Void> listener = ActionListener.releasing(Releasables.wrap(toClose)); if (httpLogger != null) { diff --git a/server/src/main/java/org/elasticsearch/index/IndexVersion.java b/server/src/main/java/org/elasticsearch/index/IndexVersion.java index 765cc256d84b1..f4edb8b1d4039 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexVersion.java +++ b/server/src/main/java/org/elasticsearch/index/IndexVersion.java @@ -56,13 +56,11 @@ public record IndexVersion(int id, Version luceneVersion) implements VersionId<IndexVersion> - var versionExtension = ExtensionLoader.loadSingleton(ServiceLoader.load(VersionExtension.class), () -> null); - if (versionExtension == null) { - return IndexVersions.LATEST_DEFINED; - } - var version = versionExtension.getCurrentIndexVersion(IndexVersions.LATEST_DEFINED); + var version = ExtensionLoader.loadSingleton(ServiceLoader.load(VersionExtension.class)) + .map(e -> e.getCurrentIndexVersion(IndexVersions.LATEST_DEFINED)) + .orElse(IndexVersions.LATEST_DEFINED); assert version.onOrAfter(IndexVersions.LATEST_DEFINED); assert version.luceneVersion.equals(Version.LATEST) diff --git a/server/src/main/java/org/elasticsearch/index/IndexVersions.java b/server/src/main/java/org/elasticsearch/index/IndexVersions.java index f4edb4f79d760..4419abba73c1b 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexVersions.java +++ b/server/src/main/java/org/elasticsearch/index/IndexVersions.java @@ -92,7 +92,10 @@ private static IndexVersion def(int id, Version luceneVersion) { public static final IndexVersion ES_VERSION_8_12 = def(8_500_004, Version.LUCENE_9_8_0); public static final IndexVersion NORMALIZED_VECTOR_COSINE = def(8_500_005, Version.LUCENE_9_8_0); public static final IndexVersion UPGRADE_LUCENE_9_9 = def(8_500_006, Version.LUCENE_9_9_0); - public static final IndexVersion ES_VERSION_8_13 = def(8_500_007, Version.LUCENE_9_9_0); + public static final IndexVersion NORI_DUPLICATES = def(8_500_007, Version.LUCENE_9_9_0); + public static final IndexVersion UPGRADE_LUCENE_9_9_1 = def(8_500_008, Version.LUCENE_9_9_1); + public static final IndexVersion ES_VERSION_8_13 = def(8_500_009, Version.LUCENE_9_9_1); + public static final IndexVersion NEW_INDEXVERSION_FORMAT = def(8_501_00_0, Version.LUCENE_9_9_1); /* * STOP! READ THIS FIRST! No, really, @@ -105,18 +108,46 @@ private static IndexVersion def(int id, Version luceneVersion) { * A new index version should be added EVERY TIME a change is made to index metadata or data storage. * Each index version should only be used in a single merged commit (apart from the BwC versions copied from o.e.Version, ≤V_8_11_0). * - * To add a new index version, add a new constant at the bottom of the list, above this comment, which is one greater than the - * current highest version id. Use a descriptive constant name. Don't add other lines, comments, etc. + * ADDING AN INDEX VERSION + * To add a new index version, add a new constant at the bottom of the list, above this comment. Don't add other lines, + * comments, etc.
The version id has the following layout: + * + * M_NNN_SS_P + * + * M - The major version of Elasticsearch + * NNN - The server version part + * SS - The serverless version part. It should always be 00 here, it is used by serverless only. + * P - The patch version part + * + * To determine the id of the next IndexVersion constant, do the following: + * - Use the same major version, unless bumping majors + * - Bump the server version part by 1, unless creating a patch version + * - Leave the serverless part as 00 + * - Bump the patch part if creating a patch version + * + * If a patch version is created, it should be placed sorted among the other existing constants. * * REVERTING AN INDEX VERSION * * If you revert a commit with an index version change, you MUST ensure there is a NEW index version representing the reverted * change. DO NOT let the index version go backwards, it must ALWAYS be incremented. * - * DETERMINING TRANSPORT VERSIONS FROM GIT HISTORY + * DETERMINING INDEX VERSIONS FROM GIT HISTORY + * + * If your git checkout has the expected minor-version-numbered branches and the expected release-version tags then you can find the + * index versions known by a particular release ... + * + * git show v8.12.0:server/src/main/java/org/elasticsearch/index/IndexVersions.java | grep '= def' + * + * ... or by a particular branch ... + * + * git show 8.12:server/src/main/java/org/elasticsearch/index/IndexVersions.java | grep '= def' + * + * ... and you can see which versions were added in between two versions too ... + * + * git diff v8.12.0..main -- server/src/main/java/org/elasticsearch/index/IndexVersions.java * - * TODO after the release of v8.11.0, copy the instructions about using git to track the history of versions from TransportVersion.java - * (the example commands won't make sense until at least 8.11.0 is released) + * In branches 8.7-8.11 see server/src/main/java/org/elasticsearch/index/IndexVersion.java for the equivalent definitions. */ public static final IndexVersion MINIMUM_COMPATIBLE = V_7_0_0; diff --git a/server/src/main/java/org/elasticsearch/index/analysis/Analysis.java b/server/src/main/java/org/elasticsearch/index/analysis/Analysis.java index e19ee050c93a7..d3e281ca115e1 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/Analysis.java +++ b/server/src/main/java/org/elasticsearch/index/analysis/Analysis.java @@ -44,6 +44,7 @@ import org.apache.lucene.analysis.sv.SwedishAnalyzer; import org.apache.lucene.analysis.th.ThaiAnalyzer; import org.apache.lucene.analysis.tr.TurkishAnalyzer; +import org.apache.lucene.analysis.util.CSVUtil; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.common.Strings; import org.elasticsearch.common.logging.DeprecationCategory; @@ -64,6 +65,7 @@ import java.nio.file.Path; import java.util.ArrayList; import java.util.Collection; +import java.util.HashSet; import java.util.List; import java.util.Locale; import java.util.Map; @@ -257,6 +259,52 @@ public static List getWordList( } } + public static List getWordList( + Environment env, + Settings settings, + String settingPath, + String settingList, + boolean removeComments, + boolean checkDuplicate + ) { + final List ruleList = getWordList(env, settings, settingPath, settingList, removeComments); + if (ruleList != null && ruleList.isEmpty() == false && checkDuplicate) { + checkDuplicateRules(ruleList); + } + return ruleList; + } + + /** + * This method checks for any duplicate rules in the provided ruleList. 
Each rule in the list is parsed with CSVUtil.parse + * to separate the rule into individual components, represented as a String array. Only the first component from each rule + * is considered in the duplication check. + * + * The method will ignore any line that starts with a '#' character, treating it as a comment. + * + * The check is performed by adding the first component of each rule into a HashSet (dup), which does not allow duplicates. + * If the addition to the HashSet returns false, it means that item was already present in the set, indicating a duplicate. + * In such a case, an IllegalArgumentException is thrown specifying the duplicate term and the line number in the original list. + * + * @param ruleList The list of rules to check for duplicates. + * @throws IllegalArgumentException If a duplicate rule is found. + */ + private static void checkDuplicateRules(List ruleList) { + Set dup = new HashSet<>(); + int lineNum = 0; + for (String line : ruleList) { + // ignore comments + if (line.startsWith("#") == false) { + String[] values = CSVUtil.parse(line); + if (dup.add(values[0]) == false) { + throw new IllegalArgumentException( + "Found duplicate term [" + values[0] + "] in user dictionary " + "at line [" + lineNum + "]" + ); + } + } + ++lineNum; + } + } + private static List loadWordList(Path path, boolean removeComments) throws IOException { final List result = new ArrayList<>(); try (BufferedReader br = Files.newBufferedReader(path, StandardCharsets.UTF_8)) { diff --git a/server/src/main/java/org/elasticsearch/index/codec/PerFieldMapperCodec.java b/server/src/main/java/org/elasticsearch/index/codec/PerFieldMapperCodec.java index d2ca31fe6a197..852547ecb1073 100644 --- a/server/src/main/java/org/elasticsearch/index/codec/PerFieldMapperCodec.java +++ b/server/src/main/java/org/elasticsearch/index/codec/PerFieldMapperCodec.java @@ -19,6 +19,7 @@ import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.codec.bloomfilter.ES87BloomFilterPostingsFormat; +import org.elasticsearch.index.codec.postings.ES812PostingsFormat; import org.elasticsearch.index.codec.tsdb.ES87TSDBDocValuesFormat; import org.elasticsearch.index.mapper.IdFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; @@ -44,6 +45,8 @@ public final class PerFieldMapperCodec extends Lucene99Codec { private final ES87BloomFilterPostingsFormat bloomFilterPostingsFormat; private final ES87TSDBDocValuesFormat tsdbDocValuesFormat; + private final ES812PostingsFormat es812PostingsFormat; + static { assert Codec.forName(Lucene.LATEST_CODEC).getClass().isAssignableFrom(PerFieldMapperCodec.class) : "PerFieldMapperCodec must subclass the latest lucene codec: " + Lucene.LATEST_CODEC; @@ -54,6 +57,7 @@ public PerFieldMapperCodec(Mode compressionMode, MapperService mapperService, Bi this.mapperService = mapperService; this.bloomFilterPostingsFormat = new ES87BloomFilterPostingsFormat(bigArrays, this::internalGetPostingsFormatForField); this.tsdbDocValuesFormat = new ES87TSDBDocValuesFormat(); + this.es812PostingsFormat = new ES812PostingsFormat(); } @Override @@ -69,7 +73,8 @@ private PostingsFormat internalGetPostingsFormatForField(String field) { if (format != null) { return format; } - return super.getPostingsFormatForField(field); + // return our own posting format using PFOR + return es812PostingsFormat; } boolean useBloomFilter(String field) { diff --git a/server/src/main/java/org/elasticsearch/index/codec/postings/ES812PostingsFormat.java 
b/server/src/main/java/org/elasticsearch/index/codec/postings/ES812PostingsFormat.java new file mode 100644 index 0000000000000..5270326876e08 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/codec/postings/ES812PostingsFormat.java @@ -0,0 +1,506 @@ +/* + * @notice + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Modifications copyright (C) 2022 Elasticsearch B.V. + */ +package org.elasticsearch.index.codec.postings; + +import org.apache.lucene.codecs.BlockTermState; +import org.apache.lucene.codecs.CodecUtil; +import org.apache.lucene.codecs.FieldsConsumer; +import org.apache.lucene.codecs.FieldsProducer; +import org.apache.lucene.codecs.MultiLevelSkipListWriter; +import org.apache.lucene.codecs.PostingsFormat; +import org.apache.lucene.codecs.PostingsReaderBase; +import org.apache.lucene.codecs.PostingsWriterBase; +import org.apache.lucene.codecs.lucene90.blocktree.Lucene90BlockTreeTermsReader; +import org.apache.lucene.codecs.lucene90.blocktree.Lucene90BlockTreeTermsWriter; +import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.SegmentReadState; +import org.apache.lucene.index.SegmentWriteState; +import org.apache.lucene.index.TermState; +import org.apache.lucene.store.DataOutput; +import org.apache.lucene.util.packed.PackedInts; +import org.elasticsearch.core.IOUtils; + +import java.io.IOException; + +/** + * Based on Lucene 9.0 postings format, which encodes postings in packed integer blocks for fast decode. + * It is introduced to preserve PFOR space efficiency when Lucene switched back to FOR in 9.9 + * + *
+ * Basic idea:
+ *
+ * • Packed Blocks and VInt Blocks:
+ *
+ *   In packed blocks, integers are encoded with the same bit width ({@link PackedInts packed
+ *   format}): the block size (i.e. the number of integers inside a block) is fixed, currently 128.
+ *   Additionally, blocks that are all the same value are encoded in an optimized way.
+ *
+ *   In VInt blocks, integers are encoded as {@link DataOutput#writeVInt VInt}: the block
+ *   size is variable.
+ *
+ * • Block structure:
+ *
+ *   When the postings are long enough, Lucene90PostingsFormat will try to encode most
+ *   integer data as a packed block.
+ *
+ *   Take a term with 259 documents as an example: the first 256 document ids are encoded as
+ *   two packed blocks, while the remaining 3 are encoded as one VInt block.
+ *
+ *   Different kinds of data are always encoded separately into different packed blocks, but
+ *   may possibly be interleaved into the same VInt block.
+ *
+ *   This strategy is applied to pairs: <document number, frequency>, <position,
+ *   payload length>, <position, offset start, offset length>, and <position,
+ *   payload length, offset start, offset length>.
+ *
+ * • Skipdata settings:
+ *
+ *   The structure of the skip table is quite similar to previous versions of Lucene. The skip
+ *   interval is the same as the block size, and each skip entry points to the beginning of each
+ *   block. However, for the first block, skip data is omitted.
+ *
+ * • Positions, Payloads, and Offsets:
+ *
+ *   A position is an integer indicating where the term occurs within one document. A payload
+ *   is a blob of metadata associated with the current position. An offset is a pair of integers
+ *   indicating the tokenized start/end offsets for the given term in the current position: it is
+ *   essentially a specialized payload.
+ *
+ *   When payloads and offsets are not omitted, numPositions==numPayloads==numOffsets
+ *   (assuming a null payload contributes one count). As mentioned in the block structure, it is
+ *   possible to encode these three either combined or separately.
+ *
+ *   In all cases, payloads and offsets are stored together. When encoded as a packed block,
+ *   position data is separated out as .pos, while payloads and offsets are encoded in .pay
+ *   (payload metadata will also be stored directly in .pay). When encoded as VInt blocks, all
+ *   three are stored interleaved into the .pos (so is payload metadata).
+ *
+ *   With this strategy, the majority of payload and offset data will be outside the .pos file.
+ *   So for queries that require only position data, running on a full index with payloads and
+ *   offsets, this reduces disk pre-fetches.
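+ *   (In short: term metadata lives in .tim/.tip, document ids and frequencies in .doc,
+ *   positions in .pos, and payloads/offsets in .pay, as detailed in the sections below.)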
+ *
+ * Files and detailed format:
+ *
+ * Term Dictionary
+ *
+ *   The .tim file contains the list of terms in each field along with per-term statistics
+ *   (such as docfreq) and pointers to the frequencies, positions, payload and skip data in the
+ *   .doc, .pos, and .pay files. See {@link Lucene90BlockTreeTermsWriter} for more details on
+ *   the format.
+ *
+ *   NOTE: The term dictionary can plug into different postings implementations: the postings
+ *   writer/reader are actually responsible for encoding and decoding the PostingsHeader and
+ *   TermMetadata sections described here:
+ *
+ *   • PostingsHeader --> Header, PackedBlockSize
+ *   • TermMetadata --> (DocFPDelta|SingletonDocID), PosFPDelta?, PosVIntBlockFPDelta?,
+ *     PayFPDelta?, SkipFPDelta?
+ *   • Header --> {@link CodecUtil#writeIndexHeader IndexHeader}
+ *   • PackedBlockSize, SingletonDocID --> {@link DataOutput#writeVInt VInt}
+ *   • DocFPDelta, PosFPDelta, PayFPDelta, PosVIntBlockFPDelta, SkipFPDelta -->
+ *     {@link DataOutput#writeVLong VLong}
+ *   • Footer --> {@link CodecUtil#writeFooter CodecFooter}
+ *
+ *   Notes:
+ *
+ *   • Header is a {@link CodecUtil#writeIndexHeader IndexHeader} storing the version
+ *     information for the postings.
+ *   • PackedBlockSize is the fixed block size for packed blocks. In a packed block, the bit
+ *     width is determined by the largest integer. A smaller block size results in smaller
+ *     variance among the widths of integers, hence smaller indexes. A larger block size results
+ *     in more efficient bulk i/o, hence better acceleration. This value should always be a
+ *     multiple of 64, currently fixed as 128 as a tradeoff. It is also the skip interval used
+ *     to accelerate {@link org.apache.lucene.index.PostingsEnum#advance(int)}.
+ *   • DocFPDelta determines the position of this term's TermFreqs within the .doc file. In
+ *     particular, it is the difference of file offset between this term's data and the previous
+ *     term's data (or zero, for the first term in the block). On disk it is stored as the
+ *     difference from the previous value in the sequence.
+ *   • PosFPDelta determines the position of this term's TermPositions within the .pos file,
+ *     while PayFPDelta determines the position of this term's <TermPayloads, TermOffsets?>
+ *     within the .pay file. Similar to DocFPDelta, it is the difference between two file
+ *     positions (or neglected, for fields that omit payloads and offsets).
+ *   • PosVIntBlockFPDelta determines the position of this term's last TermPosition in the last
+ *     pos packed block within the .pos file. It is a synonym for PayVIntBlockFPDelta or
+ *     OffsetVIntBlockFPDelta. This is actually used to indicate whether it is necessary to
+ *     load the following payloads and offsets from .pos instead of .pay. Every time a new block
+ *     of positions is to be loaded, the PostingsReader will use this value to check whether the
+ *     current block is in packed format or VInt. When in packed format, payloads and offsets
+ *     are fetched from .pay, otherwise from .pos. (This value is neglected when the total
+ *     number of positions, i.e. totalTermFreq, is less than or equal to PackedBlockSize.)
+ *   • SkipFPDelta determines the position of this term's SkipData within the .doc file. In
+ *     particular, it is the length of the TermFreq data. SkipDelta is only stored if DocFreq
+ *     is not smaller than SkipMinimum (i.e. 128 in Lucene90PostingsFormat).
+ *   • SingletonDocID is an optimization for when a term only appears in one document. In this
+ *     case, instead of writing a file pointer to the .doc file (DocFPDelta), and then a
+ *     VIntBlock at that location, the single document ID is written to the term dictionary.
+ *
+ * Term Index
+ *
+ *   The .tip file contains an index into the term dictionary, so that it can be accessed
+ *   randomly. See {@link Lucene90BlockTreeTermsWriter} for more details on the format.
+ *
+ * Frequencies and Skip Data
+ *
+ *   The .doc file contains the lists of documents which contain each term, along with the
+ *   frequency of the term in that document (except when frequencies are omitted: {@link
+ *   IndexOptions#DOCS}). It also saves skip data to the beginning of each packed or VInt
+ *   block, when the length of the document list is larger than the packed block size.
+ *
+ *   • docFile(.doc) --> Header, <TermFreqs, SkipData?>^TermCount, Footer
+ *   • Header --> {@link CodecUtil#writeIndexHeader IndexHeader}
+ *   • TermFreqs --> <PackedBlock>^PackedDocBlockNum, VIntBlock?
+ *   • PackedBlock --> PackedDocDeltaBlock, PackedFreqBlock?
+ *   • VIntBlock --> <DocDelta[, Freq?]>^(DocFreq-PackedBlockSize*PackedDocBlockNum)
+ *   • SkipData --> <<SkipLevelLength, SkipLevel>^(NumSkipLevels-1), SkipLevel>, SkipDatum?
+ *   • SkipLevel --> <SkipDatum>^(TrimmedDocFreq/(PackedBlockSize^(Level + 1)))
+ *   • SkipDatum --> DocSkip, DocFPSkip, <PosFPSkip, PosBlockOffset, PayLength?, PayFPSkip?>?,
+ *     ImpactLength, <CompetitiveFreqDelta, CompetitiveNormDelta?>^ImpactCount,
+ *     SkipChildLevelPointer?
+ *   • PackedDocDeltaBlock, PackedFreqBlock --> {@link PackedInts PackedInts}
+ *   • DocDelta, Freq, DocSkip, DocFPSkip, PosFPSkip, PosBlockOffset, PayByteUpto, PayFPSkip,
+ *     ImpactLength, CompetitiveFreqDelta --> {@link DataOutput#writeVInt VInt}
+ *   • CompetitiveNormDelta --> {@link DataOutput#writeZLong ZLong}
+ *   • SkipChildLevelPointer --> {@link DataOutput#writeVLong VLong}
+ *   • Footer --> {@link CodecUtil#writeFooter CodecFooter}
+ *
+ *   Notes:
+ *
+ *   • PackedDocDeltaBlock is theoretically generated from two steps:
+ *     1. Calculate the difference between each document number and the previous one, and get
+ *        a d-gaps list (for the first document, use the absolute value);
+ *     2. For those d-gaps from the first one to the PackedDocBlockNum*PackedBlockSize-th,
+ *        separately encode as packed blocks.
+ *     If frequencies are not omitted, PackedFreqBlock will be generated without the d-gap step.
+ *   • VIntBlock stores the remaining d-gaps (along with frequencies when possible) with a
+ *     format that encodes DocDelta and Freq:
+ *
+ *     DocDelta: if frequencies are indexed, this determines both the document number and
+ *     the frequency. In particular, DocDelta/2 is the difference between this document
+ *     number and the previous document number (or zero when this is the first document in a
+ *     TermFreqs). When DocDelta is odd, the frequency is one. When DocDelta is even, the
+ *     frequency is read as another VInt. If frequencies are omitted, DocDelta contains the
+ *     gap (not multiplied by 2) between document numbers and no frequency information is
+ *     stored.
+ *
+ *     For example, the TermFreqs for a term which occurs once in document seven and three
+ *     times in document eleven, with frequencies indexed, would be the following sequence of
+ *     VInts (decoded step by step after these notes):
+ *
+ *     15, 8, 3
+ *
+ *     If frequencies were omitted ({@link IndexOptions#DOCS}) it would be this sequence of
+ *     VInts instead:
+ *
+ *     7, 4
+ *
+ *   • PackedDocBlockNum is the number of packed blocks for the current term's docids or
+ *     frequencies. In particular, PackedDocBlockNum = floor(DocFreq/PackedBlockSize).
+ *   • TrimmedDocFreq = DocFreq % PackedBlockSize == 0 ? DocFreq - 1 : DocFreq. We use this
+ *     trick since the definition of a skip entry is a little different from the base
+ *     interface. In {@link MultiLevelSkipListWriter}, skip data is assumed to be saved for
+ *     the skipInterval-th, 2*skipInterval-th ... posting in the list. However, in
+ *     Lucene90PostingsFormat, the skip data is saved for the skipInterval+1-th,
+ *     2*skipInterval+1-th ... posting (skipInterval==PackedBlockSize in this case). When
+ *     DocFreq is a multiple of PackedBlockSize, MultiLevelSkipListWriter will expect one
+ *     more skip datum than Lucene90SkipWriter.
+ *   • SkipDatum is the metadata of one skip entry. For the first block (no matter packed or
+ *     VInt), it is omitted.
+ *   • DocSkip records the document number of every PackedBlockSize-th document number in the
+ *     postings (i.e. the last document number in each packed block). On disk it is stored as
+ *     the difference from the previous value in the sequence.
+ *   • DocFPSkip records the file offsets of each block (excluding the first one), i.e. of the
+ *     postings at the PackedBlockSize+1-th, 2*PackedBlockSize+1-th ... positions, in DocFile.
+ *     The file offsets are relative to the start of the current term's TermFreqs. On disk it
+ *     is also stored as the difference from the previous SkipDatum in the sequence.
+ *   • Since positions and payloads are also block encoded, the skip should skip to the
+ *     related block first, then fetch the values according to the in-block offset. PosFPSkip
+ *     and PayFPSkip record the file offsets of the related block in .pos and .pay,
+ *     respectively, while PosBlockOffset indicates which value to fetch inside the related
+ *     block (PayBlockOffset is unnecessary since it is always equal to PosBlockOffset). Same
+ *     as DocFPSkip, the file offsets are relative to the start of the current term's
+ *     TermFreqs, and stored as a difference sequence.
+ *   • PayByteUpto indicates the start offset of the current payload. It is equivalent to the
+ *     sum of the payload lengths in the current block up to PosBlockOffset.
+ *   • ImpactLength is the total length of the CompetitiveFreqDelta and CompetitiveNormDelta
+ *     pairs. CompetitiveFreqDelta and CompetitiveNormDelta are used to safely skip score
+ *     calculation for uncompetitive documents; see {@link
+ *     org.apache.lucene.codecs.CompetitiveImpactAccumulator} for more details.
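+ *   Decoding the 15, 8, 3 example above, step by step: 15 is odd, so the first document id
+ *   is 15/2 = 7 with an implicit frequency of one; 8 is even, so the next doc gap is
+ *   8/2 = 4, giving document 7 + 4 = 11, and its frequency is read as the following VInt, 3.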
+ *
+ * Positions
+ *
+ *   The .pos file contains the lists of positions that each term occurs at within documents.
+ *   It also sometimes stores part of the payloads and offsets for speedup.
+ *
+ *   • PosFile(.pos) --> Header, <TermPositions>^TermCount, Footer
+ *   • Header --> {@link CodecUtil#writeIndexHeader IndexHeader}
+ *   • TermPositions --> <PackedPosDeltaBlock>^PackedPosBlockNum, VIntBlock?
+ *   • VIntBlock --> <PositionDelta[, PayloadLength?], PayloadData?, OffsetDelta?,
+ *     OffsetLength?>^PosVIntCount
+ *   • PackedPosDeltaBlock --> {@link PackedInts PackedInts}
+ *   • PositionDelta, OffsetDelta, OffsetLength --> {@link DataOutput#writeVInt VInt}
+ *   • PayloadData --> {@link DataOutput#writeByte byte}^PayLength
+ *   • Footer --> {@link CodecUtil#writeFooter CodecFooter}
+ *
+ *   Notes:
+ *
+ *   • TermPositions are ordered by term (terms are implicit, from the term dictionary), and
+ *     position values for each term-document pair are incremental, ordered by document
+ *     number.
+ *   • PackedPosBlockNum is the number of packed blocks for the current term's positions,
+ *     payloads or offsets. In particular,
+ *     PackedPosBlockNum = floor(totalTermFreq/PackedBlockSize).
+ *   • PosVIntCount is the number of positions encoded in VInt format. In particular,
+ *     PosVIntCount = totalTermFreq - PackedPosBlockNum*PackedBlockSize.
+ *   • The procedure by which PackedPosDeltaBlock is generated is the same as for
+ *     PackedDocDeltaBlock in the chapter Frequencies and Skip Data.
+ *   • PositionDelta is, if payloads are disabled for the term's field, the difference
+ *     between the position of the current occurrence in the document and the previous
+ *     occurrence (or zero, if this is the first occurrence in this document). If payloads
+ *     are enabled for the term's field, then PositionDelta/2 is the difference between the
+ *     current and the previous position. If payloads are enabled and PositionDelta is odd,
+ *     then PayloadLength is stored, indicating the length of the payload at the current
+ *     term position.
+ *   • For example, the TermPositions for a term which occurs as the fourth term in one
+ *     document, and as the fifth and ninth term in a subsequent document, would be the
+ *     following sequence of VInts (payloads disabled; decoded after these notes):
+ *
+ *     4, 5, 4
+ *
+ *   • PayloadData is metadata associated with the current term position. If PayloadLength
+ *     is stored at the current position, then it indicates the length of this payload. If
+ *     PayloadLength is not stored, then this payload has the same length as the payload at
+ *     the previous position.
+ *   • OffsetDelta/2 is the difference between this position's startOffset and that of the
+ *     previous occurrence (or zero, if this is the first occurrence in this document). If
+ *     OffsetDelta is odd, then the length (endOffset-startOffset) differs from the previous
+ *     occurrence and an OffsetLength follows. Offset data is only written for {@link
+ *     IndexOptions#DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS}.
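+ *   Decoding the 4, 5, 4 example above: the only occurrence in the first document is at
+ *   position 4 (the first delta in each document is the absolute position); the second
+ *   document restarts the delta sequence, so its occurrences at positions 5 and 9 are
+ *   written as 5 and 9 - 5 = 4.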
+ *
+ * Payloads and Offsets
+ *
+ *   The .pay file will store payloads and offsets associated with certain term-document
+ *   positions. Some payloads and offsets will be separated out into the .pos file, for
+ *   performance reasons.
+ *
+ *   • PayFile(.pay) --> Header, <TermPayloads?, TermOffsets?>^TermCount, Footer
+ *   • Header --> {@link CodecUtil#writeIndexHeader IndexHeader}
+ *   • TermPayloads --> <PackedPayLengthBlock, SumPayLength, PayData>^PackedPayBlockNum
+ *   • TermOffsets --> <PackedOffsetStartDeltaBlock, PackedOffsetLengthBlock>^PackedPayBlockNum
+ *   • PackedPayLengthBlock, PackedOffsetStartDeltaBlock, PackedOffsetLengthBlock -->
+ *     {@link PackedInts PackedInts}
+ *   • SumPayLength --> {@link DataOutput#writeVInt VInt}
+ *   • PayData --> {@link DataOutput#writeByte byte}^SumPayLength
+ *   • Footer --> {@link CodecUtil#writeFooter CodecFooter}
+ *
+ *   Notes:
+ *
+ *   • The order of TermPayloads/TermOffsets will be the same as TermPositions; note that
+ *     part of the payloads/offsets are stored in .pos.
+ *   • The procedure by which PackedPayLengthBlock and PackedOffsetLengthBlock are generated
+ *     is the same as for PackedFreqBlock in the chapter Frequencies and Skip Data, while
+ *     PackedOffsetStartDeltaBlock follows the same procedure as PackedDocDeltaBlock.
+ *   • PackedPayBlockNum is always equal to PackedPosBlockNum, for the same term. It is also
+ *     a synonym for PackedOffsetBlockNum.
+ *   • SumPayLength is the total length of payloads written within one block; it should be
+ *     the sum of the PayLengths in one packed block.
+ *   • PayLength in PackedPayLengthBlock is the length of each payload associated with the
+ *     current position.
+ * + */ +public final class ES812PostingsFormat extends PostingsFormat { + + /** + * Filename extension for document number, frequencies, and skip data. See chapter: Frequencies and Skip Data + */ + public static final String DOC_EXTENSION = "doc"; + + /** Filename extension for positions. See chapter: Positions */ + public static final String POS_EXTENSION = "pos"; + + /** + * Filename extension for payloads and offsets. See chapter: Payloads and + * Offsets + */ + public static final String PAY_EXTENSION = "pay"; + + /** Size of blocks. */ + public static final int BLOCK_SIZE = ForUtil.BLOCK_SIZE; + + /** + * Expert: The maximum number of skip levels. Smaller values result in slightly smaller indexes, + * but slower skipping in big posting lists. + */ + static final int MAX_SKIP_LEVELS = 10; + + static final String CODEC_NAME = "ES812Postings"; + static final String TERMS_CODEC = "ES812PostingsWriterTerms"; + static final String DOC_CODEC = "ES812PostingsWriterDoc"; + static final String POS_CODEC = "ES812PostingsWriterPos"; + static final String PAY_CODEC = "ES812PostingsWriterPay"; + + // Increment version to change it + static final int VERSION_START = 0; + static final int VERSION_CURRENT = VERSION_START; + + /** Creates read-only {@code ES812PostingsFormat}. */ + public ES812PostingsFormat() { + super(CODEC_NAME); + } + + @Override + public String toString() { + return getName(); + } + + @Override + public FieldsConsumer fieldsConsumer(SegmentWriteState state) throws IOException { + PostingsWriterBase postingsWriter = new ES812PostingsWriter(state); + boolean success = false; + try { + FieldsConsumer ret = new Lucene90BlockTreeTermsWriter( + state, + postingsWriter, + Lucene90BlockTreeTermsWriter.DEFAULT_MIN_BLOCK_SIZE, + Lucene90BlockTreeTermsWriter.DEFAULT_MAX_BLOCK_SIZE + ); + success = true; + return ret; + } finally { + if (success == false) { + IOUtils.closeWhileHandlingException(postingsWriter); + } + } + } + + @Override + public FieldsProducer fieldsProducer(SegmentReadState state) throws IOException { + PostingsReaderBase postingsReader = new ES812PostingsReader(state); + boolean success = false; + try { + FieldsProducer ret = new Lucene90BlockTreeTermsReader(postingsReader, state); + success = true; + return ret; + } finally { + if (success == false) { + IOUtils.closeWhileHandlingException(postingsReader); + } + } + } + + /** + * Holds all state required for {@link ES812PostingsReader} to produce a {@link + * org.apache.lucene.index.PostingsEnum} without re-seeking the terms dict. + * + */ + public static final class IntBlockTermState extends BlockTermState { + /** file pointer to the start of the doc ids enumeration, in {@link #DOC_EXTENSION} file */ + public long docStartFP; + + /** file pointer to the start of the positions enumeration, in {@link #POS_EXTENSION} file */ + public long posStartFP; + + /** file pointer to the start of the payloads enumeration, in {@link #PAY_EXTENSION} file */ + public long payStartFP; + + /** + * file offset for the start of the skip list, relative to docStartFP, if there are more than + * {@link ForUtil#BLOCK_SIZE} docs; otherwise -1 + */ + public long skipOffset; + + /** + * file offset for the last position in the last block, if there are more than {@link + * ForUtil#BLOCK_SIZE} positions; otherwise -1 + * + *
One might think to use total term frequency to track how many positions are left to read + * as we decode the blocks, and decode the last block differently when num_left_positions < + * BLOCK_SIZE. Unfortunately this won't work since the tracking will be messed up when we skip + * blocks as the skipper will only tell us new position offset (start of block) and number of + * positions to skip for that block, without telling us how many positions it has skipped. + */ + public long lastPosBlockOffset; + + /** + * docid when there is a single pulsed posting, otherwise -1. freq is always implicitly + * totalTermFreq in this case. + */ + public int singletonDocID; + + /** Sole constructor. */ + public IntBlockTermState() { + skipOffset = -1; + lastPosBlockOffset = -1; + singletonDocID = -1; + } + + @Override + public IntBlockTermState clone() { + IntBlockTermState other = new IntBlockTermState(); + other.copyFrom(this); + return other; + } + + @Override + public void copyFrom(TermState _other) { + super.copyFrom(_other); + IntBlockTermState other = (IntBlockTermState) _other; + docStartFP = other.docStartFP; + posStartFP = other.posStartFP; + payStartFP = other.payStartFP; + lastPosBlockOffset = other.lastPosBlockOffset; + skipOffset = other.skipOffset; + singletonDocID = other.singletonDocID; + } + + @Override + public String toString() { + return super.toString() + + " docStartFP=" + + docStartFP + + " posStartFP=" + + posStartFP + + " payStartFP=" + + payStartFP + + " lastPosBlockOffset=" + + lastPosBlockOffset + + " singletonDocID=" + + singletonDocID; + } + } +} diff --git a/server/src/main/java/org/elasticsearch/index/codec/postings/ES812PostingsReader.java b/server/src/main/java/org/elasticsearch/index/codec/postings/ES812PostingsReader.java new file mode 100644 index 0000000000000..8b3d5d02a04c0 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/codec/postings/ES812PostingsReader.java @@ -0,0 +1,1990 @@ +/* + * @notice + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Modifications copyright (C) 2022 Elasticsearch B.V. 
+ */ +package org.elasticsearch.index.codec.postings; + +import org.apache.lucene.codecs.BlockTermState; +import org.apache.lucene.codecs.CodecUtil; +import org.apache.lucene.codecs.PostingsReaderBase; +import org.apache.lucene.index.FieldInfo; +import org.apache.lucene.index.Impacts; +import org.apache.lucene.index.ImpactsEnum; +import org.apache.lucene.index.IndexFileNames; +import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.PostingsEnum; +import org.apache.lucene.index.SegmentReadState; +import org.apache.lucene.index.SlowImpactsEnum; +import org.apache.lucene.store.DataInput; +import org.apache.lucene.store.IndexInput; +import org.apache.lucene.util.ArrayUtil; +import org.apache.lucene.util.BitUtil; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.core.IOUtils; +import org.elasticsearch.index.codec.postings.ES812PostingsFormat.IntBlockTermState; + +import java.io.IOException; +import java.util.Arrays; + +import static org.elasticsearch.index.codec.postings.ES812PostingsFormat.DOC_CODEC; +import static org.elasticsearch.index.codec.postings.ES812PostingsFormat.MAX_SKIP_LEVELS; +import static org.elasticsearch.index.codec.postings.ES812PostingsFormat.PAY_CODEC; +import static org.elasticsearch.index.codec.postings.ES812PostingsFormat.POS_CODEC; +import static org.elasticsearch.index.codec.postings.ES812PostingsFormat.TERMS_CODEC; +import static org.elasticsearch.index.codec.postings.ES812PostingsFormat.VERSION_CURRENT; +import static org.elasticsearch.index.codec.postings.ES812PostingsFormat.VERSION_START; +import static org.elasticsearch.index.codec.postings.ForUtil.BLOCK_SIZE; + +/** + * Concrete class that reads docId(maybe frq,pos,offset,payloads) list with postings format. + * + */ +final class ES812PostingsReader extends PostingsReaderBase { + + private final IndexInput docIn; + private final IndexInput posIn; + private final IndexInput payIn; + + private final int version; + + /** Sole constructor. */ + ES812PostingsReader(SegmentReadState state) throws IOException { + boolean success = false; + IndexInput docIn = null; + IndexInput posIn = null; + IndexInput payIn = null; + + // NOTE: these data files are too costly to verify checksum against all the bytes on open, + // but for now we at least verify proper structure of the checksum footer: which looks + // for FOOTER_MAGIC + algorithmID. This is cheap and can detect some forms of corruption + // such as file truncation. 
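+ // The .doc input is always opened; the .pos and .pay inputs below are only opened when the
+ // field infos indicate that positions (and payloads/offsets) were indexed.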
+ + String docName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, ES812PostingsFormat.DOC_EXTENSION); + try { + docIn = state.directory.openInput(docName, state.context); + version = CodecUtil.checkIndexHeader( + docIn, + DOC_CODEC, + VERSION_START, + VERSION_CURRENT, + state.segmentInfo.getId(), + state.segmentSuffix + ); + CodecUtil.retrieveChecksum(docIn); + + if (state.fieldInfos.hasProx()) { + String proxName = IndexFileNames.segmentFileName( + state.segmentInfo.name, + state.segmentSuffix, + ES812PostingsFormat.POS_EXTENSION + ); + posIn = state.directory.openInput(proxName, state.context); + CodecUtil.checkIndexHeader(posIn, POS_CODEC, version, version, state.segmentInfo.getId(), state.segmentSuffix); + CodecUtil.retrieveChecksum(posIn); + + if (state.fieldInfos.hasPayloads() || state.fieldInfos.hasOffsets()) { + String payName = IndexFileNames.segmentFileName( + state.segmentInfo.name, + state.segmentSuffix, + ES812PostingsFormat.PAY_EXTENSION + ); + payIn = state.directory.openInput(payName, state.context); + CodecUtil.checkIndexHeader(payIn, PAY_CODEC, version, version, state.segmentInfo.getId(), state.segmentSuffix); + CodecUtil.retrieveChecksum(payIn); + } + } + + this.docIn = docIn; + this.posIn = posIn; + this.payIn = payIn; + success = true; + } finally { + if (success == false) { + IOUtils.closeWhileHandlingException(docIn, posIn, payIn); + } + } + } + + @Override + public void init(IndexInput termsIn, SegmentReadState state) throws IOException { + // Make sure we are talking to the matching postings writer + CodecUtil.checkIndexHeader(termsIn, TERMS_CODEC, VERSION_START, VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix); + final int indexBlockSize = termsIn.readVInt(); + if (indexBlockSize != BLOCK_SIZE) { + throw new IllegalStateException("index-time BLOCK_SIZE (" + indexBlockSize + ") != read-time BLOCK_SIZE (" + BLOCK_SIZE + ")"); + } + } + + /** Read values that have been written using variable-length encoding instead of bit-packing. 
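+ * For example, with freqs indexed, the VInt sequence 15, 8, 3 from the format documentation
+ * decodes into doc deltas {7, 4} and freqs {1, 3}; a subsequent prefixSum over the deltas
+ * yields the doc ids {7, 11}.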
*/ + static void readVIntBlock(IndexInput docIn, long[] docBuffer, long[] freqBuffer, int num, boolean indexHasFreq) throws IOException { + if (indexHasFreq) { + for (int i = 0; i < num; i++) { + final int code = docIn.readVInt(); + docBuffer[i] = code >>> 1; + if ((code & 1) != 0) { + freqBuffer[i] = 1; + } else { + freqBuffer[i] = docIn.readVInt(); + } + } + } else { + for (int i = 0; i < num; i++) { + docBuffer[i] = docIn.readVInt(); + } + } + } + + static void prefixSum(long[] buffer, int count, long base) { + buffer[0] += base; + for (int i = 1; i < count; ++i) { + buffer[i] += buffer[i - 1]; + } + } + + static int findFirstGreater(long[] buffer, int target, int from) { + for (int i = from; i < BLOCK_SIZE; ++i) { + if (buffer[i] >= target) { + return i; + } + } + return BLOCK_SIZE; + } + + @Override + public BlockTermState newTermState() { + return new IntBlockTermState(); + } + + @Override + public void close() throws IOException { + IOUtils.close(docIn, posIn, payIn); + } + + @Override + public void decodeTerm(DataInput in, FieldInfo fieldInfo, BlockTermState _termState, boolean absolute) throws IOException { + final IntBlockTermState termState = (IntBlockTermState) _termState; + final boolean fieldHasPositions = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0; + final boolean fieldHasOffsets = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) >= 0; + final boolean fieldHasPayloads = fieldInfo.hasPayloads(); + + if (absolute) { + termState.docStartFP = 0; + termState.posStartFP = 0; + termState.payStartFP = 0; + } + + final long l = in.readVLong(); + if ((l & 0x01) == 0) { + termState.docStartFP += l >>> 1; + if (termState.docFreq == 1) { + termState.singletonDocID = in.readVInt(); + } else { + termState.singletonDocID = -1; + } + } else { + assert absolute == false; + assert termState.singletonDocID != -1; + termState.singletonDocID += (int) BitUtil.zigZagDecode(l >>> 1); + } + + if (fieldHasPositions) { + termState.posStartFP += in.readVLong(); + if (fieldHasOffsets || fieldHasPayloads) { + termState.payStartFP += in.readVLong(); + } + if (termState.totalTermFreq > BLOCK_SIZE) { + termState.lastPosBlockOffset = in.readVLong(); + } else { + termState.lastPosBlockOffset = -1; + } + } + + if (termState.docFreq > BLOCK_SIZE) { + termState.skipOffset = in.readVLong(); + } else { + termState.skipOffset = -1; + } + } + + @Override + public PostingsEnum postings(FieldInfo fieldInfo, BlockTermState termState, PostingsEnum reuse, int flags) throws IOException { + + boolean indexHasPositions = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0; + + if (indexHasPositions == false || PostingsEnum.featureRequested(flags, PostingsEnum.POSITIONS) == false) { + BlockDocsEnum docsEnum; + if (reuse instanceof BlockDocsEnum) { + docsEnum = (BlockDocsEnum) reuse; + if (docsEnum.canReuse(docIn, fieldInfo) == false) { + docsEnum = new BlockDocsEnum(fieldInfo); + } + } else { + docsEnum = new BlockDocsEnum(fieldInfo); + } + return docsEnum.reset((IntBlockTermState) termState, flags); + } else { + EverythingEnum everythingEnum; + if (reuse instanceof EverythingEnum) { + everythingEnum = (EverythingEnum) reuse; + if (everythingEnum.canReuse(docIn, fieldInfo) == false) { + everythingEnum = new EverythingEnum(fieldInfo); + } + } else { + everythingEnum = new EverythingEnum(fieldInfo); + } + return everythingEnum.reset((IntBlockTermState) termState, flags); + } + } + + @Override + public 
ImpactsEnum impacts(FieldInfo fieldInfo, BlockTermState state, int flags) throws IOException { + if (state.docFreq <= BLOCK_SIZE) { + // no skip data + return new SlowImpactsEnum(postings(fieldInfo, state, null, flags)); + } + + final boolean indexHasPositions = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0; + final boolean indexHasOffsets = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) >= 0; + final boolean indexHasPayloads = fieldInfo.hasPayloads(); + + if (indexHasPositions == false || PostingsEnum.featureRequested(flags, PostingsEnum.POSITIONS) == false) { + return new BlockImpactsDocsEnum(fieldInfo, (IntBlockTermState) state); + } + + if (indexHasPositions + && PostingsEnum.featureRequested(flags, PostingsEnum.POSITIONS) + && (indexHasOffsets == false || PostingsEnum.featureRequested(flags, PostingsEnum.OFFSETS) == false) + && (indexHasPayloads == false || PostingsEnum.featureRequested(flags, PostingsEnum.PAYLOADS) == false)) { + return new BlockImpactsPostingsEnum(fieldInfo, (IntBlockTermState) state); + } + + return new BlockImpactsEverythingEnum(fieldInfo, (IntBlockTermState) state, flags); + } + + final class BlockDocsEnum extends PostingsEnum { + + final PForUtil pforUtil = new PForUtil(new ForUtil()); + + private final long[] docBuffer = new long[BLOCK_SIZE + 1]; + private final long[] freqBuffer = new long[BLOCK_SIZE]; + + private int docBufferUpto; + + private ES812SkipReader skipper; + private boolean skipped; + + final IndexInput startDocIn; + + IndexInput docIn; + final boolean indexHasFreq; + final boolean indexHasPos; + final boolean indexHasOffsets; + final boolean indexHasPayloads; + + private int docFreq; // number of docs in this posting list + private long totalTermFreq; // sum of freqBuffer in this posting list (or docFreq when omitted) + private int blockUpto; // number of docs in or before the current block + private int doc; // doc we last read + private long accum; // accumulator for doc deltas + + // Where this term's postings start in the .doc file: + private long docTermStartFP; + + // Where this term's skip data starts (after + // docTermStartFP) in the .doc file (or -1 if there is + // no skip data for this term): + private long skipOffset; + + // docID for next skip point, we won't use skipper if + // target docID is not larger than this + private int nextSkipDoc; + + private boolean needsFreq; // true if the caller actually needs frequencies + // as we read freqBuffer lazily, isFreqsRead shows if freqBuffer are read for the current block + // always true when we don't have freqBuffer (indexHasFreq=false) or don't need freqBuffer + // (needsFreq=false) + private boolean isFreqsRead; + private int singletonDocID; // docid when there is a single pulsed posting, otherwise -1 + + BlockDocsEnum(FieldInfo fieldInfo) throws IOException { + this.startDocIn = ES812PostingsReader.this.docIn; + this.docIn = null; + indexHasFreq = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS) >= 0; + indexHasPos = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0; + indexHasOffsets = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) >= 0; + indexHasPayloads = fieldInfo.hasPayloads(); + // We set the last element of docBuffer to NO_MORE_DOCS, it helps save conditionals in + // advance() + docBuffer[BLOCK_SIZE] = NO_MORE_DOCS; + } + + public boolean canReuse(IndexInput docIn, FieldInfo fieldInfo) { + 
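+ // Reuse is only safe against the same underlying .doc input and identical per-field index
+ // options; otherwise the flags and state cached at construction time would be stale.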
return docIn == startDocIn + && indexHasFreq == (fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS) >= 0) + && indexHasPos == (fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0) + && indexHasPayloads == fieldInfo.hasPayloads(); + } + + public PostingsEnum reset(IntBlockTermState termState, int flags) throws IOException { + docFreq = termState.docFreq; + totalTermFreq = indexHasFreq ? termState.totalTermFreq : docFreq; + docTermStartFP = termState.docStartFP; + skipOffset = termState.skipOffset; + singletonDocID = termState.singletonDocID; + if (docFreq > 1) { + if (docIn == null) { + // lazy init + docIn = startDocIn.clone(); + } + docIn.seek(docTermStartFP); + } + + doc = -1; + this.needsFreq = PostingsEnum.featureRequested(flags, PostingsEnum.FREQS); + this.isFreqsRead = true; + if (indexHasFreq == false || needsFreq == false) { + for (int i = 0; i < ForUtil.BLOCK_SIZE; ++i) { + freqBuffer[i] = 1; + } + } + accum = 0; + blockUpto = 0; + nextSkipDoc = BLOCK_SIZE - 1; // we won't skip if target is found in first block + docBufferUpto = BLOCK_SIZE; + skipped = false; + return this; + } + + @Override + public int freq() throws IOException { + if (isFreqsRead == false) { + pforUtil.decode(docIn, freqBuffer); // read freqBuffer for this block + isFreqsRead = true; + } + return (int) freqBuffer[docBufferUpto - 1]; + } + + @Override + public int nextPosition() throws IOException { + return -1; + } + + @Override + public int startOffset() throws IOException { + return -1; + } + + @Override + public int endOffset() throws IOException { + return -1; + } + + @Override + public BytesRef getPayload() throws IOException { + return null; + } + + @Override + public int docID() { + return doc; + } + + private void refillDocs() throws IOException { + // Check if we skipped reading the previous block of freqBuffer, and if yes, position docIn + // after it + if (isFreqsRead == false) { + pforUtil.skip(docIn); + isFreqsRead = true; + } + + final int left = docFreq - blockUpto; + assert left >= 0; + + if (left >= BLOCK_SIZE) { + pforUtil.decodeAndPrefixSum(docIn, accum, docBuffer); + + if (indexHasFreq) { + if (needsFreq) { + isFreqsRead = false; + } else { + pforUtil.skip(docIn); // skip over freqBuffer if we don't need them at all + } + } + blockUpto += BLOCK_SIZE; + } else if (docFreq == 1) { + docBuffer[0] = singletonDocID; + freqBuffer[0] = totalTermFreq; + docBuffer[1] = NO_MORE_DOCS; + blockUpto++; + } else { + // Read vInts: + readVIntBlock(docIn, docBuffer, freqBuffer, left, indexHasFreq); + prefixSum(docBuffer, left, accum); + docBuffer[left] = NO_MORE_DOCS; + blockUpto += left; + } + accum = docBuffer[BLOCK_SIZE - 1]; + docBufferUpto = 0; + assert docBuffer[BLOCK_SIZE] == NO_MORE_DOCS; + } + + @Override + public int nextDoc() throws IOException { + if (docBufferUpto == BLOCK_SIZE) { + refillDocs(); // we don't need to load freqBuffer for now (will be loaded later if + // necessary) + } + + doc = (int) docBuffer[docBufferUpto]; + docBufferUpto++; + return doc; + } + + @Override + public int advance(int target) throws IOException { + // current skip docID < docIDs generated from current buffer <= next skip docID + // we don't need to skip if target is buffered already + if (docFreq > BLOCK_SIZE && target > nextSkipDoc) { + + if (skipper == null) { + // Lazy init: first time this enum has ever been used for skipping + skipper = new ES812SkipReader(docIn.clone(), MAX_SKIP_LEVELS, indexHasPos, indexHasOffsets, indexHasPayloads); + } + + if (skipped == 
false) { + assert skipOffset != -1; + // This is the first time this enum has skipped + // since reset() was called; load the skip data: + skipper.init(docTermStartFP + skipOffset, docTermStartFP, 0, 0, docFreq); + skipped = true; + } + + // always plus one to fix the result, since skip position in Lucene90SkipReader + // is a little different from MultiLevelSkipListReader + final int newDocUpto = skipper.skipTo(target) + 1; + + if (newDocUpto >= blockUpto) { + // Skipper moved + assert newDocUpto % BLOCK_SIZE == 0 : "got " + newDocUpto; + blockUpto = newDocUpto; + + // Force to read next block + docBufferUpto = BLOCK_SIZE; + accum = skipper.getDoc(); // actually, this is just lastSkipEntry + docIn.seek(skipper.getDocPointer()); // now point to the block we want to search + // even if freqBuffer were not read from the previous block, we will mark them as read, + // as we don't need to skip the previous block freqBuffer in refillDocs, + // as we have already positioned docIn where in needs to be. + isFreqsRead = true; + } + // next time we call advance, this is used to + // foresee whether skipper is necessary. + nextSkipDoc = skipper.getNextSkipDoc(); + } + if (docBufferUpto == BLOCK_SIZE) { + refillDocs(); + } + + // Now scan... this is an inlined/pared down version + // of nextDoc(): + long doc; + while (true) { + doc = docBuffer[docBufferUpto]; + + if (doc >= target) { + break; + } + ++docBufferUpto; + } + + docBufferUpto++; + return this.doc = (int) doc; + } + + @Override + public long cost() { + return docFreq; + } + } + + // Also handles payloads + offsets + final class EverythingEnum extends PostingsEnum { + + final PForUtil pforUtil = new PForUtil(new ForUtil()); + + private final long[] docBuffer = new long[BLOCK_SIZE + 1]; + private final long[] freqBuffer = new long[BLOCK_SIZE + 1]; + private final long[] posDeltaBuffer = new long[BLOCK_SIZE]; + + private final long[] payloadLengthBuffer; + private final long[] offsetStartDeltaBuffer; + private final long[] offsetLengthBuffer; + + private byte[] payloadBytes; + private int payloadByteUpto; + private int payloadLength; + + private int lastStartOffset; + private int startOffset; + private int endOffset; + + private int docBufferUpto; + private int posBufferUpto; + + private ES812SkipReader skipper; + private boolean skipped; + + final IndexInput startDocIn; + + IndexInput docIn; + final IndexInput posIn; + final IndexInput payIn; + final BytesRef payload; + + final boolean indexHasOffsets; + final boolean indexHasPayloads; + + private int docFreq; // number of docs in this posting list + private long totalTermFreq; // number of positions in this posting list + private int blockUpto; // number of docs in or before the current block + private int doc; // doc we last read + private long accum; // accumulator for doc deltas + private int freq; // freq we last read + private int position; // current position + + // how many positions "behind" we are; nextPosition must + // skip these to "catch up": + private int posPendingCount; + + // Lazy pos seek: if != -1 then we must seek to this FP + // before reading positions: + private long posPendingFP; + + // Lazy pay seek: if != -1 then we must seek to this FP + // before reading payloads/offsets: + private long payPendingFP; + + // Where this term's postings start in the .doc file: + private long docTermStartFP; + + // Where this term's postings start in the .pos file: + private long posTermStartFP; + + // Where this term's payloads/offsets start in the .pay + // file: + private long 
payTermStartFP; + + // File pointer where the last (vInt encoded) pos delta + // block is. We need this to know whether to bulk + // decode vs vInt decode the block: + private long lastPosBlockFP; + + // Where this term's skip data starts (after + // docTermStartFP) in the .doc file (or -1 if there is + // no skip data for this term): + private long skipOffset; + + private int nextSkipDoc; + + private boolean needsOffsets; // true if we actually need offsets + private boolean needsPayloads; // true if we actually need payloads + private int singletonDocID; // docid when there is a single pulsed posting, otherwise -1 + + EverythingEnum(FieldInfo fieldInfo) throws IOException { + indexHasOffsets = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) >= 0; + indexHasPayloads = fieldInfo.hasPayloads(); + + this.startDocIn = ES812PostingsReader.this.docIn; + this.docIn = null; + this.posIn = ES812PostingsReader.this.posIn.clone(); + if (indexHasOffsets || indexHasPayloads) { + this.payIn = ES812PostingsReader.this.payIn.clone(); + } else { + this.payIn = null; + } + if (indexHasOffsets) { + offsetStartDeltaBuffer = new long[BLOCK_SIZE]; + offsetLengthBuffer = new long[BLOCK_SIZE]; + } else { + offsetStartDeltaBuffer = null; + offsetLengthBuffer = null; + startOffset = -1; + endOffset = -1; + } + + if (indexHasPayloads) { + payloadLengthBuffer = new long[BLOCK_SIZE]; + payloadBytes = new byte[128]; + payload = new BytesRef(); + } else { + payloadLengthBuffer = null; + payloadBytes = null; + payload = null; + } + + // We set the last element of docBuffer to NO_MORE_DOCS, it helps save conditionals in + // advance() + docBuffer[BLOCK_SIZE] = NO_MORE_DOCS; + } + + public boolean canReuse(IndexInput docIn, FieldInfo fieldInfo) { + return docIn == startDocIn + && indexHasOffsets == (fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) >= 0) + && indexHasPayloads == fieldInfo.hasPayloads(); + } + + public EverythingEnum reset(IntBlockTermState termState, int flags) throws IOException { + docFreq = termState.docFreq; + docTermStartFP = termState.docStartFP; + posTermStartFP = termState.posStartFP; + payTermStartFP = termState.payStartFP; + skipOffset = termState.skipOffset; + totalTermFreq = termState.totalTermFreq; + singletonDocID = termState.singletonDocID; + if (docFreq > 1) { + if (docIn == null) { + // lazy init + docIn = startDocIn.clone(); + } + docIn.seek(docTermStartFP); + } + posPendingFP = posTermStartFP; + payPendingFP = payTermStartFP; + posPendingCount = 0; + if (termState.totalTermFreq < BLOCK_SIZE) { + lastPosBlockFP = posTermStartFP; + } else if (termState.totalTermFreq == BLOCK_SIZE) { + lastPosBlockFP = -1; + } else { + lastPosBlockFP = posTermStartFP + termState.lastPosBlockOffset; + } + + this.needsOffsets = PostingsEnum.featureRequested(flags, PostingsEnum.OFFSETS); + this.needsPayloads = PostingsEnum.featureRequested(flags, PostingsEnum.PAYLOADS); + + doc = -1; + accum = 0; + blockUpto = 0; + if (docFreq > BLOCK_SIZE) { + nextSkipDoc = BLOCK_SIZE - 1; // we won't skip if target is found in first block + } else { + nextSkipDoc = NO_MORE_DOCS; // not enough docs for skipping + } + docBufferUpto = BLOCK_SIZE; + skipped = false; + return this; + } + + @Override + public int freq() throws IOException { + return freq; + } + + @Override + public int docID() { + return doc; + } + + private void refillDocs() throws IOException { + final int left = docFreq - blockUpto; + assert left >= 0; + + if (left >= 
BLOCK_SIZE) { + pforUtil.decodeAndPrefixSum(docIn, accum, docBuffer); + pforUtil.decode(docIn, freqBuffer); + blockUpto += BLOCK_SIZE; + } else if (docFreq == 1) { + docBuffer[0] = singletonDocID; + freqBuffer[0] = totalTermFreq; + docBuffer[1] = NO_MORE_DOCS; + blockUpto++; + } else { + readVIntBlock(docIn, docBuffer, freqBuffer, left, true); + prefixSum(docBuffer, left, accum); + docBuffer[left] = NO_MORE_DOCS; + blockUpto += left; + } + accum = docBuffer[BLOCK_SIZE - 1]; + docBufferUpto = 0; + assert docBuffer[BLOCK_SIZE] == NO_MORE_DOCS; + } + + private void refillPositions() throws IOException { + if (posIn.getFilePointer() == lastPosBlockFP) { + final int count = (int) (totalTermFreq % BLOCK_SIZE); + int payloadLength = 0; + int offsetLength = 0; + payloadByteUpto = 0; + for (int i = 0; i < count; i++) { + int code = posIn.readVInt(); + if (indexHasPayloads) { + if ((code & 1) != 0) { + payloadLength = posIn.readVInt(); + } + payloadLengthBuffer[i] = payloadLength; + posDeltaBuffer[i] = code >>> 1; + if (payloadLength != 0) { + if (payloadByteUpto + payloadLength > payloadBytes.length) { + payloadBytes = ArrayUtil.grow(payloadBytes, payloadByteUpto + payloadLength); + } + posIn.readBytes(payloadBytes, payloadByteUpto, payloadLength); + payloadByteUpto += payloadLength; + } + } else { + posDeltaBuffer[i] = code; + } + + if (indexHasOffsets) { + int deltaCode = posIn.readVInt(); + if ((deltaCode & 1) != 0) { + offsetLength = posIn.readVInt(); + } + offsetStartDeltaBuffer[i] = deltaCode >>> 1; + offsetLengthBuffer[i] = offsetLength; + } + } + payloadByteUpto = 0; + } else { + pforUtil.decode(posIn, posDeltaBuffer); + + if (indexHasPayloads) { + if (needsPayloads) { + pforUtil.decode(payIn, payloadLengthBuffer); + int numBytes = payIn.readVInt(); + + if (numBytes > payloadBytes.length) { + payloadBytes = ArrayUtil.growNoCopy(payloadBytes, numBytes); + } + payIn.readBytes(payloadBytes, 0, numBytes); + } else { + // this works, because when writing a vint block we always force the first length to be + // written + pforUtil.skip(payIn); // skip over lengths + int numBytes = payIn.readVInt(); // read length of payloadBytes + payIn.seek(payIn.getFilePointer() + numBytes); // skip over payloadBytes + } + payloadByteUpto = 0; + } + + if (indexHasOffsets) { + if (needsOffsets) { + pforUtil.decode(payIn, offsetStartDeltaBuffer); + pforUtil.decode(payIn, offsetLengthBuffer); + } else { + // this works, because when writing a vint block we always force the first length to be + // written + pforUtil.skip(payIn); // skip over starts + pforUtil.skip(payIn); // skip over lengths + } + } + } + } + + @Override + public int nextDoc() throws IOException { + if (docBufferUpto == BLOCK_SIZE) { + refillDocs(); + } + + doc = (int) docBuffer[docBufferUpto]; + freq = (int) freqBuffer[docBufferUpto]; + posPendingCount += freq; + docBufferUpto++; + + position = 0; + lastStartOffset = 0; + return doc; + } + + @Override + public int advance(int target) throws IOException { + if (target > nextSkipDoc) { + if (skipper == null) { + // Lazy init: first time this enum has ever been used for skipping + skipper = new ES812SkipReader(docIn.clone(), MAX_SKIP_LEVELS, true, indexHasOffsets, indexHasPayloads); + } + + if (skipped == false) { + assert skipOffset != -1; + // This is the first time this enum has skipped + // since reset() was called; load the skip data: + skipper.init(docTermStartFP + skipOffset, docTermStartFP, posTermStartFP, payTermStartFP, docFreq); + skipped = true; + } + + final int newDocUpto = 
skipper.skipTo(target) + 1; + + if (newDocUpto > blockUpto - BLOCK_SIZE + docBufferUpto) { + // Skipper moved + assert newDocUpto % BLOCK_SIZE == 0 : "got " + newDocUpto; + blockUpto = newDocUpto; + + // Force to read next block + docBufferUpto = BLOCK_SIZE; + accum = skipper.getDoc(); + docIn.seek(skipper.getDocPointer()); + posPendingFP = skipper.getPosPointer(); + payPendingFP = skipper.getPayPointer(); + posPendingCount = skipper.getPosBufferUpto(); + lastStartOffset = 0; // new document + payloadByteUpto = skipper.getPayloadByteUpto(); + } + nextSkipDoc = skipper.getNextSkipDoc(); + } + if (docBufferUpto == BLOCK_SIZE) { + refillDocs(); + } + + // Now scan: + long doc; + while (true) { + doc = docBuffer[docBufferUpto]; + freq = (int) freqBuffer[docBufferUpto]; + posPendingCount += freq; + docBufferUpto++; + + if (doc >= target) { + break; + } + } + + position = 0; + lastStartOffset = 0; + return this.doc = (int) doc; + } + + // TODO: in theory we could avoid loading frq block + // when not needed, ie, use skip data to load how far to + // seek the pos pointer ... instead of having to load frq + // blocks only to sum up how many positions to skip + private void skipPositions() throws IOException { + // Skip positions now: + int toSkip = posPendingCount - freq; + // if (DEBUG) { + // System.out.println(" FPR.skipPositions: toSkip=" + toSkip); + // } + + final int leftInBlock = BLOCK_SIZE - posBufferUpto; + if (toSkip < leftInBlock) { + int end = posBufferUpto + toSkip; + while (posBufferUpto < end) { + if (indexHasPayloads) { + payloadByteUpto += (int) payloadLengthBuffer[posBufferUpto]; + } + posBufferUpto++; + } + } else { + toSkip -= leftInBlock; + while (toSkip >= BLOCK_SIZE) { + assert posIn.getFilePointer() != lastPosBlockFP; + pforUtil.skip(posIn); + + if (indexHasPayloads) { + // Skip payloadLength block: + pforUtil.skip(payIn); + + // Skip payloadBytes block: + int numBytes = payIn.readVInt(); + payIn.seek(payIn.getFilePointer() + numBytes); + } + + if (indexHasOffsets) { + pforUtil.skip(payIn); + pforUtil.skip(payIn); + } + toSkip -= BLOCK_SIZE; + } + refillPositions(); + payloadByteUpto = 0; + posBufferUpto = 0; + while (posBufferUpto < toSkip) { + if (indexHasPayloads) { + payloadByteUpto += (int) payloadLengthBuffer[posBufferUpto]; + } + posBufferUpto++; + } + } + + position = 0; + lastStartOffset = 0; + } + + @Override + public int nextPosition() throws IOException { + assert posPendingCount > 0; + + if (posPendingFP != -1) { + posIn.seek(posPendingFP); + posPendingFP = -1; + + if (payPendingFP != -1 && payIn != null) { + payIn.seek(payPendingFP); + payPendingFP = -1; + } + + // Force buffer refill: + posBufferUpto = BLOCK_SIZE; + } + + if (posPendingCount > freq) { + skipPositions(); + posPendingCount = freq; + } + + if (posBufferUpto == BLOCK_SIZE) { + refillPositions(); + posBufferUpto = 0; + } + position += (int) posDeltaBuffer[posBufferUpto]; + + if (indexHasPayloads) { + payloadLength = (int) payloadLengthBuffer[posBufferUpto]; + payload.bytes = payloadBytes; + payload.offset = payloadByteUpto; + payload.length = payloadLength; + payloadByteUpto += payloadLength; + } + + if (indexHasOffsets) { + startOffset = lastStartOffset + (int) offsetStartDeltaBuffer[posBufferUpto]; + endOffset = startOffset + (int) offsetLengthBuffer[posBufferUpto]; + lastStartOffset = startOffset; + } + + posBufferUpto++; + posPendingCount--; + return position; + } + + @Override + public int startOffset() { + return startOffset; + } + + @Override + public int endOffset() { + return endOffset; 
+ } + + @Override + public BytesRef getPayload() { + if (payloadLength == 0) { + return null; + } else { + return payload; + } + } + + @Override + public long cost() { + return docFreq; + } + } + + final class BlockImpactsDocsEnum extends ImpactsEnum { + + final PForUtil pforUtil = new PForUtil(new ForUtil()); + + private final long[] docBuffer = new long[BLOCK_SIZE + 1]; + private final long[] freqBuffer = new long[BLOCK_SIZE]; + + private int docBufferUpto; + + private final ES812ScoreSkipReader skipper; + + final IndexInput docIn; + + final boolean indexHasFreqs; + + private int docFreq; // number of docs in this posting list + private int blockUpto; // number of documents in or before the current block + private int doc; // doc we last read + private long accum; // accumulator for doc deltas + + private int nextSkipDoc = -1; + + private long seekTo = -1; + + // as we read freqBuffer lazily, isFreqsRead shows if freqBuffer are read for the current block + // always true when we don't have freqBuffer (indexHasFreq=false) or don't need freqBuffer + // (needsFreq=false) + private boolean isFreqsRead; + + BlockImpactsDocsEnum(FieldInfo fieldInfo, IntBlockTermState termState) throws IOException { + indexHasFreqs = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS) >= 0; + final boolean indexHasPositions = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0; + final boolean indexHasOffsets = fieldInfo.getIndexOptions() + .compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) >= 0; + final boolean indexHasPayloads = fieldInfo.hasPayloads(); + + this.docIn = ES812PostingsReader.this.docIn.clone(); + + docFreq = termState.docFreq; + docIn.seek(termState.docStartFP); + + doc = -1; + accum = 0; + blockUpto = 0; + docBufferUpto = BLOCK_SIZE; + + skipper = new ES812ScoreSkipReader(docIn.clone(), MAX_SKIP_LEVELS, indexHasPositions, indexHasOffsets, indexHasPayloads); + skipper.init( + termState.docStartFP + termState.skipOffset, + termState.docStartFP, + termState.posStartFP, + termState.payStartFP, + docFreq + ); + + // We set the last element of docBuffer to NO_MORE_DOCS, it helps save conditionals in + // advance() + docBuffer[BLOCK_SIZE] = NO_MORE_DOCS; + this.isFreqsRead = true; + if (indexHasFreqs == false) { + Arrays.fill(freqBuffer, 1L); + } + } + + @Override + public int freq() throws IOException { + if (isFreqsRead == false) { + pforUtil.decode(docIn, freqBuffer); // read freqBuffer for this block + isFreqsRead = true; + } + return (int) freqBuffer[docBufferUpto - 1]; + } + + @Override + public int docID() { + return doc; + } + + private void refillDocs() throws IOException { + // Check if we skipped reading the previous block of freqBuffer, and if yes, position docIn + // after it + if (isFreqsRead == false) { + pforUtil.skip(docIn); + isFreqsRead = true; + } + + final int left = docFreq - blockUpto; + assert left >= 0; + + if (left >= BLOCK_SIZE) { + pforUtil.decodeAndPrefixSum(docIn, accum, docBuffer); + if (indexHasFreqs) { + isFreqsRead = false; + } + blockUpto += BLOCK_SIZE; + } else { + readVIntBlock(docIn, docBuffer, freqBuffer, left, indexHasFreqs); + prefixSum(docBuffer, left, accum); + docBuffer[left] = NO_MORE_DOCS; + blockUpto += left; + } + accum = docBuffer[BLOCK_SIZE - 1]; + docBufferUpto = 0; + assert docBuffer[BLOCK_SIZE] == NO_MORE_DOCS; + } + + @Override + public void advanceShallow(int target) throws IOException { + if (target > nextSkipDoc) { + // always plus one to fix the result, since skip position in 
Lucene90SkipReader + // is a little different from MultiLevelSkipListReader + final int newDocUpto = skipper.skipTo(target) + 1; + + if (newDocUpto >= blockUpto) { + // Skipper moved + assert newDocUpto % BLOCK_SIZE == 0 : "got " + newDocUpto; + blockUpto = newDocUpto; + + // Force to read next block + docBufferUpto = BLOCK_SIZE; + accum = skipper.getDoc(); + seekTo = skipper.getDocPointer(); // delay the seek + } + // next time we call advance, this is used to + // foresee whether skipper is necessary. + nextSkipDoc = skipper.getNextSkipDoc(); + } + assert nextSkipDoc >= target; + } + + @Override + public Impacts getImpacts() throws IOException { + // nextDoc() doesn't advance skip lists, so it's important to do it here to make sure we're + // not returning impacts over a bigger range of doc IDs than necessary. + advanceShallow(doc); + return skipper.getImpacts(); + } + + @Override + public int nextDoc() throws IOException { + if (docBufferUpto == BLOCK_SIZE) { + if (seekTo >= 0) { + docIn.seek(seekTo); + isFreqsRead = true; // reset isFreqsRead + seekTo = -1; + } + refillDocs(); + } + return this.doc = (int) docBuffer[docBufferUpto++]; + } + + @Override + public int advance(int target) throws IOException { + if (target > nextSkipDoc) { + advanceShallow(target); + } + if (docBufferUpto == BLOCK_SIZE) { + if (seekTo >= 0) { + docIn.seek(seekTo); + isFreqsRead = true; // reset isFreqsRead + seekTo = -1; + } + refillDocs(); + } + + int next = findFirstGreater(docBuffer, target, docBufferUpto); + this.doc = (int) docBuffer[next]; + docBufferUpto = next + 1; + return doc; + } + + @Override + public int nextPosition() throws IOException { + return -1; + } + + @Override + public int startOffset() { + return -1; + } + + @Override + public int endOffset() { + return -1; + } + + @Override + public BytesRef getPayload() { + return null; + } + + @Override + public long cost() { + return docFreq; + } + } + + final class BlockImpactsPostingsEnum extends ImpactsEnum { + + final PForUtil pforUtil = new PForUtil(new ForUtil()); + + private final long[] docBuffer = new long[BLOCK_SIZE]; + private final long[] freqBuffer = new long[BLOCK_SIZE]; + private final long[] posDeltaBuffer = new long[BLOCK_SIZE]; + + private int docBufferUpto; + private int posBufferUpto; + + private final ES812ScoreSkipReader skipper; + + final IndexInput docIn; + final IndexInput posIn; + + final boolean indexHasOffsets; + final boolean indexHasPayloads; + + private int docFreq; // number of docs in this posting list + private long totalTermFreq; // number of positions in this posting list + private int docUpto; // how many docs we've read + private int doc; // doc we last read + private long accum; // accumulator for doc deltas + private int freq; // freq we last read + private int position; // current position + + // how many positions "behind" we are; nextPosition must + // skip these to "catch up": + private int posPendingCount; + + // Lazy pos seek: if != -1 then we must seek to this FP + // before reading positions: + private long posPendingFP; + + // Where this term's postings start in the .doc file: + private long docTermStartFP; + + // Where this term's postings start in the .pos file: + private long posTermStartFP; + + // Where this term's payloads/offsets start in the .pay + // file: + private long payTermStartFP; + + // File pointer where the last (vInt encoded) pos delta + // block is. 
We need this to know whether to bulk + // decode vs vInt decode the block: + private long lastPosBlockFP; + + private int nextSkipDoc = -1; + + private long seekTo = -1; + + BlockImpactsPostingsEnum(FieldInfo fieldInfo, IntBlockTermState termState) throws IOException { + indexHasOffsets = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) >= 0; + indexHasPayloads = fieldInfo.hasPayloads(); + + this.docIn = ES812PostingsReader.this.docIn.clone(); + + this.posIn = ES812PostingsReader.this.posIn.clone(); + + docFreq = termState.docFreq; + docTermStartFP = termState.docStartFP; + posTermStartFP = termState.posStartFP; + payTermStartFP = termState.payStartFP; + totalTermFreq = termState.totalTermFreq; + docIn.seek(docTermStartFP); + posPendingFP = posTermStartFP; + posPendingCount = 0; + if (termState.totalTermFreq < BLOCK_SIZE) { + lastPosBlockFP = posTermStartFP; + } else if (termState.totalTermFreq == BLOCK_SIZE) { + lastPosBlockFP = -1; + } else { + lastPosBlockFP = posTermStartFP + termState.lastPosBlockOffset; + } + + doc = -1; + accum = 0; + docUpto = 0; + docBufferUpto = BLOCK_SIZE; + + skipper = new ES812ScoreSkipReader(docIn.clone(), MAX_SKIP_LEVELS, true, indexHasOffsets, indexHasPayloads); + skipper.init(docTermStartFP + termState.skipOffset, docTermStartFP, posTermStartFP, payTermStartFP, docFreq); + } + + @Override + public int freq() throws IOException { + return freq; + } + + @Override + public int docID() { + return doc; + } + + private void refillDocs() throws IOException { + final int left = docFreq - docUpto; + assert left >= 0; + + if (left >= BLOCK_SIZE) { + pforUtil.decodeAndPrefixSum(docIn, accum, docBuffer); + pforUtil.decode(docIn, freqBuffer); + } else { + readVIntBlock(docIn, docBuffer, freqBuffer, left, true); + prefixSum(docBuffer, left, accum); + docBuffer[left] = NO_MORE_DOCS; + } + accum = docBuffer[BLOCK_SIZE - 1]; + docBufferUpto = 0; + } + + private void refillPositions() throws IOException { + if (posIn.getFilePointer() == lastPosBlockFP) { + final int count = (int) (totalTermFreq % BLOCK_SIZE); + int payloadLength = 0; + for (int i = 0; i < count; i++) { + int code = posIn.readVInt(); + if (indexHasPayloads) { + if ((code & 1) != 0) { + payloadLength = posIn.readVInt(); + } + posDeltaBuffer[i] = code >>> 1; + if (payloadLength != 0) { + posIn.seek(posIn.getFilePointer() + payloadLength); + } + } else { + posDeltaBuffer[i] = code; + } + if (indexHasOffsets) { + if ((posIn.readVInt() & 1) != 0) { + // offset length changed + posIn.readVInt(); + } + } + } + } else { + pforUtil.decode(posIn, posDeltaBuffer); + } + } + + @Override + public void advanceShallow(int target) throws IOException { + if (target > nextSkipDoc) { + // always plus one to fix the result, since skip position in Lucene90SkipReader + // is a little different from MultiLevelSkipListReader + final int newDocUpto = skipper.skipTo(target) + 1; + + if (newDocUpto > docUpto) { + // Skipper moved + assert newDocUpto % BLOCK_SIZE == 0 : "got " + newDocUpto; + docUpto = newDocUpto; + + // Force to read next block + docBufferUpto = BLOCK_SIZE; + accum = skipper.getDoc(); + posPendingFP = skipper.getPosPointer(); + posPendingCount = skipper.getPosBufferUpto(); + seekTo = skipper.getDocPointer(); // delay the seek + } + // next time we call advance, this is used to + // foresee whether skipper is necessary. 
+ nextSkipDoc = skipper.getNextSkipDoc(); + } + assert nextSkipDoc >= target; + } + + @Override + public Impacts getImpacts() throws IOException { + advanceShallow(doc); + return skipper.getImpacts(); + } + + @Override + public int nextDoc() throws IOException { + return advance(doc + 1); + } + + @Override + public int advance(int target) throws IOException { + if (target > nextSkipDoc) { + advanceShallow(target); + } + if (docBufferUpto == BLOCK_SIZE) { + if (seekTo >= 0) { + docIn.seek(seekTo); + seekTo = -1; + } + refillDocs(); + } + + int next = findFirstGreater(docBuffer, target, docBufferUpto); + if (next == BLOCK_SIZE) { + return doc = NO_MORE_DOCS; + } + this.doc = (int) docBuffer[next]; + this.freq = (int) freqBuffer[next]; + for (int i = docBufferUpto; i <= next; ++i) { + posPendingCount += (int) freqBuffer[i]; + } + docUpto += next - docBufferUpto + 1; + docBufferUpto = next + 1; + position = 0; + return doc; + } + + // TODO: in theory we could avoid loading frq block + // when not needed, ie, use skip data to load how far to + // seek the pos pointer ... instead of having to load frq + // blocks only to sum up how many positions to skip + private void skipPositions() throws IOException { + // Skip positions now: + int toSkip = posPendingCount - freq; + + final int leftInBlock = BLOCK_SIZE - posBufferUpto; + if (toSkip < leftInBlock) { + posBufferUpto += toSkip; + } else { + toSkip -= leftInBlock; + while (toSkip >= BLOCK_SIZE) { + assert posIn.getFilePointer() != lastPosBlockFP; + pforUtil.skip(posIn); + toSkip -= BLOCK_SIZE; + } + refillPositions(); + posBufferUpto = toSkip; + } + + position = 0; + } + + @Override + public int nextPosition() throws IOException { + assert posPendingCount > 0; + + if (posPendingFP != -1) { + posIn.seek(posPendingFP); + posPendingFP = -1; + + // Force buffer refill: + posBufferUpto = BLOCK_SIZE; + } + + if (posPendingCount > freq) { + skipPositions(); + posPendingCount = freq; + } + + if (posBufferUpto == BLOCK_SIZE) { + refillPositions(); + posBufferUpto = 0; + } + position += (int) posDeltaBuffer[posBufferUpto++]; + + posPendingCount--; + return position; + } + + @Override + public int startOffset() { + return -1; + } + + @Override + public int endOffset() { + return -1; + } + + @Override + public BytesRef getPayload() { + return null; + } + + @Override + public long cost() { + return docFreq; + } + } + + final class BlockImpactsEverythingEnum extends ImpactsEnum { + + final PForUtil pforUtil = new PForUtil(new ForUtil()); + + private final long[] docBuffer = new long[BLOCK_SIZE]; + private final long[] freqBuffer = new long[BLOCK_SIZE]; + private final long[] posDeltaBuffer = new long[BLOCK_SIZE]; + + private final long[] payloadLengthBuffer; + private final long[] offsetStartDeltaBuffer; + private final long[] offsetLengthBuffer; + + private byte[] payloadBytes; + private int payloadByteUpto; + private int payloadLength; + + private int lastStartOffset; + private int startOffset = -1; + private int endOffset = -1; + + private int docBufferUpto; + private int posBufferUpto; + + private final ES812ScoreSkipReader skipper; + + final IndexInput docIn; + final IndexInput posIn; + final IndexInput payIn; + final BytesRef payload; + + final boolean indexHasFreq; + final boolean indexHasPos; + final boolean indexHasOffsets; + final boolean indexHasPayloads; + + private int docFreq; // number of docs in this posting list + private long totalTermFreq; // number of positions in this posting list + private int docUpto; // how many docs we've read + 
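+ // posDocUpTo lags docUpto until positions are actually requested. For example, after
+ // advance() has consumed three docs with freqs {2, 1, 4} and nextPosition() was never
+ // called, docUpto == 3 while posDocUpTo == 0; the catch-up loops in refillDocs() and
+ // nextPosition() then add 2 + 1 + 4 to posPendingCount before any position block is read.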
private int posDocUpTo; // for how many docs we've read positions, offsets, and payloads + private int doc; // doc we last read + private long accum; // accumulator for doc deltas + private int position; // current position + + // how many positions "behind" we are; nextPosition must + // skip these to "catch up": + private int posPendingCount; + + // Lazy pos seek: if != -1 then we must seek to this FP + // before reading positions: + private long posPendingFP; + + // Lazy pay seek: if != -1 then we must seek to this FP + // before reading payloads/offsets: + private long payPendingFP; + + // Where this term's postings start in the .doc file: + private long docTermStartFP; + + // Where this term's postings start in the .pos file: + private long posTermStartFP; + + // Where this term's payloads/offsets start in the .pay + // file: + private long payTermStartFP; + + // File pointer where the last (vInt encoded) pos delta + // block is. We need this to know whether to bulk + // decode vs vInt decode the block: + private long lastPosBlockFP; + + private int nextSkipDoc = -1; + + private final boolean needsPositions; + private final boolean needsOffsets; // true if we actually need offsets + private final boolean needsPayloads; // true if we actually need payloads + + private boolean isFreqsRead; // shows if freqBuffer for the current doc block are read into freqBuffer + + private long seekTo = -1; + + BlockImpactsEverythingEnum(FieldInfo fieldInfo, IntBlockTermState termState, int flags) throws IOException { + indexHasFreq = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS) >= 0; + indexHasPos = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0; + indexHasOffsets = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) >= 0; + indexHasPayloads = fieldInfo.hasPayloads(); + + needsPositions = PostingsEnum.featureRequested(flags, PostingsEnum.POSITIONS); + needsOffsets = PostingsEnum.featureRequested(flags, PostingsEnum.OFFSETS); + needsPayloads = PostingsEnum.featureRequested(flags, PostingsEnum.PAYLOADS); + + this.docIn = ES812PostingsReader.this.docIn.clone(); + + if (indexHasPos && needsPositions) { + this.posIn = ES812PostingsReader.this.posIn.clone(); + } else { + this.posIn = null; + } + + if ((indexHasOffsets && needsOffsets) || (indexHasPayloads && needsPayloads)) { + this.payIn = ES812PostingsReader.this.payIn.clone(); + } else { + this.payIn = null; + } + + if (indexHasOffsets) { + offsetStartDeltaBuffer = new long[BLOCK_SIZE]; + offsetLengthBuffer = new long[BLOCK_SIZE]; + } else { + offsetStartDeltaBuffer = null; + offsetLengthBuffer = null; + startOffset = -1; + endOffset = -1; + } + + if (indexHasPayloads) { + payloadLengthBuffer = new long[BLOCK_SIZE]; + payloadBytes = new byte[128]; + payload = new BytesRef(); + } else { + payloadLengthBuffer = null; + payloadBytes = null; + payload = null; + } + + docFreq = termState.docFreq; + docTermStartFP = termState.docStartFP; + posTermStartFP = termState.posStartFP; + payTermStartFP = termState.payStartFP; + totalTermFreq = termState.totalTermFreq; + docIn.seek(docTermStartFP); + posPendingFP = posTermStartFP; + payPendingFP = payTermStartFP; + posPendingCount = 0; + if (termState.totalTermFreq < BLOCK_SIZE) { + lastPosBlockFP = posTermStartFP; + } else if (termState.totalTermFreq == BLOCK_SIZE) { + lastPosBlockFP = -1; + } else { + lastPosBlockFP = posTermStartFP + termState.lastPosBlockOffset; + } + + doc = -1; + accum = 0; + docUpto = 0; + 
posDocUpTo = 0; + isFreqsRead = true; + docBufferUpto = BLOCK_SIZE; + + skipper = new ES812ScoreSkipReader(docIn.clone(), MAX_SKIP_LEVELS, indexHasPos, indexHasOffsets, indexHasPayloads); + skipper.init(docTermStartFP + termState.skipOffset, docTermStartFP, posTermStartFP, payTermStartFP, docFreq); + + if (indexHasFreq == false) { + for (int i = 0; i < ForUtil.BLOCK_SIZE; ++i) { + freqBuffer[i] = 1; + } + } + } + + @Override + public int freq() throws IOException { + if (indexHasFreq && (isFreqsRead == false)) { + pforUtil.decode(docIn, freqBuffer); // read freqBuffer for this block + isFreqsRead = true; + } + return (int) freqBuffer[docBufferUpto - 1]; + } + + @Override + public int docID() { + return doc; + } + + private void refillDocs() throws IOException { + if (indexHasFreq) { + if (isFreqsRead == false) { // previous freq block was not read + // check if we need to load the previous freq block to catch up on positions or we can + // skip it + if (indexHasPos && needsPositions && (posDocUpTo < docUpto)) { + pforUtil.decode(docIn, freqBuffer); // load the previous freq block + } else { + pforUtil.skip(docIn); // skip it + } + isFreqsRead = true; + } + if (indexHasPos && needsPositions) { + while (posDocUpTo < docUpto) { // catch on positions, bring posPendingCount upto the current doc + posPendingCount += (int) freqBuffer[docBufferUpto - (docUpto - posDocUpTo)]; + posDocUpTo++; + } + } + } + + final int left = docFreq - docUpto; + assert left >= 0; + + if (left >= BLOCK_SIZE) { + pforUtil.decodeAndPrefixSum(docIn, accum, docBuffer); + if (indexHasFreq) { + isFreqsRead = false; // freq block will be loaded lazily when necessary, we don't load it here + } + } else { + readVIntBlock(docIn, docBuffer, freqBuffer, left, indexHasFreq); + prefixSum(docBuffer, left, accum); + docBuffer[left] = NO_MORE_DOCS; + } + accum = docBuffer[BLOCK_SIZE - 1]; + docBufferUpto = 0; + } + + private void refillPositions() throws IOException { + if (posIn.getFilePointer() == lastPosBlockFP) { + final int count = (int) (totalTermFreq % BLOCK_SIZE); + int payloadLength = 0; + int offsetLength = 0; + payloadByteUpto = 0; + for (int i = 0; i < count; i++) { + int code = posIn.readVInt(); + if (indexHasPayloads) { + if ((code & 1) != 0) { + payloadLength = posIn.readVInt(); + } + payloadLengthBuffer[i] = payloadLength; + posDeltaBuffer[i] = code >>> 1; + if (payloadLength != 0) { + if (payloadByteUpto + payloadLength > payloadBytes.length) { + payloadBytes = ArrayUtil.grow(payloadBytes, payloadByteUpto + payloadLength); + } + posIn.readBytes(payloadBytes, payloadByteUpto, payloadLength); + payloadByteUpto += payloadLength; + } + } else { + posDeltaBuffer[i] = code; + } + + if (indexHasOffsets) { + int deltaCode = posIn.readVInt(); + if ((deltaCode & 1) != 0) { + offsetLength = posIn.readVInt(); + } + offsetStartDeltaBuffer[i] = deltaCode >>> 1; + offsetLengthBuffer[i] = offsetLength; + } + } + payloadByteUpto = 0; + } else { + pforUtil.decode(posIn, posDeltaBuffer); + + if (indexHasPayloads && payIn != null) { + if (needsPayloads) { + pforUtil.decode(payIn, payloadLengthBuffer); + int numBytes = payIn.readVInt(); + + if (numBytes > payloadBytes.length) { + payloadBytes = ArrayUtil.growNoCopy(payloadBytes, numBytes); + } + payIn.readBytes(payloadBytes, 0, numBytes); + } else { + // this works, because when writing a vint block we always force the first length to be + // written + pforUtil.skip(payIn); // skip over lengths + int numBytes = payIn.readVInt(); // read length of payloadBytes + 
payIn.seek(payIn.getFilePointer() + numBytes); // skip over payloadBytes + } + payloadByteUpto = 0; + } + + if (indexHasOffsets && payIn != null) { + if (needsOffsets) { + pforUtil.decode(payIn, offsetStartDeltaBuffer); + pforUtil.decode(payIn, offsetLengthBuffer); + } else { + // this works, because when writing a vint block we always force the first length to be + // written + pforUtil.skip(payIn); // skip over starts + pforUtil.skip(payIn); // skip over lengths + } + } + } + } + + @Override + public void advanceShallow(int target) throws IOException { + if (target > nextSkipDoc) { + // always plus one to fix the result, since skip position in Lucene90SkipReader + // is a little different from MultiLevelSkipListReader + final int newDocUpto = skipper.skipTo(target) + 1; + + if (newDocUpto > docUpto) { + // Skipper moved + assert newDocUpto % BLOCK_SIZE == 0 : "got " + newDocUpto; + docUpto = newDocUpto; + posDocUpTo = docUpto; + + // Force to read next block + docBufferUpto = BLOCK_SIZE; + accum = skipper.getDoc(); + posPendingFP = skipper.getPosPointer(); + payPendingFP = skipper.getPayPointer(); + posPendingCount = skipper.getPosBufferUpto(); + lastStartOffset = 0; // new document + payloadByteUpto = skipper.getPayloadByteUpto(); // actually, this is just lastSkipEntry + seekTo = skipper.getDocPointer(); // delay the seek + } + // next time we call advance, this is used to + // foresee whether skipper is necessary. + nextSkipDoc = skipper.getNextSkipDoc(); + } + assert nextSkipDoc >= target; + } + + @Override + public Impacts getImpacts() throws IOException { + advanceShallow(doc); + return skipper.getImpacts(); + } + + @Override + public int nextDoc() throws IOException { + return advance(doc + 1); + } + + @Override + public int advance(int target) throws IOException { + if (target > nextSkipDoc) { + advanceShallow(target); + } + if (docBufferUpto == BLOCK_SIZE) { + if (seekTo >= 0) { + docIn.seek(seekTo); + seekTo = -1; + isFreqsRead = true; // reset isFreqsRead + } + refillDocs(); + } + + // Now scan: + long doc; + while (true) { + doc = docBuffer[docBufferUpto]; + docBufferUpto++; + docUpto++; + + if (doc >= target) { + break; + } + + if (docBufferUpto == BLOCK_SIZE) { + return this.doc = NO_MORE_DOCS; + } + } + position = 0; + lastStartOffset = 0; + + return this.doc = (int) doc; + } + + // TODO: in theory we could avoid loading frq block + // when not needed, ie, use skip data to load how far to + // seek the pos pointer ... 
instead of having to load frq + // blocks only to sum up how many positions to skip + private void skipPositions() throws IOException { + // Skip positions now: + int toSkip = posPendingCount - (int) freqBuffer[docBufferUpto - 1]; + // if (DEBUG) { + // System.out.println(" FPR.skipPositions: toSkip=" + toSkip); + // } + + final int leftInBlock = BLOCK_SIZE - posBufferUpto; + if (toSkip < leftInBlock) { + int end = posBufferUpto + toSkip; + while (posBufferUpto < end) { + if (indexHasPayloads) { + payloadByteUpto += (int) payloadLengthBuffer[posBufferUpto]; + } + posBufferUpto++; + } + } else { + toSkip -= leftInBlock; + while (toSkip >= BLOCK_SIZE) { + assert posIn.getFilePointer() != lastPosBlockFP; + pforUtil.skip(posIn); + + if (indexHasPayloads && payIn != null) { + // Skip payloadLength block: + pforUtil.skip(payIn); + + // Skip payloadBytes block: + int numBytes = payIn.readVInt(); + payIn.seek(payIn.getFilePointer() + numBytes); + } + + if (indexHasOffsets && payIn != null) { + pforUtil.skip(payIn); + pforUtil.skip(payIn); + } + toSkip -= BLOCK_SIZE; + } + refillPositions(); + payloadByteUpto = 0; + posBufferUpto = 0; + while (posBufferUpto < toSkip) { + if (indexHasPayloads) { + payloadByteUpto += (int) payloadLengthBuffer[posBufferUpto]; + } + posBufferUpto++; + } + } + + position = 0; + lastStartOffset = 0; + } + + @Override + public int nextPosition() throws IOException { + if (indexHasPos == false || needsPositions == false) { + return -1; + } + + if (isFreqsRead == false) { + pforUtil.decode(docIn, freqBuffer); // read freqBuffer for this docs block + isFreqsRead = true; + } + while (posDocUpTo < docUpto) { // bring posPendingCount upto the current doc + posPendingCount += (int) freqBuffer[docBufferUpto - (docUpto - posDocUpTo)]; + posDocUpTo++; + } + + assert posPendingCount > 0; + + if (posPendingFP != -1) { + posIn.seek(posPendingFP); + posPendingFP = -1; + + if (payPendingFP != -1 && payIn != null) { + payIn.seek(payPendingFP); + payPendingFP = -1; + } + + // Force buffer refill: + posBufferUpto = BLOCK_SIZE; + } + + if (posPendingCount > freqBuffer[docBufferUpto - 1]) { + skipPositions(); + posPendingCount = (int) freqBuffer[docBufferUpto - 1]; + } + + if (posBufferUpto == BLOCK_SIZE) { + refillPositions(); + posBufferUpto = 0; + } + position += (int) posDeltaBuffer[posBufferUpto]; + + if (indexHasPayloads) { + payloadLength = (int) payloadLengthBuffer[posBufferUpto]; + payload.bytes = payloadBytes; + payload.offset = payloadByteUpto; + payload.length = payloadLength; + payloadByteUpto += payloadLength; + } + + if (indexHasOffsets && needsOffsets) { + startOffset = lastStartOffset + (int) offsetStartDeltaBuffer[posBufferUpto]; + endOffset = startOffset + (int) offsetLengthBuffer[posBufferUpto]; + lastStartOffset = startOffset; + } + + posBufferUpto++; + posPendingCount--; + return position; + } + + @Override + public int startOffset() { + return startOffset; + } + + @Override + public int endOffset() { + return endOffset; + } + + @Override + public BytesRef getPayload() { + if (payloadLength == 0) { + return null; + } else { + return payload; + } + } + + @Override + public long cost() { + return docFreq; + } + } + + @Override + public void checkIntegrity() throws IOException { + if (docIn != null) { + CodecUtil.checksumEntireFile(docIn); + } + if (posIn != null) { + CodecUtil.checksumEntireFile(posIn); + } + if (payIn != null) { + CodecUtil.checksumEntireFile(payIn); + } + } + + @Override + public String toString() { + return getClass().getSimpleName() + "(positions=" + 
(posIn != null) + ",payloads=" + (payIn != null) + ")"; + } +} diff --git a/server/src/main/java/org/elasticsearch/index/codec/postings/ES812PostingsWriter.java b/server/src/main/java/org/elasticsearch/index/codec/postings/ES812PostingsWriter.java new file mode 100644 index 0000000000000..9ab7ed42efb09 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/codec/postings/ES812PostingsWriter.java @@ -0,0 +1,523 @@ +/* + * @notice + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Modifications copyright (C) 2022 Elasticsearch B.V. + */ +package org.elasticsearch.index.codec.postings; + +import org.apache.lucene.codecs.BlockTermState; +import org.apache.lucene.codecs.CodecUtil; +import org.apache.lucene.codecs.CompetitiveImpactAccumulator; +import org.apache.lucene.codecs.PushPostingsWriterBase; +import org.apache.lucene.index.CorruptIndexException; +import org.apache.lucene.index.FieldInfo; +import org.apache.lucene.index.IndexFileNames; +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.NumericDocValues; +import org.apache.lucene.index.SegmentWriteState; +import org.apache.lucene.store.DataOutput; +import org.apache.lucene.store.IndexOutput; +import org.apache.lucene.util.ArrayUtil; +import org.apache.lucene.util.BitUtil; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.core.IOUtils; +import org.elasticsearch.index.codec.postings.ES812PostingsFormat.IntBlockTermState; + +import java.io.IOException; + +import static org.elasticsearch.index.codec.postings.ES812PostingsFormat.BLOCK_SIZE; +import static org.elasticsearch.index.codec.postings.ES812PostingsFormat.DOC_CODEC; +import static org.elasticsearch.index.codec.postings.ES812PostingsFormat.MAX_SKIP_LEVELS; +import static org.elasticsearch.index.codec.postings.ES812PostingsFormat.PAY_CODEC; +import static org.elasticsearch.index.codec.postings.ES812PostingsFormat.POS_CODEC; +import static org.elasticsearch.index.codec.postings.ES812PostingsFormat.TERMS_CODEC; +import static org.elasticsearch.index.codec.postings.ES812PostingsFormat.VERSION_CURRENT; + +/** + * Concrete class that writes docId(maybe frq,pos,offset,payloads) list with postings format. + * + *

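+ * <p>In outline: doc deltas and freqs are PFOR-encoded into the .doc file in blocks of
+ * BLOCK_SIZE, position deltas into the .pos file, and payloads/offsets into the .pay file;
+ * a tail shorter than a full block is written as vInts instead.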
Postings list for each term will be stored separately. + * + * @see ES812SkipWriter for details about skipping setting and postings layout. + */ +final class ES812PostingsWriter extends PushPostingsWriterBase { + + IndexOutput docOut; + IndexOutput posOut; + IndexOutput payOut; + + static final IntBlockTermState emptyState = new IntBlockTermState(); + IntBlockTermState lastState; + + // Holds starting file pointers for current term: + private long docStartFP; + private long posStartFP; + private long payStartFP; + + final long[] docDeltaBuffer; + final long[] freqBuffer; + private int docBufferUpto; + + final long[] posDeltaBuffer; + final long[] payloadLengthBuffer; + final long[] offsetStartDeltaBuffer; + final long[] offsetLengthBuffer; + private int posBufferUpto; + + private byte[] payloadBytes; + private int payloadByteUpto; + + private int lastBlockDocID; + private long lastBlockPosFP; + private long lastBlockPayFP; + private int lastBlockPosBufferUpto; + private int lastBlockPayloadByteUpto; + + private int lastDocID; + private int lastPosition; + private int lastStartOffset; + private int docCount; + + private final PForUtil pforUtil; + private final ES812SkipWriter skipWriter; + + private boolean fieldHasNorms; + private NumericDocValues norms; + private final CompetitiveImpactAccumulator competitiveFreqNormAccumulator = new CompetitiveImpactAccumulator(); + + /** Creates a postings writer */ + ES812PostingsWriter(SegmentWriteState state) throws IOException { + + String docFileName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, ES812PostingsFormat.DOC_EXTENSION); + docOut = state.directory.createOutput(docFileName, state.context); + IndexOutput posOut = null; + IndexOutput payOut = null; + boolean success = false; + try { + CodecUtil.writeIndexHeader(docOut, DOC_CODEC, VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix); + pforUtil = new PForUtil(new ForUtil()); + if (state.fieldInfos.hasProx()) { + posDeltaBuffer = new long[BLOCK_SIZE]; + String posFileName = IndexFileNames.segmentFileName( + state.segmentInfo.name, + state.segmentSuffix, + ES812PostingsFormat.POS_EXTENSION + ); + posOut = state.directory.createOutput(posFileName, state.context); + CodecUtil.writeIndexHeader(posOut, POS_CODEC, VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix); + + if (state.fieldInfos.hasPayloads()) { + payloadBytes = new byte[128]; + payloadLengthBuffer = new long[BLOCK_SIZE]; + } else { + payloadBytes = null; + payloadLengthBuffer = null; + } + + if (state.fieldInfos.hasOffsets()) { + offsetStartDeltaBuffer = new long[BLOCK_SIZE]; + offsetLengthBuffer = new long[BLOCK_SIZE]; + } else { + offsetStartDeltaBuffer = null; + offsetLengthBuffer = null; + } + + if (state.fieldInfos.hasPayloads() || state.fieldInfos.hasOffsets()) { + String payFileName = IndexFileNames.segmentFileName( + state.segmentInfo.name, + state.segmentSuffix, + ES812PostingsFormat.PAY_EXTENSION + ); + payOut = state.directory.createOutput(payFileName, state.context); + CodecUtil.writeIndexHeader(payOut, PAY_CODEC, VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix); + } + } else { + posDeltaBuffer = null; + payloadLengthBuffer = null; + offsetStartDeltaBuffer = null; + offsetLengthBuffer = null; + payloadBytes = null; + } + this.payOut = payOut; + this.posOut = posOut; + success = true; + } finally { + if (success == false) { + IOUtils.closeWhileHandlingException(docOut, posOut, payOut); + } + } + + docDeltaBuffer = new long[BLOCK_SIZE]; + freqBuffer = new 
long[BLOCK_SIZE]; + + // TODO: should we try skipping every 2/4 blocks...? + skipWriter = new ES812SkipWriter(MAX_SKIP_LEVELS, BLOCK_SIZE, state.segmentInfo.maxDoc(), docOut, posOut, payOut); + } + + @Override + public IntBlockTermState newTermState() { + return new IntBlockTermState(); + } + + @Override + public void init(IndexOutput termsOut, SegmentWriteState state) throws IOException { + CodecUtil.writeIndexHeader(termsOut, TERMS_CODEC, VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix); + termsOut.writeVInt(BLOCK_SIZE); + } + + @Override + public void setField(FieldInfo fieldInfo) { + super.setField(fieldInfo); + skipWriter.setField(writePositions, writeOffsets, writePayloads); + lastState = emptyState; + fieldHasNorms = fieldInfo.hasNorms(); + } + + @Override + public void startTerm(NumericDocValues norms) { + docStartFP = docOut.getFilePointer(); + if (writePositions) { + posStartFP = posOut.getFilePointer(); + if (writePayloads || writeOffsets) { + payStartFP = payOut.getFilePointer(); + } + } + lastDocID = 0; + lastBlockDocID = -1; + skipWriter.resetSkip(); + this.norms = norms; + competitiveFreqNormAccumulator.clear(); + } + + @Override + public void startDoc(int docID, int termDocFreq) throws IOException { + // Have collected a block of docs, and get a new doc. + // Should write skip data as well as postings list for + // current block. + if (lastBlockDocID != -1 && docBufferUpto == 0) { + skipWriter.bufferSkip( + lastBlockDocID, + competitiveFreqNormAccumulator, + docCount, + lastBlockPosFP, + lastBlockPayFP, + lastBlockPosBufferUpto, + lastBlockPayloadByteUpto + ); + competitiveFreqNormAccumulator.clear(); + } + + final int docDelta = docID - lastDocID; + + if (docID < 0 || (docCount > 0 && docDelta <= 0)) { + throw new CorruptIndexException("docs out of order (" + docID + " <= " + lastDocID + " )", docOut); + } + + docDeltaBuffer[docBufferUpto] = docDelta; + if (writeFreqs) { + freqBuffer[docBufferUpto] = termDocFreq; + } + + docBufferUpto++; + docCount++; + + if (docBufferUpto == BLOCK_SIZE) { + pforUtil.encode(docDeltaBuffer, docOut); + if (writeFreqs) { + pforUtil.encode(freqBuffer, docOut); + } + // NOTE: don't set docBufferUpto back to 0 here; + // finishDoc will do so (because it needs to see that + // the block was filled so it can save skip data) + } + + lastDocID = docID; + lastPosition = 0; + lastStartOffset = 0; + + long norm; + if (fieldHasNorms) { + boolean found = norms.advanceExact(docID); + if (found == false) { + // This can happen if indexing hits a problem after adding a doc to the + // postings but before buffering the norm. Such documents are written + // deleted and will go away on the first merge. + norm = 1L; + } else { + norm = norms.longValue(); + assert norm != 0 : docID; + } + } else { + norm = 1L; + } + + competitiveFreqNormAccumulator.add(writeFreqs ? 
termDocFreq : 1, norm); + } + + @Override + public void addPosition(int position, BytesRef payload, int startOffset, int endOffset) throws IOException { + if (position > IndexWriter.MAX_POSITION) { + throw new CorruptIndexException( + "position=" + position + " is too large (> IndexWriter.MAX_POSITION=" + IndexWriter.MAX_POSITION + ")", + docOut + ); + } + if (position < 0) { + throw new CorruptIndexException("position=" + position + " is < 0", docOut); + } + posDeltaBuffer[posBufferUpto] = position - lastPosition; + if (writePayloads) { + if (payload == null || payload.length == 0) { + // no payload + payloadLengthBuffer[posBufferUpto] = 0; + } else { + payloadLengthBuffer[posBufferUpto] = payload.length; + if (payloadByteUpto + payload.length > payloadBytes.length) { + payloadBytes = ArrayUtil.grow(payloadBytes, payloadByteUpto + payload.length); + } + System.arraycopy(payload.bytes, payload.offset, payloadBytes, payloadByteUpto, payload.length); + payloadByteUpto += payload.length; + } + } + + if (writeOffsets) { + assert startOffset >= lastStartOffset; + assert endOffset >= startOffset; + offsetStartDeltaBuffer[posBufferUpto] = startOffset - lastStartOffset; + offsetLengthBuffer[posBufferUpto] = endOffset - startOffset; + lastStartOffset = startOffset; + } + + posBufferUpto++; + lastPosition = position; + if (posBufferUpto == BLOCK_SIZE) { + pforUtil.encode(posDeltaBuffer, posOut); + + if (writePayloads) { + pforUtil.encode(payloadLengthBuffer, payOut); + payOut.writeVInt(payloadByteUpto); + payOut.writeBytes(payloadBytes, 0, payloadByteUpto); + payloadByteUpto = 0; + } + if (writeOffsets) { + pforUtil.encode(offsetStartDeltaBuffer, payOut); + pforUtil.encode(offsetLengthBuffer, payOut); + } + posBufferUpto = 0; + } + } + + @Override + public void finishDoc() throws IOException { + // Since we don't know df for current term, we had to buffer + // those skip data for each block, and when a new doc comes, + // write them to skip file. + if (docBufferUpto == BLOCK_SIZE) { + lastBlockDocID = lastDocID; + if (posOut != null) { + if (payOut != null) { + lastBlockPayFP = payOut.getFilePointer(); + } + lastBlockPosFP = posOut.getFilePointer(); + lastBlockPosBufferUpto = posBufferUpto; + lastBlockPayloadByteUpto = payloadByteUpto; + } + docBufferUpto = 0; + } + } + + /** Called when we are done adding docs to this term */ + @Override + public void finishTerm(BlockTermState _state) throws IOException { + IntBlockTermState state = (IntBlockTermState) _state; + assert state.docFreq > 0; + + // TODO: wasteful we are counting this (counting # docs + // for this term) in two places? + assert state.docFreq == docCount : state.docFreq + " vs " + docCount; + + // docFreq == 1, don't write the single docid/freq to a separate file along with a pointer to + // it. 
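+ // For example, a term that appears only in document 42 is pulsed: singletonDocID becomes
+ // 42 below, nothing extra is written to the .doc file, and the terms dictionary carries
+ // the docID inline (its freq is implied by totalTermFreq).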
+ final int singletonDocID; + if (state.docFreq == 1) { + // pulse the singleton docid into the term dictionary, freq is implicitly totalTermFreq + singletonDocID = (int) docDeltaBuffer[0]; + } else { + singletonDocID = -1; + // vInt encode the remaining doc deltas and freqs: + for (int i = 0; i < docBufferUpto; i++) { + final int docDelta = (int) docDeltaBuffer[i]; + final int freq = (int) freqBuffer[i]; + if (writeFreqs == false) { + docOut.writeVInt(docDelta); + } else if (freq == 1) { + docOut.writeVInt((docDelta << 1) | 1); + } else { + docOut.writeVInt(docDelta << 1); + docOut.writeVInt(freq); + } + } + } + + final long lastPosBlockOffset; + + if (writePositions) { + // totalTermFreq is just total number of positions(or payloads, or offsets) + // associated with current term. + assert state.totalTermFreq != -1; + if (state.totalTermFreq > BLOCK_SIZE) { + // record file offset for last pos in last block + lastPosBlockOffset = posOut.getFilePointer() - posStartFP; + } else { + lastPosBlockOffset = -1; + } + if (posBufferUpto > 0) { + // TODO: should we send offsets/payloads to + // .pay...? seems wasteful (have to store extra + // vLong for low (< BLOCK_SIZE) DF terms = vast vast + // majority) + + // vInt encode the remaining positions/payloads/offsets: + int lastPayloadLength = -1; // force first payload length to be written + int lastOffsetLength = -1; // force first offset length to be written + int payloadBytesReadUpto = 0; + for (int i = 0; i < posBufferUpto; i++) { + final int posDelta = (int) posDeltaBuffer[i]; + if (writePayloads) { + final int payloadLength = (int) payloadLengthBuffer[i]; + if (payloadLength != lastPayloadLength) { + lastPayloadLength = payloadLength; + posOut.writeVInt((posDelta << 1) | 1); + posOut.writeVInt(payloadLength); + } else { + posOut.writeVInt(posDelta << 1); + } + + if (payloadLength != 0) { + posOut.writeBytes(payloadBytes, payloadBytesReadUpto, payloadLength); + payloadBytesReadUpto += payloadLength; + } + } else { + posOut.writeVInt(posDelta); + } + + if (writeOffsets) { + int delta = (int) offsetStartDeltaBuffer[i]; + int length = (int) offsetLengthBuffer[i]; + if (length == lastOffsetLength) { + posOut.writeVInt(delta << 1); + } else { + posOut.writeVInt(delta << 1 | 1); + posOut.writeVInt(length); + lastOffsetLength = length; + } + } + } + + if (writePayloads) { + assert payloadBytesReadUpto == payloadByteUpto; + payloadByteUpto = 0; + } + } + } else { + lastPosBlockOffset = -1; + } + + long skipOffset; + if (docCount > BLOCK_SIZE) { + skipOffset = skipWriter.writeSkip(docOut) - docStartFP; + } else { + skipOffset = -1; + } + + state.docStartFP = docStartFP; + state.posStartFP = posStartFP; + state.payStartFP = payStartFP; + state.singletonDocID = singletonDocID; + state.skipOffset = skipOffset; + state.lastPosBlockOffset = lastPosBlockOffset; + docBufferUpto = 0; + posBufferUpto = 0; + lastDocID = 0; + docCount = 0; + } + + @Override + public void encodeTerm(DataOutput out, FieldInfo fieldInfo, BlockTermState _state, boolean absolute) throws IOException { + IntBlockTermState state = (IntBlockTermState) _state; + if (absolute) { + lastState = emptyState; + assert lastState.docStartFP == 0; + } + + if (lastState.singletonDocID != -1 && state.singletonDocID != -1 && state.docStartFP == lastState.docStartFP) { + // With runs of rare values such as ID fields, the increment of pointers in the docs file is + // often 0. 
+ // Furthermore some ID schemes like auto-increment IDs or Flake IDs are monotonic, so we + // encode the delta + // between consecutive doc IDs to save space. + final long delta = (long) state.singletonDocID - lastState.singletonDocID; + out.writeVLong((BitUtil.zigZagEncode(delta) << 1) | 0x01); + } else { + out.writeVLong((state.docStartFP - lastState.docStartFP) << 1); + if (state.singletonDocID != -1) { + out.writeVInt(state.singletonDocID); + } + } + + if (writePositions) { + out.writeVLong(state.posStartFP - lastState.posStartFP); + if (writePayloads || writeOffsets) { + out.writeVLong(state.payStartFP - lastState.payStartFP); + } + } + if (writePositions) { + if (state.lastPosBlockOffset != -1) { + out.writeVLong(state.lastPosBlockOffset); + } + } + if (state.skipOffset != -1) { + out.writeVLong(state.skipOffset); + } + lastState = state; + } + + @Override + public void close() throws IOException { + // TODO: add a finish() at least to PushBase? DV too...? + boolean success = false; + try { + if (docOut != null) { + CodecUtil.writeFooter(docOut); + } + if (posOut != null) { + CodecUtil.writeFooter(posOut); + } + if (payOut != null) { + CodecUtil.writeFooter(payOut); + } + success = true; + } finally { + if (success) { + IOUtils.close(docOut, posOut, payOut); + } else { + IOUtils.closeWhileHandlingException(docOut, posOut, payOut); + } + docOut = posOut = payOut = null; + } + } +} diff --git a/server/src/main/java/org/elasticsearch/index/codec/postings/ES812ScoreSkipReader.java b/server/src/main/java/org/elasticsearch/index/codec/postings/ES812ScoreSkipReader.java new file mode 100644 index 0000000000000..f76e1026945e6 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/codec/postings/ES812ScoreSkipReader.java @@ -0,0 +1,157 @@ +/* + * @notice + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Modifications copyright (C) 2022 Elasticsearch B.V. 
+ */ +package org.elasticsearch.index.codec.postings; + +import org.apache.lucene.index.Impact; +import org.apache.lucene.index.Impacts; +import org.apache.lucene.store.ByteArrayDataInput; +import org.apache.lucene.store.IndexInput; +import org.apache.lucene.util.ArrayUtil; + +import java.io.IOException; +import java.util.AbstractList; +import java.util.Arrays; +import java.util.List; +import java.util.RandomAccess; + +final class ES812ScoreSkipReader extends ES812SkipReader { + + private final byte[][] impactData; + private final int[] impactDataLength; + private final ByteArrayDataInput badi = new ByteArrayDataInput(); + private final Impacts impacts; + private int numLevels = 1; + private final MutableImpactList[] perLevelImpacts; + + ES812ScoreSkipReader(IndexInput skipStream, int maxSkipLevels, boolean hasPos, boolean hasOffsets, boolean hasPayloads) { + super(skipStream, maxSkipLevels, hasPos, hasOffsets, hasPayloads); + this.impactData = new byte[maxSkipLevels][]; + Arrays.fill(impactData, new byte[0]); + this.impactDataLength = new int[maxSkipLevels]; + this.perLevelImpacts = new MutableImpactList[maxSkipLevels]; + for (int i = 0; i < perLevelImpacts.length; ++i) { + perLevelImpacts[i] = new MutableImpactList(); + } + impacts = new Impacts() { + + @Override + public int numLevels() { + return numLevels; + } + + @Override + public int getDocIdUpTo(int level) { + return skipDoc[level]; + } + + @Override + public List getImpacts(int level) { + assert level < numLevels; + if (impactDataLength[level] > 0) { + badi.reset(impactData[level], 0, impactDataLength[level]); + perLevelImpacts[level] = readImpacts(badi, perLevelImpacts[level]); + impactDataLength[level] = 0; + } + return perLevelImpacts[level]; + } + }; + } + + @Override + public int skipTo(int target) throws IOException { + int result = super.skipTo(target); + if (numberOfSkipLevels > 0) { + numLevels = numberOfSkipLevels; + } else { + // End of postings don't have skip data anymore, so we fill with dummy data + // like SlowImpactsEnum. 
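+ // A freq of Integer.MAX_VALUE with a norm of 1 is the most competitive impact possible,
+ // so any score upper bound derived from it remains safe when no real skip data is left.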
+ numLevels = 1; + perLevelImpacts[0].length = 1; + perLevelImpacts[0].impacts[0].freq = Integer.MAX_VALUE; + perLevelImpacts[0].impacts[0].norm = 1L; + impactDataLength[0] = 0; + } + return result; + } + + Impacts getImpacts() { + return impacts; + } + + @Override + protected void readImpacts(int level, IndexInput skipStream) throws IOException { + int length = skipStream.readVInt(); + if (impactData[level].length < length) { + impactData[level] = new byte[ArrayUtil.oversize(length, Byte.BYTES)]; + } + skipStream.readBytes(impactData[level], 0, length); + impactDataLength[level] = length; + } + + static MutableImpactList readImpacts(ByteArrayDataInput in, MutableImpactList reuse) { + int maxNumImpacts = in.length(); // at most one impact per byte + if (reuse.impacts.length < maxNumImpacts) { + int oldLength = reuse.impacts.length; + reuse.impacts = ArrayUtil.grow(reuse.impacts, maxNumImpacts); + for (int i = oldLength; i < reuse.impacts.length; ++i) { + reuse.impacts[i] = new Impact(Integer.MAX_VALUE, 1L); + } + } + + int freq = 0; + long norm = 0; + int length = 0; + while (in.getPosition() < in.length()) { + int freqDelta = in.readVInt(); + if ((freqDelta & 0x01) != 0) { + freq += 1 + (freqDelta >>> 1); + try { + norm += 1 + in.readZLong(); + } catch (IOException e) { + throw new RuntimeException(e); // cannot happen on a BADI + } + } else { + freq += 1 + (freqDelta >>> 1); + norm++; + } + Impact impact = reuse.impacts[length]; + impact.freq = freq; + impact.norm = norm; + length++; + } + reuse.length = length; + return reuse; + } + + static class MutableImpactList extends AbstractList implements RandomAccess { + int length = 1; + Impact[] impacts = new Impact[] { new Impact(Integer.MAX_VALUE, 1L) }; + + @Override + public Impact get(int index) { + return impacts[index]; + } + + @Override + public int size() { + return length; + } + } +} diff --git a/server/src/main/java/org/elasticsearch/index/codec/postings/ES812SkipReader.java b/server/src/main/java/org/elasticsearch/index/codec/postings/ES812SkipReader.java new file mode 100644 index 0000000000000..11c0c611312fc --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/codec/postings/ES812SkipReader.java @@ -0,0 +1,203 @@ +/* + * @notice + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Modifications copyright (C) 2022 Elasticsearch B.V. + */ +package org.elasticsearch.index.codec.postings; + +import org.apache.lucene.codecs.MultiLevelSkipListReader; +import org.apache.lucene.store.IndexInput; + +import java.io.IOException; +import java.util.Arrays; + +/** + * Implements the skip list reader for block postings format that stores positions and payloads. + * + *

Although this skipper uses MultiLevelSkipListReader as an interface, its definition of a skip + * position is a little different. + * + *

For example, when skipInterval = blockSize = 3, df = 2*skipInterval = 6, + * + *

+ * <pre>
+ * 0 1 2 3 4 5
+ * d d d d d d    (posting list)
+ *     ^     ^    (skip point in MultiLevelSkipWriter)
+ *       ^        (skip point in Lucene90SkipWriter)
+ * </pre>
+ * + *

In this case, MultiLevelSkipListReader will use the last document as a skip point, while + * Lucene90SkipReader should assume that no skip point will come. + *

If we use the interface directly in Lucene90SkipReader, it may naively try to read more skip + * data after the only skip point is loaded. + *

To illustrate this, suppose we call skipTo(d[5]): since skip point d[3] has a smaller docId and + * numSkipped + blockSize == df, MultiLevelSkipListReader will assume the skip list isn't exhausted + * yet and try to load a non-existent skip point. + *
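+ * <p>Concretely, with blockSize = 3 a df of 6 is trimmed to 5, so once the single real skip
+ * point at d[2] is loaded, the reader treats the list as exhausted instead of reading past
+ * its end.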

Therefore, we'll trim df before passing it to the interface. see trim(int) + */ +class ES812SkipReader extends MultiLevelSkipListReader { + private long[] docPointer; + private long[] posPointer; + private long[] payPointer; + private int[] posBufferUpto; + private int[] payloadByteUpto; + + private long lastPosPointer; + private long lastPayPointer; + private int lastPayloadByteUpto; + private long lastDocPointer; + private int lastPosBufferUpto; + + ES812SkipReader(IndexInput skipStream, int maxSkipLevels, boolean hasPos, boolean hasOffsets, boolean hasPayloads) { + super(skipStream, maxSkipLevels, ForUtil.BLOCK_SIZE, 8); + docPointer = new long[maxSkipLevels]; + if (hasPos) { + posPointer = new long[maxSkipLevels]; + posBufferUpto = new int[maxSkipLevels]; + if (hasPayloads) { + payloadByteUpto = new int[maxSkipLevels]; + } else { + payloadByteUpto = null; + } + if (hasOffsets || hasPayloads) { + payPointer = new long[maxSkipLevels]; + } else { + payPointer = null; + } + } else { + posPointer = null; + } + } + + /** + * Trim original docFreq to tell skipReader read proper number of skip points. + * + *

Since our definition in Lucene90Skip* is a little different from MultiLevelSkip* This + * trimmed docFreq will prevent skipReader from: 1. silly reading a non-existed skip point after + * the last block boundary 2. moving into the vInt block + */ + protected int trim(int df) { + return df % ForUtil.BLOCK_SIZE == 0 ? df - 1 : df; + } + + public void init(long skipPointer, long docBasePointer, long posBasePointer, long payBasePointer, int df) throws IOException { + super.init(skipPointer, trim(df)); + lastDocPointer = docBasePointer; + lastPosPointer = posBasePointer; + lastPayPointer = payBasePointer; + + Arrays.fill(docPointer, docBasePointer); + if (posPointer != null) { + Arrays.fill(posPointer, posBasePointer); + if (payPointer != null) { + Arrays.fill(payPointer, payBasePointer); + } + } else { + assert posBasePointer == 0; + } + } + + /** + * Returns the doc pointer of the doc to which the last call of {@link + * MultiLevelSkipListReader#skipTo(int)} has skipped. + */ + public long getDocPointer() { + return lastDocPointer; + } + + public long getPosPointer() { + return lastPosPointer; + } + + public int getPosBufferUpto() { + return lastPosBufferUpto; + } + + public long getPayPointer() { + return lastPayPointer; + } + + public int getPayloadByteUpto() { + return lastPayloadByteUpto; + } + + public int getNextSkipDoc() { + return skipDoc[0]; + } + + @Override + protected void seekChild(int level) throws IOException { + super.seekChild(level); + docPointer[level] = lastDocPointer; + if (posPointer != null) { + posPointer[level] = lastPosPointer; + posBufferUpto[level] = lastPosBufferUpto; + if (payloadByteUpto != null) { + payloadByteUpto[level] = lastPayloadByteUpto; + } + if (payPointer != null) { + payPointer[level] = lastPayPointer; + } + } + } + + @Override + protected void setLastSkipData(int level) { + super.setLastSkipData(level); + lastDocPointer = docPointer[level]; + + if (posPointer != null) { + lastPosPointer = posPointer[level]; + lastPosBufferUpto = posBufferUpto[level]; + if (payPointer != null) { + lastPayPointer = payPointer[level]; + } + if (payloadByteUpto != null) { + lastPayloadByteUpto = payloadByteUpto[level]; + } + } + } + + @Override + protected int readSkipData(int level, IndexInput skipStream) throws IOException { + int delta = skipStream.readVInt(); + docPointer[level] += skipStream.readVLong(); + + if (posPointer != null) { + posPointer[level] += skipStream.readVLong(); + posBufferUpto[level] = skipStream.readVInt(); + + if (payloadByteUpto != null) { + payloadByteUpto[level] = skipStream.readVInt(); + } + + if (payPointer != null) { + payPointer[level] += skipStream.readVLong(); + } + } + readImpacts(level, skipStream); + return delta; + } + + // The default impl skips impacts + protected void readImpacts(int level, IndexInput skipStream) throws IOException { + skipStream.skipBytes(skipStream.readVInt()); + } +} diff --git a/server/src/main/java/org/elasticsearch/index/codec/postings/ES812SkipWriter.java b/server/src/main/java/org/elasticsearch/index/codec/postings/ES812SkipWriter.java new file mode 100644 index 0000000000000..dbfb7c86a1475 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/codec/postings/ES812SkipWriter.java @@ -0,0 +1,229 @@ +/* + * @notice + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Modifications copyright (C) 2022 Elasticsearch B.V. + */ + +package org.elasticsearch.index.codec.postings; + +import org.apache.lucene.codecs.CompetitiveImpactAccumulator; +import org.apache.lucene.codecs.MultiLevelSkipListWriter; +import org.apache.lucene.index.Impact; +import org.apache.lucene.store.ByteBuffersDataOutput; +import org.apache.lucene.store.DataOutput; +import org.apache.lucene.store.IndexOutput; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; + +/** + * Writes skip lists with multiple levels, and supports skipping within a block of ints. + * + * <p>Assume that docFreq = 28, skipInterval = blockSize = 12 + * + * <pre>
+ *  |       block#0       | |      block#1        | |vInts|
+ *  d d d d d d d d d d d d d d d d d d d d d d d d d d d d (posting list)
+ *                          ^                       ^       (level 0 skip point)
+ * </pre> + * + * <p>Note that skipWriter will ignore the first document in block#0, since it is useless as a skip + * point. Also, we'll never skip into the vInts block; we only record skip data at its start + * point (if it exists). + *
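+ * In the example above there are thus two level-0 skip entries (marked by ^): the first records docID[11] and the file pointers at the start of block#1, the second records docID[23] and the pointers at the start of the vInts tail; the four trailing vInt-encoded documents get no skip entry of their own. + *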
+ * <p>For each skip point, we will record: 1. the docID one position back, i.e. for position 12, record + * docID[11], etc.; 2. its related file pointers (position, payload); 3. related numbers or + * uptos (position, payload); 4. the start offset. + */ +final class ES812SkipWriter extends MultiLevelSkipListWriter { + private int[] lastSkipDoc; + private long[] lastSkipDocPointer; + private long[] lastSkipPosPointer; + private long[] lastSkipPayPointer; + + private final IndexOutput docOut; + private final IndexOutput posOut; + private final IndexOutput payOut; + + private int curDoc; + private long curDocPointer; + private long curPosPointer; + private long curPayPointer; + private int curPosBufferUpto; + private int curPayloadByteUpto; + private CompetitiveImpactAccumulator[] curCompetitiveFreqNorms; + private boolean fieldHasPositions; + private boolean fieldHasOffsets; + private boolean fieldHasPayloads; + + ES812SkipWriter(int maxSkipLevels, int blockSize, int docCount, IndexOutput docOut, IndexOutput posOut, IndexOutput payOut) { + super(blockSize, 8, maxSkipLevels, docCount); + this.docOut = docOut; + this.posOut = posOut; + this.payOut = payOut; + + lastSkipDoc = new int[maxSkipLevels]; + lastSkipDocPointer = new long[maxSkipLevels]; + if (posOut != null) { + lastSkipPosPointer = new long[maxSkipLevels]; + if (payOut != null) { + lastSkipPayPointer = new long[maxSkipLevels]; + } + } + curCompetitiveFreqNorms = new CompetitiveImpactAccumulator[maxSkipLevels]; + for (int i = 0; i < maxSkipLevels; ++i) { + curCompetitiveFreqNorms[i] = new CompetitiveImpactAccumulator(); + } + } + + void setField(boolean fieldHasPositions, boolean fieldHasOffsets, boolean fieldHasPayloads) { + this.fieldHasPositions = fieldHasPositions; + this.fieldHasOffsets = fieldHasOffsets; + this.fieldHasPayloads = fieldHasPayloads; + } + + // tricky: we only skip data for blocks (terms with more than 128 docs), but re-init'ing the skipper + // is pretty slow for rare terms in large segments as we have to fill O(log #docs in segment) of junk. + // this is the vast majority of terms (worst case: ID field or similar). so in resetSkip() we save + // away the previous pointers, and lazy-init only if we need to buffer skip data for the term. + private boolean initialized; + long lastDocFP; + long lastPosFP; + long lastPayFP; + + @Override + public void resetSkip() { + lastDocFP = docOut.getFilePointer(); + if (fieldHasPositions) { + lastPosFP = posOut.getFilePointer(); + if (fieldHasOffsets || fieldHasPayloads) { + lastPayFP = payOut.getFilePointer(); + } + } + if (initialized) { + for (CompetitiveImpactAccumulator acc : curCompetitiveFreqNorms) { + acc.clear(); + } + } + initialized = false; + } + + private void initSkip() { + if (initialized == false) { + super.resetSkip(); + Arrays.fill(lastSkipDoc, 0); + Arrays.fill(lastSkipDocPointer, lastDocFP); + if (fieldHasPositions) { + Arrays.fill(lastSkipPosPointer, lastPosFP); + if (fieldHasOffsets || fieldHasPayloads) { + Arrays.fill(lastSkipPayPointer, lastPayFP); + } + } + // sets of competitive freq,norm pairs should be empty at this point + assert Arrays.stream(curCompetitiveFreqNorms) + .map(CompetitiveImpactAccumulator::getCompetitiveFreqNormPairs) + .mapToInt(Collection::size) + .sum() == 0; + initialized = true; + } + } + + /** Sets the values for the current skip data.
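+ * + * <p>A typical caller buffers one skip entry per completed block; a hypothetical call (argument names assumed for illustration, matching the parameters below) would be {@code bufferSkip(lastDocOfBlock, freqNormAcc, docCount, posFP, payFP, posBufferUpto, payloadByteUpto)} after every {@code ForUtil.BLOCK_SIZE} documents. + *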
*/ + public void bufferSkip( + int doc, + CompetitiveImpactAccumulator competitiveFreqNorms, + int numDocs, + long posFP, + long payFP, + int posBufferUpto, + int payloadByteUpto + ) throws IOException { + initSkip(); + this.curDoc = doc; + this.curDocPointer = docOut.getFilePointer(); + this.curPosPointer = posFP; + this.curPayPointer = payFP; + this.curPosBufferUpto = posBufferUpto; + this.curPayloadByteUpto = payloadByteUpto; + this.curCompetitiveFreqNorms[0].addAll(competitiveFreqNorms); + bufferSkip(numDocs); + } + + private final ByteBuffersDataOutput freqNormOut = ByteBuffersDataOutput.newResettableInstance(); + + @Override + protected void writeSkipData(int level, DataOutput skipBuffer) throws IOException { + + int delta = curDoc - lastSkipDoc[level]; + + skipBuffer.writeVInt(delta); + lastSkipDoc[level] = curDoc; + + skipBuffer.writeVLong(curDocPointer - lastSkipDocPointer[level]); + lastSkipDocPointer[level] = curDocPointer; + + if (fieldHasPositions) { + + skipBuffer.writeVLong(curPosPointer - lastSkipPosPointer[level]); + lastSkipPosPointer[level] = curPosPointer; + skipBuffer.writeVInt(curPosBufferUpto); + + if (fieldHasPayloads) { + skipBuffer.writeVInt(curPayloadByteUpto); + } + + if (fieldHasOffsets || fieldHasPayloads) { + skipBuffer.writeVLong(curPayPointer - lastSkipPayPointer[level]); + lastSkipPayPointer[level] = curPayPointer; + } + } + + CompetitiveImpactAccumulator competitiveFreqNorms = curCompetitiveFreqNorms[level]; + assert competitiveFreqNorms.getCompetitiveFreqNormPairs().size() > 0; + if (level + 1 < numberOfSkipLevels) { + curCompetitiveFreqNorms[level + 1].addAll(competitiveFreqNorms); + } + writeImpacts(competitiveFreqNorms, freqNormOut); + skipBuffer.writeVInt(Math.toIntExact(freqNormOut.size())); + freqNormOut.copyTo(skipBuffer); + freqNormOut.reset(); + competitiveFreqNorms.clear(); + } + + static void writeImpacts(CompetitiveImpactAccumulator acc, DataOutput out) throws IOException { + Collection<Impact> impacts = acc.getCompetitiveFreqNormPairs(); + Impact previous = new Impact(0, 0); + for (Impact impact : impacts) { + assert impact.freq > previous.freq; + assert Long.compareUnsigned(impact.norm, previous.norm) > 0; + int freqDelta = impact.freq - previous.freq - 1; + long normDelta = impact.norm - previous.norm - 1; + if (normDelta == 0) { + // most of the time, norm only increases by 1, so we can fold everything in a single byte + out.writeVInt(freqDelta << 1); + } else { + out.writeVInt((freqDelta << 1) | 1); + out.writeZLong(normDelta); + } + previous = impact; + } + } +} diff --git a/server/src/main/java/org/elasticsearch/index/codec/postings/ForUtil.java b/server/src/main/java/org/elasticsearch/index/codec/postings/ForUtil.java new file mode 100644 index 0000000000000..d874caab1b8c0 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/codec/postings/ForUtil.java @@ -0,0 +1,1049 @@ +/* + * @notice + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License.
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Modifications copyright (C) 2022 Elasticsearch B.V. + */ +package org.elasticsearch.index.codec.postings; + +import org.apache.lucene.store.DataInput; +import org.apache.lucene.store.DataOutput; + +import java.io.IOException; + +// Inspired from https://fulmicoton.com/posts/bitpacking/ +// Encodes multiple integers in a long to get SIMD-like speedups. +// If bitsPerValue <= 8 then we pack 8 ints per long +// else if bitsPerValue <= 16 we pack 4 ints per long +// else we pack 2 ints per long +final class ForUtil { + + static final int BLOCK_SIZE = 128; + private static final int BLOCK_SIZE_LOG2 = 7; + + private static long expandMask32(long mask32) { + return mask32 | (mask32 << 32); + } + + private static long expandMask16(long mask16) { + return expandMask32(mask16 | (mask16 << 16)); + } + + private static long expandMask8(long mask8) { + return expandMask16(mask8 | (mask8 << 8)); + } + + private static long mask32(int bitsPerValue) { + return expandMask32((1L << bitsPerValue) - 1); + } + + private static long mask16(int bitsPerValue) { + return expandMask16((1L << bitsPerValue) - 1); + } + + private static long mask8(int bitsPerValue) { + return expandMask8((1L << bitsPerValue) - 1); + } + + private static void expand8(long[] arr) { + for (int i = 0; i < 16; ++i) { + long l = arr[i]; + arr[i] = (l >>> 56) & 0xFFL; + arr[16 + i] = (l >>> 48) & 0xFFL; + arr[32 + i] = (l >>> 40) & 0xFFL; + arr[48 + i] = (l >>> 32) & 0xFFL; + arr[64 + i] = (l >>> 24) & 0xFFL; + arr[80 + i] = (l >>> 16) & 0xFFL; + arr[96 + i] = (l >>> 8) & 0xFFL; + arr[112 + i] = l & 0xFFL; + } + } + + private static void expand8To32(long[] arr) { + for (int i = 0; i < 16; ++i) { + long l = arr[i]; + arr[i] = (l >>> 24) & 0x000000FF000000FFL; + arr[16 + i] = (l >>> 16) & 0x000000FF000000FFL; + arr[32 + i] = (l >>> 8) & 0x000000FF000000FFL; + arr[48 + i] = l & 0x000000FF000000FFL; + } + } + + private static void collapse8(long[] arr) { + for (int i = 0; i < 16; ++i) { + arr[i] = (arr[i] << 56) | (arr[16 + i] << 48) | (arr[32 + i] << 40) | (arr[48 + i] << 32) | (arr[64 + i] << 24) | (arr[80 + i] + << 16) | (arr[96 + i] << 8) | arr[112 + i]; + } + } + + private static void expand16(long[] arr) { + for (int i = 0; i < 32; ++i) { + long l = arr[i]; + arr[i] = (l >>> 48) & 0xFFFFL; + arr[32 + i] = (l >>> 32) & 0xFFFFL; + arr[64 + i] = (l >>> 16) & 0xFFFFL; + arr[96 + i] = l & 0xFFFFL; + } + } + + private static void expand16To32(long[] arr) { + for (int i = 0; i < 32; ++i) { + long l = arr[i]; + arr[i] = (l >>> 16) & 0x0000FFFF0000FFFFL; + arr[32 + i] = l & 0x0000FFFF0000FFFFL; + } + } + + private static void collapse16(long[] arr) { + for (int i = 0; i < 32; ++i) { + arr[i] = (arr[i] << 48) | (arr[32 + i] << 32) | (arr[64 + i] << 16) | arr[96 + i]; + } + } + + private static void expand32(long[] arr) { + for (int i = 0; i < 64; ++i) { + long l = arr[i]; + arr[i] = l >>> 32; + arr[64 + i] = l & 0xFFFFFFFFL; + } + } + + private static void collapse32(long[] arr) { + for (int i = 0; i < 64; ++i) { + arr[i] = (arr[i] << 32) | arr[64 + i]; + } + } + + private final long[] tmp = new long[BLOCK_SIZE / 2]; + + /** 
Encode 128 integers from {@code longs} into {@code out}. */ + void encode(long[] longs, int bitsPerValue, DataOutput out) throws IOException { + final int nextPrimitive; + final int numLongs; + if (bitsPerValue <= 8) { + nextPrimitive = 8; + numLongs = BLOCK_SIZE / 8; + collapse8(longs); + } else if (bitsPerValue <= 16) { + nextPrimitive = 16; + numLongs = BLOCK_SIZE / 4; + collapse16(longs); + } else { + nextPrimitive = 32; + numLongs = BLOCK_SIZE / 2; + collapse32(longs); + } + + final int numLongsPerShift = bitsPerValue * 2; + int idx = 0; + int shift = nextPrimitive - bitsPerValue; + for (int i = 0; i < numLongsPerShift; ++i) { + tmp[i] = longs[idx++] << shift; + } + for (shift = shift - bitsPerValue; shift >= 0; shift -= bitsPerValue) { + for (int i = 0; i < numLongsPerShift; ++i) { + tmp[i] |= longs[idx++] << shift; + } + } + + final int remainingBitsPerLong = shift + bitsPerValue; + final long maskRemainingBitsPerLong; + if (nextPrimitive == 8) { + maskRemainingBitsPerLong = MASKS8[remainingBitsPerLong]; + } else if (nextPrimitive == 16) { + maskRemainingBitsPerLong = MASKS16[remainingBitsPerLong]; + } else { + maskRemainingBitsPerLong = MASKS32[remainingBitsPerLong]; + } + + int tmpIdx = 0; + int remainingBitsPerValue = bitsPerValue; + while (idx < numLongs) { + if (remainingBitsPerValue >= remainingBitsPerLong) { + remainingBitsPerValue -= remainingBitsPerLong; + tmp[tmpIdx++] |= (longs[idx] >>> remainingBitsPerValue) & maskRemainingBitsPerLong; + if (remainingBitsPerValue == 0) { + idx++; + remainingBitsPerValue = bitsPerValue; + } + } else { + final long mask1, mask2; + if (nextPrimitive == 8) { + mask1 = MASKS8[remainingBitsPerValue]; + mask2 = MASKS8[remainingBitsPerLong - remainingBitsPerValue]; + } else if (nextPrimitive == 16) { + mask1 = MASKS16[remainingBitsPerValue]; + mask2 = MASKS16[remainingBitsPerLong - remainingBitsPerValue]; + } else { + mask1 = MASKS32[remainingBitsPerValue]; + mask2 = MASKS32[remainingBitsPerLong - remainingBitsPerValue]; + } + tmp[tmpIdx] |= (longs[idx++] & mask1) << (remainingBitsPerLong - remainingBitsPerValue); + remainingBitsPerValue = bitsPerValue - remainingBitsPerLong + remainingBitsPerValue; + tmp[tmpIdx++] |= (longs[idx] >>> remainingBitsPerValue) & mask2; + } + } + + for (int i = 0; i < numLongsPerShift; ++i) { + out.writeLong(tmp[i]); + } + } + + /** Number of bytes required to encode 128 integers of {@code bitsPerValue} bits per value. 
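+ * For example, {@code bitsPerValue} = 5 yields {@code 5 << (BLOCK_SIZE_LOG2 - 3)} = 80 bytes, i.e. exactly 128 values at 5 bits each (640 bits). + *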
*/ + int numBytes(int bitsPerValue) { + return bitsPerValue << (BLOCK_SIZE_LOG2 - 3); + } + + private static void decodeSlow(int bitsPerValue, DataInput in, long[] tmp, long[] longs) throws IOException { + final int numLongs = bitsPerValue << 1; + in.readLongs(tmp, 0, numLongs); + final long mask = MASKS32[bitsPerValue]; + int longsIdx = 0; + int shift = 32 - bitsPerValue; + for (; shift >= 0; shift -= bitsPerValue) { + shiftLongs(tmp, numLongs, longs, longsIdx, shift, mask); + longsIdx += numLongs; + } + final int remainingBitsPerLong = shift + bitsPerValue; + final long mask32RemainingBitsPerLong = MASKS32[remainingBitsPerLong]; + int tmpIdx = 0; + int remainingBits = remainingBitsPerLong; + for (; longsIdx < BLOCK_SIZE / 2; ++longsIdx) { + int b = bitsPerValue - remainingBits; + long l = (tmp[tmpIdx++] & MASKS32[remainingBits]) << b; + while (b >= remainingBitsPerLong) { + b -= remainingBitsPerLong; + l |= (tmp[tmpIdx++] & mask32RemainingBitsPerLong) << b; + } + if (b > 0) { + l |= (tmp[tmpIdx] >>> (remainingBitsPerLong - b)) & MASKS32[b]; + remainingBits = remainingBitsPerLong - b; + } else { + remainingBits = remainingBitsPerLong; + } + longs[longsIdx] = l; + } + } + + /** + * The pattern that this shiftLongs method applies is recognized by the C2 compiler, which + * generates SIMD instructions for it in order to shift multiple longs at once. + */ + private static void shiftLongs(long[] a, int count, long[] b, int bi, int shift, long mask) { + for (int i = 0; i < count; ++i) { + b[bi + i] = (a[i] >>> shift) & mask; + } + } + + private static final long[] MASKS8 = new long[8]; + private static final long[] MASKS16 = new long[16]; + private static final long[] MASKS32 = new long[32]; + + static { + for (int i = 0; i < 8; ++i) { + MASKS8[i] = mask8(i); + } + for (int i = 0; i < 16; ++i) { + MASKS16[i] = mask16(i); + } + for (int i = 0; i < 32; ++i) { + MASKS32[i] = mask32(i); + } + } + + // mark values in array as final longs to avoid the cost of reading array, arrays should only be + // used when the idx is a variable + private static final long MASK8_1 = MASKS8[1]; + private static final long MASK8_2 = MASKS8[2]; + private static final long MASK8_3 = MASKS8[3]; + private static final long MASK8_4 = MASKS8[4]; + private static final long MASK8_5 = MASKS8[5]; + private static final long MASK8_6 = MASKS8[6]; + private static final long MASK8_7 = MASKS8[7]; + private static final long MASK16_1 = MASKS16[1]; + private static final long MASK16_2 = MASKS16[2]; + private static final long MASK16_3 = MASKS16[3]; + private static final long MASK16_4 = MASKS16[4]; + private static final long MASK16_5 = MASKS16[5]; + private static final long MASK16_6 = MASKS16[6]; + private static final long MASK16_7 = MASKS16[7]; + private static final long MASK16_9 = MASKS16[9]; + private static final long MASK16_10 = MASKS16[10]; + private static final long MASK16_11 = MASKS16[11]; + private static final long MASK16_12 = MASKS16[12]; + private static final long MASK16_13 = MASKS16[13]; + private static final long MASK16_14 = MASKS16[14]; + private static final long MASK16_15 = MASKS16[15]; + private static final long MASK32_1 = MASKS32[1]; + private static final long MASK32_2 = MASKS32[2]; + private static final long MASK32_3 = MASKS32[3]; + private static final long MASK32_4 = MASKS32[4]; + private static final long MASK32_5 = MASKS32[5]; + private static final long MASK32_6 = MASKS32[6]; + private static final long MASK32_7 = MASKS32[7]; + private static final long MASK32_8 = MASKS32[8]; + private static 
final long MASK32_9 = MASKS32[9]; + private static final long MASK32_10 = MASKS32[10]; + private static final long MASK32_11 = MASKS32[11]; + private static final long MASK32_12 = MASKS32[12]; + private static final long MASK32_13 = MASKS32[13]; + private static final long MASK32_14 = MASKS32[14]; + private static final long MASK32_15 = MASKS32[15]; + private static final long MASK32_17 = MASKS32[17]; + private static final long MASK32_18 = MASKS32[18]; + private static final long MASK32_19 = MASKS32[19]; + private static final long MASK32_20 = MASKS32[20]; + private static final long MASK32_21 = MASKS32[21]; + private static final long MASK32_22 = MASKS32[22]; + private static final long MASK32_23 = MASKS32[23]; + private static final long MASK32_24 = MASKS32[24]; + + /** Decode 128 integers into {@code longs}. */ + void decode(int bitsPerValue, DataInput in, long[] longs) throws IOException { + switch (bitsPerValue) { + case 1: + decode1(in, tmp, longs); + expand8(longs); + break; + case 2: + decode2(in, tmp, longs); + expand8(longs); + break; + case 3: + decode3(in, tmp, longs); + expand8(longs); + break; + case 4: + decode4(in, tmp, longs); + expand8(longs); + break; + case 5: + decode5(in, tmp, longs); + expand8(longs); + break; + case 6: + decode6(in, tmp, longs); + expand8(longs); + break; + case 7: + decode7(in, tmp, longs); + expand8(longs); + break; + case 8: + decode8(in, tmp, longs); + expand8(longs); + break; + case 9: + decode9(in, tmp, longs); + expand16(longs); + break; + case 10: + decode10(in, tmp, longs); + expand16(longs); + break; + case 11: + decode11(in, tmp, longs); + expand16(longs); + break; + case 12: + decode12(in, tmp, longs); + expand16(longs); + break; + case 13: + decode13(in, tmp, longs); + expand16(longs); + break; + case 14: + decode14(in, tmp, longs); + expand16(longs); + break; + case 15: + decode15(in, tmp, longs); + expand16(longs); + break; + case 16: + decode16(in, tmp, longs); + expand16(longs); + break; + case 17: + decode17(in, tmp, longs); + expand32(longs); + break; + case 18: + decode18(in, tmp, longs); + expand32(longs); + break; + case 19: + decode19(in, tmp, longs); + expand32(longs); + break; + case 20: + decode20(in, tmp, longs); + expand32(longs); + break; + case 21: + decode21(in, tmp, longs); + expand32(longs); + break; + case 22: + decode22(in, tmp, longs); + expand32(longs); + break; + case 23: + decode23(in, tmp, longs); + expand32(longs); + break; + case 24: + decode24(in, tmp, longs); + expand32(longs); + break; + default: + decodeSlow(bitsPerValue, in, tmp, longs); + expand32(longs); + break; + } + } + + /** + * Decodes 128 integers into 64 {@code longs} such that each long contains two values, each + * represented with 32 bits. Values [0..63] are encoded in the high-order bits of {@code longs} + * [0..63], and values [64..127] are encoded in the low-order bits of {@code longs} [0..63]. This + * representation may allow subsequent operations to be performed on two values at a time. 
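+ * For example, with {@code bitsPerValue <= 16} the decoded {@code longs[0]} carries value 0 in its high 32 bits and value 64 in its low 32 bits, so a single 64-bit addition can advance two running sums at once, as the prefix-sum code in PForUtil does. + *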
+ */ + void decodeTo32(int bitsPerValue, DataInput in, long[] longs) throws IOException { + switch (bitsPerValue) { + case 1: + decode1(in, tmp, longs); + expand8To32(longs); + break; + case 2: + decode2(in, tmp, longs); + expand8To32(longs); + break; + case 3: + decode3(in, tmp, longs); + expand8To32(longs); + break; + case 4: + decode4(in, tmp, longs); + expand8To32(longs); + break; + case 5: + decode5(in, tmp, longs); + expand8To32(longs); + break; + case 6: + decode6(in, tmp, longs); + expand8To32(longs); + break; + case 7: + decode7(in, tmp, longs); + expand8To32(longs); + break; + case 8: + decode8(in, tmp, longs); + expand8To32(longs); + break; + case 9: + decode9(in, tmp, longs); + expand16To32(longs); + break; + case 10: + decode10(in, tmp, longs); + expand16To32(longs); + break; + case 11: + decode11(in, tmp, longs); + expand16To32(longs); + break; + case 12: + decode12(in, tmp, longs); + expand16To32(longs); + break; + case 13: + decode13(in, tmp, longs); + expand16To32(longs); + break; + case 14: + decode14(in, tmp, longs); + expand16To32(longs); + break; + case 15: + decode15(in, tmp, longs); + expand16To32(longs); + break; + case 16: + decode16(in, tmp, longs); + expand16To32(longs); + break; + case 17: + decode17(in, tmp, longs); + break; + case 18: + decode18(in, tmp, longs); + break; + case 19: + decode19(in, tmp, longs); + break; + case 20: + decode20(in, tmp, longs); + break; + case 21: + decode21(in, tmp, longs); + break; + case 22: + decode22(in, tmp, longs); + break; + case 23: + decode23(in, tmp, longs); + break; + case 24: + decode24(in, tmp, longs); + break; + default: + decodeSlow(bitsPerValue, in, tmp, longs); + break; + } + } + + private static void decode1(DataInput in, long[] tmp, long[] longs) throws IOException { + in.readLongs(tmp, 0, 2); + shiftLongs(tmp, 2, longs, 0, 7, MASK8_1); + shiftLongs(tmp, 2, longs, 2, 6, MASK8_1); + shiftLongs(tmp, 2, longs, 4, 5, MASK8_1); + shiftLongs(tmp, 2, longs, 6, 4, MASK8_1); + shiftLongs(tmp, 2, longs, 8, 3, MASK8_1); + shiftLongs(tmp, 2, longs, 10, 2, MASK8_1); + shiftLongs(tmp, 2, longs, 12, 1, MASK8_1); + shiftLongs(tmp, 2, longs, 14, 0, MASK8_1); + } + + private static void decode2(DataInput in, long[] tmp, long[] longs) throws IOException { + in.readLongs(tmp, 0, 4); + shiftLongs(tmp, 4, longs, 0, 6, MASK8_2); + shiftLongs(tmp, 4, longs, 4, 4, MASK8_2); + shiftLongs(tmp, 4, longs, 8, 2, MASK8_2); + shiftLongs(tmp, 4, longs, 12, 0, MASK8_2); + } + + private static void decode3(DataInput in, long[] tmp, long[] longs) throws IOException { + in.readLongs(tmp, 0, 6); + shiftLongs(tmp, 6, longs, 0, 5, MASK8_3); + shiftLongs(tmp, 6, longs, 6, 2, MASK8_3); + for (int iter = 0, tmpIdx = 0, longsIdx = 12; iter < 2; ++iter, tmpIdx += 3, longsIdx += 2) { + long l0 = (tmp[tmpIdx + 0] & MASK8_2) << 1; + l0 |= (tmp[tmpIdx + 1] >>> 1) & MASK8_1; + longs[longsIdx + 0] = l0; + long l1 = (tmp[tmpIdx + 1] & MASK8_1) << 2; + l1 |= (tmp[tmpIdx + 2] & MASK8_2) << 0; + longs[longsIdx + 1] = l1; + } + } + + private static void decode4(DataInput in, long[] tmp, long[] longs) throws IOException { + in.readLongs(tmp, 0, 8); + shiftLongs(tmp, 8, longs, 0, 4, MASK8_4); + shiftLongs(tmp, 8, longs, 8, 0, MASK8_4); + } + + private static void decode5(DataInput in, long[] tmp, long[] longs) throws IOException { + in.readLongs(tmp, 0, 10); + shiftLongs(tmp, 10, longs, 0, 3, MASK8_5); + for (int iter = 0, tmpIdx = 0, longsIdx = 10; iter < 2; ++iter, tmpIdx += 5, longsIdx += 3) { + long l0 = (tmp[tmpIdx + 0] & MASK8_3) << 2; + l0 |= (tmp[tmpIdx + 1] 
>>> 1) & MASK8_2; + longs[longsIdx + 0] = l0; + long l1 = (tmp[tmpIdx + 1] & MASK8_1) << 4; + l1 |= (tmp[tmpIdx + 2] & MASK8_3) << 1; + l1 |= (tmp[tmpIdx + 3] >>> 2) & MASK8_1; + longs[longsIdx + 1] = l1; + long l2 = (tmp[tmpIdx + 3] & MASK8_2) << 3; + l2 |= (tmp[tmpIdx + 4] & MASK8_3) << 0; + longs[longsIdx + 2] = l2; + } + } + + private static void decode6(DataInput in, long[] tmp, long[] longs) throws IOException { + in.readLongs(tmp, 0, 12); + shiftLongs(tmp, 12, longs, 0, 2, MASK8_6); + shiftLongs(tmp, 12, tmp, 0, 0, MASK8_2); + for (int iter = 0, tmpIdx = 0, longsIdx = 12; iter < 4; ++iter, tmpIdx += 3, longsIdx += 1) { + long l0 = tmp[tmpIdx + 0] << 4; + l0 |= tmp[tmpIdx + 1] << 2; + l0 |= tmp[tmpIdx + 2] << 0; + longs[longsIdx + 0] = l0; + } + } + + private static void decode7(DataInput in, long[] tmp, long[] longs) throws IOException { + in.readLongs(tmp, 0, 14); + shiftLongs(tmp, 14, longs, 0, 1, MASK8_7); + shiftLongs(tmp, 14, tmp, 0, 0, MASK8_1); + for (int iter = 0, tmpIdx = 0, longsIdx = 14; iter < 2; ++iter, tmpIdx += 7, longsIdx += 1) { + long l0 = tmp[tmpIdx + 0] << 6; + l0 |= tmp[tmpIdx + 1] << 5; + l0 |= tmp[tmpIdx + 2] << 4; + l0 |= tmp[tmpIdx + 3] << 3; + l0 |= tmp[tmpIdx + 4] << 2; + l0 |= tmp[tmpIdx + 5] << 1; + l0 |= tmp[tmpIdx + 6] << 0; + longs[longsIdx + 0] = l0; + } + } + + private static void decode8(DataInput in, long[] tmp, long[] longs) throws IOException { + in.readLongs(longs, 0, 16); + } + + private static void decode9(DataInput in, long[] tmp, long[] longs) throws IOException { + in.readLongs(tmp, 0, 18); + shiftLongs(tmp, 18, longs, 0, 7, MASK16_9); + for (int iter = 0, tmpIdx = 0, longsIdx = 18; iter < 2; ++iter, tmpIdx += 9, longsIdx += 7) { + long l0 = (tmp[tmpIdx + 0] & MASK16_7) << 2; + l0 |= (tmp[tmpIdx + 1] >>> 5) & MASK16_2; + longs[longsIdx + 0] = l0; + long l1 = (tmp[tmpIdx + 1] & MASK16_5) << 4; + l1 |= (tmp[tmpIdx + 2] >>> 3) & MASK16_4; + longs[longsIdx + 1] = l1; + long l2 = (tmp[tmpIdx + 2] & MASK16_3) << 6; + l2 |= (tmp[tmpIdx + 3] >>> 1) & MASK16_6; + longs[longsIdx + 2] = l2; + long l3 = (tmp[tmpIdx + 3] & MASK16_1) << 8; + l3 |= (tmp[tmpIdx + 4] & MASK16_7) << 1; + l3 |= (tmp[tmpIdx + 5] >>> 6) & MASK16_1; + longs[longsIdx + 3] = l3; + long l4 = (tmp[tmpIdx + 5] & MASK16_6) << 3; + l4 |= (tmp[tmpIdx + 6] >>> 4) & MASK16_3; + longs[longsIdx + 4] = l4; + long l5 = (tmp[tmpIdx + 6] & MASK16_4) << 5; + l5 |= (tmp[tmpIdx + 7] >>> 2) & MASK16_5; + longs[longsIdx + 5] = l5; + long l6 = (tmp[tmpIdx + 7] & MASK16_2) << 7; + l6 |= (tmp[tmpIdx + 8] & MASK16_7) << 0; + longs[longsIdx + 6] = l6; + } + } + + private static void decode10(DataInput in, long[] tmp, long[] longs) throws IOException { + in.readLongs(tmp, 0, 20); + shiftLongs(tmp, 20, longs, 0, 6, MASK16_10); + for (int iter = 0, tmpIdx = 0, longsIdx = 20; iter < 4; ++iter, tmpIdx += 5, longsIdx += 3) { + long l0 = (tmp[tmpIdx + 0] & MASK16_6) << 4; + l0 |= (tmp[tmpIdx + 1] >>> 2) & MASK16_4; + longs[longsIdx + 0] = l0; + long l1 = (tmp[tmpIdx + 1] & MASK16_2) << 8; + l1 |= (tmp[tmpIdx + 2] & MASK16_6) << 2; + l1 |= (tmp[tmpIdx + 3] >>> 4) & MASK16_2; + longs[longsIdx + 1] = l1; + long l2 = (tmp[tmpIdx + 3] & MASK16_4) << 6; + l2 |= (tmp[tmpIdx + 4] & MASK16_6) << 0; + longs[longsIdx + 2] = l2; + } + } + + private static void decode11(DataInput in, long[] tmp, long[] longs) throws IOException { + in.readLongs(tmp, 0, 22); + shiftLongs(tmp, 22, longs, 0, 5, MASK16_11); + for (int iter = 0, tmpIdx = 0, longsIdx = 22; iter < 2; ++iter, tmpIdx += 11, longsIdx += 5) { + long l0 = (tmp[tmpIdx 
+ 0] & MASK16_5) << 6; + l0 |= (tmp[tmpIdx + 1] & MASK16_5) << 1; + l0 |= (tmp[tmpIdx + 2] >>> 4) & MASK16_1; + longs[longsIdx + 0] = l0; + long l1 = (tmp[tmpIdx + 2] & MASK16_4) << 7; + l1 |= (tmp[tmpIdx + 3] & MASK16_5) << 2; + l1 |= (tmp[tmpIdx + 4] >>> 3) & MASK16_2; + longs[longsIdx + 1] = l1; + long l2 = (tmp[tmpIdx + 4] & MASK16_3) << 8; + l2 |= (tmp[tmpIdx + 5] & MASK16_5) << 3; + l2 |= (tmp[tmpIdx + 6] >>> 2) & MASK16_3; + longs[longsIdx + 2] = l2; + long l3 = (tmp[tmpIdx + 6] & MASK16_2) << 9; + l3 |= (tmp[tmpIdx + 7] & MASK16_5) << 4; + l3 |= (tmp[tmpIdx + 8] >>> 1) & MASK16_4; + longs[longsIdx + 3] = l3; + long l4 = (tmp[tmpIdx + 8] & MASK16_1) << 10; + l4 |= (tmp[tmpIdx + 9] & MASK16_5) << 5; + l4 |= (tmp[tmpIdx + 10] & MASK16_5) << 0; + longs[longsIdx + 4] = l4; + } + } + + private static void decode12(DataInput in, long[] tmp, long[] longs) throws IOException { + in.readLongs(tmp, 0, 24); + shiftLongs(tmp, 24, longs, 0, 4, MASK16_12); + shiftLongs(tmp, 24, tmp, 0, 0, MASK16_4); + for (int iter = 0, tmpIdx = 0, longsIdx = 24; iter < 8; ++iter, tmpIdx += 3, longsIdx += 1) { + long l0 = tmp[tmpIdx + 0] << 8; + l0 |= tmp[tmpIdx + 1] << 4; + l0 |= tmp[tmpIdx + 2] << 0; + longs[longsIdx + 0] = l0; + } + } + + private static void decode13(DataInput in, long[] tmp, long[] longs) throws IOException { + in.readLongs(tmp, 0, 26); + shiftLongs(tmp, 26, longs, 0, 3, MASK16_13); + for (int iter = 0, tmpIdx = 0, longsIdx = 26; iter < 2; ++iter, tmpIdx += 13, longsIdx += 3) { + long l0 = (tmp[tmpIdx + 0] & MASK16_3) << 10; + l0 |= (tmp[tmpIdx + 1] & MASK16_3) << 7; + l0 |= (tmp[tmpIdx + 2] & MASK16_3) << 4; + l0 |= (tmp[tmpIdx + 3] & MASK16_3) << 1; + l0 |= (tmp[tmpIdx + 4] >>> 2) & MASK16_1; + longs[longsIdx + 0] = l0; + long l1 = (tmp[tmpIdx + 4] & MASK16_2) << 11; + l1 |= (tmp[tmpIdx + 5] & MASK16_3) << 8; + l1 |= (tmp[tmpIdx + 6] & MASK16_3) << 5; + l1 |= (tmp[tmpIdx + 7] & MASK16_3) << 2; + l1 |= (tmp[tmpIdx + 8] >>> 1) & MASK16_2; + longs[longsIdx + 1] = l1; + long l2 = (tmp[tmpIdx + 8] & MASK16_1) << 12; + l2 |= (tmp[tmpIdx + 9] & MASK16_3) << 9; + l2 |= (tmp[tmpIdx + 10] & MASK16_3) << 6; + l2 |= (tmp[tmpIdx + 11] & MASK16_3) << 3; + l2 |= (tmp[tmpIdx + 12] & MASK16_3) << 0; + longs[longsIdx + 2] = l2; + } + } + + private static void decode14(DataInput in, long[] tmp, long[] longs) throws IOException { + in.readLongs(tmp, 0, 28); + shiftLongs(tmp, 28, longs, 0, 2, MASK16_14); + shiftLongs(tmp, 28, tmp, 0, 0, MASK16_2); + for (int iter = 0, tmpIdx = 0, longsIdx = 28; iter < 4; ++iter, tmpIdx += 7, longsIdx += 1) { + long l0 = tmp[tmpIdx + 0] << 12; + l0 |= tmp[tmpIdx + 1] << 10; + l0 |= tmp[tmpIdx + 2] << 8; + l0 |= tmp[tmpIdx + 3] << 6; + l0 |= tmp[tmpIdx + 4] << 4; + l0 |= tmp[tmpIdx + 5] << 2; + l0 |= tmp[tmpIdx + 6] << 0; + longs[longsIdx + 0] = l0; + } + } + + private static void decode15(DataInput in, long[] tmp, long[] longs) throws IOException { + in.readLongs(tmp, 0, 30); + shiftLongs(tmp, 30, longs, 0, 1, MASK16_15); + shiftLongs(tmp, 30, tmp, 0, 0, MASK16_1); + for (int iter = 0, tmpIdx = 0, longsIdx = 30; iter < 2; ++iter, tmpIdx += 15, longsIdx += 1) { + long l0 = tmp[tmpIdx + 0] << 14; + l0 |= tmp[tmpIdx + 1] << 13; + l0 |= tmp[tmpIdx + 2] << 12; + l0 |= tmp[tmpIdx + 3] << 11; + l0 |= tmp[tmpIdx + 4] << 10; + l0 |= tmp[tmpIdx + 5] << 9; + l0 |= tmp[tmpIdx + 6] << 8; + l0 |= tmp[tmpIdx + 7] << 7; + l0 |= tmp[tmpIdx + 8] << 6; + l0 |= tmp[tmpIdx + 9] << 5; + l0 |= tmp[tmpIdx + 10] << 4; + l0 |= tmp[tmpIdx + 11] << 3; + l0 |= tmp[tmpIdx + 12] << 2; + l0 |= tmp[tmpIdx + 13] 
<< 1; + l0 |= tmp[tmpIdx + 14] << 0; + longs[longsIdx + 0] = l0; + } + } + + private static void decode16(DataInput in, long[] tmp, long[] longs) throws IOException { + in.readLongs(longs, 0, 32); + } + + private static void decode17(DataInput in, long[] tmp, long[] longs) throws IOException { + in.readLongs(tmp, 0, 34); + shiftLongs(tmp, 34, longs, 0, 15, MASK32_17); + for (int iter = 0, tmpIdx = 0, longsIdx = 34; iter < 2; ++iter, tmpIdx += 17, longsIdx += 15) { + long l0 = (tmp[tmpIdx + 0] & MASK32_15) << 2; + l0 |= (tmp[tmpIdx + 1] >>> 13) & MASK32_2; + longs[longsIdx + 0] = l0; + long l1 = (tmp[tmpIdx + 1] & MASK32_13) << 4; + l1 |= (tmp[tmpIdx + 2] >>> 11) & MASK32_4; + longs[longsIdx + 1] = l1; + long l2 = (tmp[tmpIdx + 2] & MASK32_11) << 6; + l2 |= (tmp[tmpIdx + 3] >>> 9) & MASK32_6; + longs[longsIdx + 2] = l2; + long l3 = (tmp[tmpIdx + 3] & MASK32_9) << 8; + l3 |= (tmp[tmpIdx + 4] >>> 7) & MASK32_8; + longs[longsIdx + 3] = l3; + long l4 = (tmp[tmpIdx + 4] & MASK32_7) << 10; + l4 |= (tmp[tmpIdx + 5] >>> 5) & MASK32_10; + longs[longsIdx + 4] = l4; + long l5 = (tmp[tmpIdx + 5] & MASK32_5) << 12; + l5 |= (tmp[tmpIdx + 6] >>> 3) & MASK32_12; + longs[longsIdx + 5] = l5; + long l6 = (tmp[tmpIdx + 6] & MASK32_3) << 14; + l6 |= (tmp[tmpIdx + 7] >>> 1) & MASK32_14; + longs[longsIdx + 6] = l6; + long l7 = (tmp[tmpIdx + 7] & MASK32_1) << 16; + l7 |= (tmp[tmpIdx + 8] & MASK32_15) << 1; + l7 |= (tmp[tmpIdx + 9] >>> 14) & MASK32_1; + longs[longsIdx + 7] = l7; + long l8 = (tmp[tmpIdx + 9] & MASK32_14) << 3; + l8 |= (tmp[tmpIdx + 10] >>> 12) & MASK32_3; + longs[longsIdx + 8] = l8; + long l9 = (tmp[tmpIdx + 10] & MASK32_12) << 5; + l9 |= (tmp[tmpIdx + 11] >>> 10) & MASK32_5; + longs[longsIdx + 9] = l9; + long l10 = (tmp[tmpIdx + 11] & MASK32_10) << 7; + l10 |= (tmp[tmpIdx + 12] >>> 8) & MASK32_7; + longs[longsIdx + 10] = l10; + long l11 = (tmp[tmpIdx + 12] & MASK32_8) << 9; + l11 |= (tmp[tmpIdx + 13] >>> 6) & MASK32_9; + longs[longsIdx + 11] = l11; + long l12 = (tmp[tmpIdx + 13] & MASK32_6) << 11; + l12 |= (tmp[tmpIdx + 14] >>> 4) & MASK32_11; + longs[longsIdx + 12] = l12; + long l13 = (tmp[tmpIdx + 14] & MASK32_4) << 13; + l13 |= (tmp[tmpIdx + 15] >>> 2) & MASK32_13; + longs[longsIdx + 13] = l13; + long l14 = (tmp[tmpIdx + 15] & MASK32_2) << 15; + l14 |= (tmp[tmpIdx + 16] & MASK32_15) << 0; + longs[longsIdx + 14] = l14; + } + } + + private static void decode18(DataInput in, long[] tmp, long[] longs) throws IOException { + in.readLongs(tmp, 0, 36); + shiftLongs(tmp, 36, longs, 0, 14, MASK32_18); + for (int iter = 0, tmpIdx = 0, longsIdx = 36; iter < 4; ++iter, tmpIdx += 9, longsIdx += 7) { + long l0 = (tmp[tmpIdx + 0] & MASK32_14) << 4; + l0 |= (tmp[tmpIdx + 1] >>> 10) & MASK32_4; + longs[longsIdx + 0] = l0; + long l1 = (tmp[tmpIdx + 1] & MASK32_10) << 8; + l1 |= (tmp[tmpIdx + 2] >>> 6) & MASK32_8; + longs[longsIdx + 1] = l1; + long l2 = (tmp[tmpIdx + 2] & MASK32_6) << 12; + l2 |= (tmp[tmpIdx + 3] >>> 2) & MASK32_12; + longs[longsIdx + 2] = l2; + long l3 = (tmp[tmpIdx + 3] & MASK32_2) << 16; + l3 |= (tmp[tmpIdx + 4] & MASK32_14) << 2; + l3 |= (tmp[tmpIdx + 5] >>> 12) & MASK32_2; + longs[longsIdx + 3] = l3; + long l4 = (tmp[tmpIdx + 5] & MASK32_12) << 6; + l4 |= (tmp[tmpIdx + 6] >>> 8) & MASK32_6; + longs[longsIdx + 4] = l4; + long l5 = (tmp[tmpIdx + 6] & MASK32_8) << 10; + l5 |= (tmp[tmpIdx + 7] >>> 4) & MASK32_10; + longs[longsIdx + 5] = l5; + long l6 = (tmp[tmpIdx + 7] & MASK32_4) << 14; + l6 |= (tmp[tmpIdx + 8] & MASK32_14) << 0; + longs[longsIdx + 6] = l6; + } + } + + private static void 
decode19(DataInput in, long[] tmp, long[] longs) throws IOException { + in.readLongs(tmp, 0, 38); + shiftLongs(tmp, 38, longs, 0, 13, MASK32_19); + for (int iter = 0, tmpIdx = 0, longsIdx = 38; iter < 2; ++iter, tmpIdx += 19, longsIdx += 13) { + long l0 = (tmp[tmpIdx + 0] & MASK32_13) << 6; + l0 |= (tmp[tmpIdx + 1] >>> 7) & MASK32_6; + longs[longsIdx + 0] = l0; + long l1 = (tmp[tmpIdx + 1] & MASK32_7) << 12; + l1 |= (tmp[tmpIdx + 2] >>> 1) & MASK32_12; + longs[longsIdx + 1] = l1; + long l2 = (tmp[tmpIdx + 2] & MASK32_1) << 18; + l2 |= (tmp[tmpIdx + 3] & MASK32_13) << 5; + l2 |= (tmp[tmpIdx + 4] >>> 8) & MASK32_5; + longs[longsIdx + 2] = l2; + long l3 = (tmp[tmpIdx + 4] & MASK32_8) << 11; + l3 |= (tmp[tmpIdx + 5] >>> 2) & MASK32_11; + longs[longsIdx + 3] = l3; + long l4 = (tmp[tmpIdx + 5] & MASK32_2) << 17; + l4 |= (tmp[tmpIdx + 6] & MASK32_13) << 4; + l4 |= (tmp[tmpIdx + 7] >>> 9) & MASK32_4; + longs[longsIdx + 4] = l4; + long l5 = (tmp[tmpIdx + 7] & MASK32_9) << 10; + l5 |= (tmp[tmpIdx + 8] >>> 3) & MASK32_10; + longs[longsIdx + 5] = l5; + long l6 = (tmp[tmpIdx + 8] & MASK32_3) << 16; + l6 |= (tmp[tmpIdx + 9] & MASK32_13) << 3; + l6 |= (tmp[tmpIdx + 10] >>> 10) & MASK32_3; + longs[longsIdx + 6] = l6; + long l7 = (tmp[tmpIdx + 10] & MASK32_10) << 9; + l7 |= (tmp[tmpIdx + 11] >>> 4) & MASK32_9; + longs[longsIdx + 7] = l7; + long l8 = (tmp[tmpIdx + 11] & MASK32_4) << 15; + l8 |= (tmp[tmpIdx + 12] & MASK32_13) << 2; + l8 |= (tmp[tmpIdx + 13] >>> 11) & MASK32_2; + longs[longsIdx + 8] = l8; + long l9 = (tmp[tmpIdx + 13] & MASK32_11) << 8; + l9 |= (tmp[tmpIdx + 14] >>> 5) & MASK32_8; + longs[longsIdx + 9] = l9; + long l10 = (tmp[tmpIdx + 14] & MASK32_5) << 14; + l10 |= (tmp[tmpIdx + 15] & MASK32_13) << 1; + l10 |= (tmp[tmpIdx + 16] >>> 12) & MASK32_1; + longs[longsIdx + 10] = l10; + long l11 = (tmp[tmpIdx + 16] & MASK32_12) << 7; + l11 |= (tmp[tmpIdx + 17] >>> 6) & MASK32_7; + longs[longsIdx + 11] = l11; + long l12 = (tmp[tmpIdx + 17] & MASK32_6) << 13; + l12 |= (tmp[tmpIdx + 18] & MASK32_13) << 0; + longs[longsIdx + 12] = l12; + } + } + + private static void decode20(DataInput in, long[] tmp, long[] longs) throws IOException { + in.readLongs(tmp, 0, 40); + shiftLongs(tmp, 40, longs, 0, 12, MASK32_20); + for (int iter = 0, tmpIdx = 0, longsIdx = 40; iter < 8; ++iter, tmpIdx += 5, longsIdx += 3) { + long l0 = (tmp[tmpIdx + 0] & MASK32_12) << 8; + l0 |= (tmp[tmpIdx + 1] >>> 4) & MASK32_8; + longs[longsIdx + 0] = l0; + long l1 = (tmp[tmpIdx + 1] & MASK32_4) << 16; + l1 |= (tmp[tmpIdx + 2] & MASK32_12) << 4; + l1 |= (tmp[tmpIdx + 3] >>> 8) & MASK32_4; + longs[longsIdx + 1] = l1; + long l2 = (tmp[tmpIdx + 3] & MASK32_8) << 12; + l2 |= (tmp[tmpIdx + 4] & MASK32_12) << 0; + longs[longsIdx + 2] = l2; + } + } + + private static void decode21(DataInput in, long[] tmp, long[] longs) throws IOException { + in.readLongs(tmp, 0, 42); + shiftLongs(tmp, 42, longs, 0, 11, MASK32_21); + for (int iter = 0, tmpIdx = 0, longsIdx = 42; iter < 2; ++iter, tmpIdx += 21, longsIdx += 11) { + long l0 = (tmp[tmpIdx + 0] & MASK32_11) << 10; + l0 |= (tmp[tmpIdx + 1] >>> 1) & MASK32_10; + longs[longsIdx + 0] = l0; + long l1 = (tmp[tmpIdx + 1] & MASK32_1) << 20; + l1 |= (tmp[tmpIdx + 2] & MASK32_11) << 9; + l1 |= (tmp[tmpIdx + 3] >>> 2) & MASK32_9; + longs[longsIdx + 1] = l1; + long l2 = (tmp[tmpIdx + 3] & MASK32_2) << 19; + l2 |= (tmp[tmpIdx + 4] & MASK32_11) << 8; + l2 |= (tmp[tmpIdx + 5] >>> 3) & MASK32_8; + longs[longsIdx + 2] = l2; + long l3 = (tmp[tmpIdx + 5] & MASK32_3) << 18; + l3 |= (tmp[tmpIdx + 6] & MASK32_11) << 7; 
+ l3 |= (tmp[tmpIdx + 7] >>> 4) & MASK32_7; + longs[longsIdx + 3] = l3; + long l4 = (tmp[tmpIdx + 7] & MASK32_4) << 17; + l4 |= (tmp[tmpIdx + 8] & MASK32_11) << 6; + l4 |= (tmp[tmpIdx + 9] >>> 5) & MASK32_6; + longs[longsIdx + 4] = l4; + long l5 = (tmp[tmpIdx + 9] & MASK32_5) << 16; + l5 |= (tmp[tmpIdx + 10] & MASK32_11) << 5; + l5 |= (tmp[tmpIdx + 11] >>> 6) & MASK32_5; + longs[longsIdx + 5] = l5; + long l6 = (tmp[tmpIdx + 11] & MASK32_6) << 15; + l6 |= (tmp[tmpIdx + 12] & MASK32_11) << 4; + l6 |= (tmp[tmpIdx + 13] >>> 7) & MASK32_4; + longs[longsIdx + 6] = l6; + long l7 = (tmp[tmpIdx + 13] & MASK32_7) << 14; + l7 |= (tmp[tmpIdx + 14] & MASK32_11) << 3; + l7 |= (tmp[tmpIdx + 15] >>> 8) & MASK32_3; + longs[longsIdx + 7] = l7; + long l8 = (tmp[tmpIdx + 15] & MASK32_8) << 13; + l8 |= (tmp[tmpIdx + 16] & MASK32_11) << 2; + l8 |= (tmp[tmpIdx + 17] >>> 9) & MASK32_2; + longs[longsIdx + 8] = l8; + long l9 = (tmp[tmpIdx + 17] & MASK32_9) << 12; + l9 |= (tmp[tmpIdx + 18] & MASK32_11) << 1; + l9 |= (tmp[tmpIdx + 19] >>> 10) & MASK32_1; + longs[longsIdx + 9] = l9; + long l10 = (tmp[tmpIdx + 19] & MASK32_10) << 11; + l10 |= (tmp[tmpIdx + 20] & MASK32_11) << 0; + longs[longsIdx + 10] = l10; + } + } + + private static void decode22(DataInput in, long[] tmp, long[] longs) throws IOException { + in.readLongs(tmp, 0, 44); + shiftLongs(tmp, 44, longs, 0, 10, MASK32_22); + for (int iter = 0, tmpIdx = 0, longsIdx = 44; iter < 4; ++iter, tmpIdx += 11, longsIdx += 5) { + long l0 = (tmp[tmpIdx + 0] & MASK32_10) << 12; + l0 |= (tmp[tmpIdx + 1] & MASK32_10) << 2; + l0 |= (tmp[tmpIdx + 2] >>> 8) & MASK32_2; + longs[longsIdx + 0] = l0; + long l1 = (tmp[tmpIdx + 2] & MASK32_8) << 14; + l1 |= (tmp[tmpIdx + 3] & MASK32_10) << 4; + l1 |= (tmp[tmpIdx + 4] >>> 6) & MASK32_4; + longs[longsIdx + 1] = l1; + long l2 = (tmp[tmpIdx + 4] & MASK32_6) << 16; + l2 |= (tmp[tmpIdx + 5] & MASK32_10) << 6; + l2 |= (tmp[tmpIdx + 6] >>> 4) & MASK32_6; + longs[longsIdx + 2] = l2; + long l3 = (tmp[tmpIdx + 6] & MASK32_4) << 18; + l3 |= (tmp[tmpIdx + 7] & MASK32_10) << 8; + l3 |= (tmp[tmpIdx + 8] >>> 2) & MASK32_8; + longs[longsIdx + 3] = l3; + long l4 = (tmp[tmpIdx + 8] & MASK32_2) << 20; + l4 |= (tmp[tmpIdx + 9] & MASK32_10) << 10; + l4 |= (tmp[tmpIdx + 10] & MASK32_10) << 0; + longs[longsIdx + 4] = l4; + } + } + + private static void decode23(DataInput in, long[] tmp, long[] longs) throws IOException { + in.readLongs(tmp, 0, 46); + shiftLongs(tmp, 46, longs, 0, 9, MASK32_23); + for (int iter = 0, tmpIdx = 0, longsIdx = 46; iter < 2; ++iter, tmpIdx += 23, longsIdx += 9) { + long l0 = (tmp[tmpIdx + 0] & MASK32_9) << 14; + l0 |= (tmp[tmpIdx + 1] & MASK32_9) << 5; + l0 |= (tmp[tmpIdx + 2] >>> 4) & MASK32_5; + longs[longsIdx + 0] = l0; + long l1 = (tmp[tmpIdx + 2] & MASK32_4) << 19; + l1 |= (tmp[tmpIdx + 3] & MASK32_9) << 10; + l1 |= (tmp[tmpIdx + 4] & MASK32_9) << 1; + l1 |= (tmp[tmpIdx + 5] >>> 8) & MASK32_1; + longs[longsIdx + 1] = l1; + long l2 = (tmp[tmpIdx + 5] & MASK32_8) << 15; + l2 |= (tmp[tmpIdx + 6] & MASK32_9) << 6; + l2 |= (tmp[tmpIdx + 7] >>> 3) & MASK32_6; + longs[longsIdx + 2] = l2; + long l3 = (tmp[tmpIdx + 7] & MASK32_3) << 20; + l3 |= (tmp[tmpIdx + 8] & MASK32_9) << 11; + l3 |= (tmp[tmpIdx + 9] & MASK32_9) << 2; + l3 |= (tmp[tmpIdx + 10] >>> 7) & MASK32_2; + longs[longsIdx + 3] = l3; + long l4 = (tmp[tmpIdx + 10] & MASK32_7) << 16; + l4 |= (tmp[tmpIdx + 11] & MASK32_9) << 7; + l4 |= (tmp[tmpIdx + 12] >>> 2) & MASK32_7; + longs[longsIdx + 4] = l4; + long l5 = (tmp[tmpIdx + 12] & MASK32_2) << 21; + l5 |= (tmp[tmpIdx + 13] & 
MASK32_9) << 12; + l5 |= (tmp[tmpIdx + 14] & MASK32_9) << 3; + l5 |= (tmp[tmpIdx + 15] >>> 6) & MASK32_3; + longs[longsIdx + 5] = l5; + long l6 = (tmp[tmpIdx + 15] & MASK32_6) << 17; + l6 |= (tmp[tmpIdx + 16] & MASK32_9) << 8; + l6 |= (tmp[tmpIdx + 17] >>> 1) & MASK32_8; + longs[longsIdx + 6] = l6; + long l7 = (tmp[tmpIdx + 17] & MASK32_1) << 22; + l7 |= (tmp[tmpIdx + 18] & MASK32_9) << 13; + l7 |= (tmp[tmpIdx + 19] & MASK32_9) << 4; + l7 |= (tmp[tmpIdx + 20] >>> 5) & MASK32_4; + longs[longsIdx + 7] = l7; + long l8 = (tmp[tmpIdx + 20] & MASK32_5) << 18; + l8 |= (tmp[tmpIdx + 21] & MASK32_9) << 9; + l8 |= (tmp[tmpIdx + 22] & MASK32_9) << 0; + longs[longsIdx + 8] = l8; + } + } + + private static void decode24(DataInput in, long[] tmp, long[] longs) throws IOException { + in.readLongs(tmp, 0, 48); + shiftLongs(tmp, 48, longs, 0, 8, MASK32_24); + shiftLongs(tmp, 48, tmp, 0, 0, MASK32_8); + for (int iter = 0, tmpIdx = 0, longsIdx = 48; iter < 16; ++iter, tmpIdx += 3, longsIdx += 1) { + long l0 = tmp[tmpIdx + 0] << 16; + l0 |= tmp[tmpIdx + 1] << 8; + l0 |= tmp[tmpIdx + 2] << 0; + longs[longsIdx + 0] = l0; + } + } +} diff --git a/server/src/main/java/org/elasticsearch/index/codec/postings/PForUtil.java b/server/src/main/java/org/elasticsearch/index/codec/postings/PForUtil.java new file mode 100644 index 0000000000000..26a600c73eeb5 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/codec/postings/PForUtil.java @@ -0,0 +1,323 @@ +/* + * @notice + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Modifications copyright (C) 2022 Elasticsearch B.V. + */ +package org.elasticsearch.index.codec.postings; + +import org.apache.lucene.store.DataInput; +import org.apache.lucene.store.DataOutput; +import org.apache.lucene.util.LongHeap; +import org.apache.lucene.util.packed.PackedInts; + +import java.io.IOException; +import java.util.Arrays; + +/** Utility class to encode sequences of 128 small positive integers. */ +final class PForUtil { + + private static final int MAX_EXCEPTIONS = 7; + private static final int HALF_BLOCK_SIZE = ForUtil.BLOCK_SIZE / 2; + + // IDENTITY_PLUS_ONE[i] == i + 1 + private static final long[] IDENTITY_PLUS_ONE = new long[ForUtil.BLOCK_SIZE]; + + static { + for (int i = 0; i < ForUtil.BLOCK_SIZE; ++i) { + IDENTITY_PLUS_ONE[i] = i + 1; + } + } + + static boolean allEqual(long[] l) { + for (int i = 1; i < ForUtil.BLOCK_SIZE; ++i) { + if (l[i] != l[0]) { + return false; + } + } + return true; + } + + private final ForUtil forUtil; + // buffer for reading exception data; each exception uses two bytes (pos + high-order bits of the + // exception) + private final byte[] exceptionBuff = new byte[MAX_EXCEPTIONS * 2]; + + PForUtil(ForUtil forUtil) { + assert ForUtil.BLOCK_SIZE <= 256 : "blocksize must fit in one byte. 
got " + ForUtil.BLOCK_SIZE; + this.forUtil = forUtil; + } + + /** Encode 128 integers from {@code longs} into {@code out}. */ + void encode(long[] longs, DataOutput out) throws IOException { + // Determine the top MAX_EXCEPTIONS + 1 values + final LongHeap top = new LongHeap(MAX_EXCEPTIONS + 1); + for (int i = 0; i <= MAX_EXCEPTIONS; ++i) { + top.push(longs[i]); + } + long topValue = top.top(); + for (int i = MAX_EXCEPTIONS + 1; i < ForUtil.BLOCK_SIZE; ++i) { + if (longs[i] > topValue) { + topValue = top.updateTop(longs[i]); + } + } + + long max = 0L; + for (int i = 1; i <= top.size(); ++i) { + max = Math.max(max, top.get(i)); + } + + final int maxBitsRequired = PackedInts.bitsRequired(max); + // We store the patch on a byte, so we can't decrease the number of bits required by more than 8 + final int patchedBitsRequired = Math.max(PackedInts.bitsRequired(topValue), maxBitsRequired - 8); + int numExceptions = 0; + final long maxUnpatchedValue = (1L << patchedBitsRequired) - 1; + for (int i = 2; i <= top.size(); ++i) { + if (top.get(i) > maxUnpatchedValue) { + numExceptions++; + } + } + final byte[] exceptions = new byte[numExceptions * 2]; + if (numExceptions > 0) { + int exceptionCount = 0; + for (int i = 0; i < ForUtil.BLOCK_SIZE; ++i) { + if (longs[i] > maxUnpatchedValue) { + exceptions[exceptionCount * 2] = (byte) i; + exceptions[exceptionCount * 2 + 1] = (byte) (longs[i] >>> patchedBitsRequired); + longs[i] &= maxUnpatchedValue; + exceptionCount++; + } + } + assert exceptionCount == numExceptions : exceptionCount + " " + numExceptions; + } + + if (allEqual(longs) && maxBitsRequired <= 8) { + for (int i = 0; i < numExceptions; ++i) { + exceptions[2 * i + 1] = (byte) (Byte.toUnsignedLong(exceptions[2 * i + 1]) << patchedBitsRequired); + } + out.writeByte((byte) (numExceptions << 5)); + out.writeVLong(longs[0]); + } else { + final int token = (numExceptions << 5) | patchedBitsRequired; + out.writeByte((byte) token); + forUtil.encode(longs, patchedBitsRequired, out); + } + out.writeBytes(exceptions, exceptions.length); + } + + /** Decode 128 integers into {@code longs}. */ + void decode(DataInput in, long[] longs) throws IOException { + final int token = Byte.toUnsignedInt(in.readByte()); + final int bitsPerValue = token & 0x1f; + final int numExceptions = token >>> 5; + if (bitsPerValue == 0) { + Arrays.fill(longs, 0, ForUtil.BLOCK_SIZE, in.readVLong()); + } else { + forUtil.decode(bitsPerValue, in, longs); + } + for (int i = 0; i < numExceptions; ++i) { + longs[Byte.toUnsignedInt(in.readByte())] |= Byte.toUnsignedLong(in.readByte()) << bitsPerValue; + } + } + + /** Decode deltas, compute the prefix sum and add {@code base} to all decoded longs. 
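+ * For example, with {@code base} = 100 and decoded deltas 2, 1, 3, the first three outputs are 102, 103 and 106. + *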
*/ + void decodeAndPrefixSum(DataInput in, long base, long[] longs) throws IOException { + final int token = Byte.toUnsignedInt(in.readByte()); + final int bitsPerValue = token & 0x1f; + final int numExceptions = token >>> 5; + if (numExceptions == 0) { + // when there are no exceptions to apply, we can be a bit more efficient with our decoding + if (bitsPerValue == 0) { + // a bpv of zero indicates all delta values are the same + long val = in.readVLong(); + if (val == 1) { + // this will often be the common case when working with doc IDs, so we special-case it to + // be slightly more efficient + prefixSumOfOnes(longs, base); + } else { + prefixSumOf(longs, base, val); + } + } else { + // decode the deltas then apply the prefix sum logic + forUtil.decodeTo32(bitsPerValue, in, longs); + prefixSum32(longs, base); + } + } else { + // pack two values per long so we can apply prefixes two-at-a-time + if (bitsPerValue == 0) { + fillSameValue32(longs, in.readVLong()); + } else { + forUtil.decodeTo32(bitsPerValue, in, longs); + } + applyExceptions32(bitsPerValue, numExceptions, in, longs); + prefixSum32(longs, base); + } + } + + /** Skip 128 integers. */ + void skip(DataInput in) throws IOException { + final int token = Byte.toUnsignedInt(in.readByte()); + final int bitsPerValue = token & 0x1f; + final int numExceptions = token >>> 5; + if (bitsPerValue == 0) { + in.readVLong(); + in.skipBytes((numExceptions << 1)); + } else { + in.skipBytes(forUtil.numBytes(bitsPerValue) + (numExceptions << 1)); + } + } + + /** + * Fill {@code longs} with the final values for the case of all deltas being 1. Note this assumes + * there are no exceptions to apply. + */ + private static void prefixSumOfOnes(long[] longs, long base) { + System.arraycopy(IDENTITY_PLUS_ONE, 0, longs, 0, ForUtil.BLOCK_SIZE); + // This loop gets auto-vectorized + for (int i = 0; i < ForUtil.BLOCK_SIZE; ++i) { + longs[i] += base; + } + } + + /** + * Fill {@code longs} with the final values for the case of all deltas being {@code val}. Note + * this assumes there are no exceptions to apply. + */ + private static void prefixSumOf(long[] longs, long base, long val) { + for (int i = 0; i < ForUtil.BLOCK_SIZE; i++) { + longs[i] = (i + 1) * val + base; + } + } + + /** + * Fills the {@code longs} with the provided {@code val}, packed two values per long (using 32 + * bits per value). + */ + private static void fillSameValue32(long[] longs, long val) { + final long token = val << 32 | val; + Arrays.fill(longs, 0, HALF_BLOCK_SIZE, token); + } + + /** Apply the exceptions where the values are packed two-per-long in {@code longs}. */ + private void applyExceptions32(int bitsPerValue, int numExceptions, DataInput in, long[] longs) throws IOException { + in.readBytes(exceptionBuff, 0, numExceptions * 2); + for (int i = 0; i < numExceptions; ++i) { + final int exceptionPos = Byte.toUnsignedInt(exceptionBuff[i * 2]); + final long exception = Byte.toUnsignedLong(exceptionBuff[i * 2 + 1]); + // note that we pack two values per long, so the index is [0..63] for 128 values + final int idx = exceptionPos & 0x3f; // mod 64 + // we need to shift by 1) the bpv, and 2) 32 for positions [0..63] (and no 32 shift for + // [64..127]) + final int shift = bitsPerValue + ((1 ^ (exceptionPos >>> 6)) << 5); + longs[idx] |= exception << shift; + } + } + + /** Apply prefix sum logic where the values are packed two-per-long in {@code longs}. 
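+ * The {@code base} is first folded into the high half of {@code longs[0]}, the packed running sum is computed, the two halves are expanded back into 128 separate longs, and the 64th cumulative value is then added to positions 64..127. + *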
*/ + private static void prefixSum32(long[] longs, long base) { + longs[0] += base << 32; + innerPrefixSum32(longs); + expand32(longs); + final long l = longs[HALF_BLOCK_SIZE - 1]; + for (int i = HALF_BLOCK_SIZE; i < ForUtil.BLOCK_SIZE; ++i) { + longs[i] += l; + } + } + + /** + * Expand the values packed two-per-long in {@code longs} into 128 individual long values stored + * back into {@code longs}. + */ + private static void expand32(long[] longs) { + for (int i = 0; i < 64; ++i) { + final long l = longs[i]; + longs[i] = l >>> 32; + longs[64 + i] = l & 0xFFFFFFFFL; + } + } + + /** + * Unrolled "inner" prefix sum logic where the values are packed two-per-long in {@code longs}. + * After this method, the final values will be correct for all high-order bits (values [0..63]) + * but a final prefix loop will still need to run to "correct" the values of [64..127] in the + * low-order bits, which need the 64th value added to all of them. + */ + private static void innerPrefixSum32(long[] longs) { + longs[1] += longs[0]; + longs[2] += longs[1]; + longs[3] += longs[2]; + longs[4] += longs[3]; + longs[5] += longs[4]; + longs[6] += longs[5]; + longs[7] += longs[6]; + longs[8] += longs[7]; + longs[9] += longs[8]; + longs[10] += longs[9]; + longs[11] += longs[10]; + longs[12] += longs[11]; + longs[13] += longs[12]; + longs[14] += longs[13]; + longs[15] += longs[14]; + longs[16] += longs[15]; + longs[17] += longs[16]; + longs[18] += longs[17]; + longs[19] += longs[18]; + longs[20] += longs[19]; + longs[21] += longs[20]; + longs[22] += longs[21]; + longs[23] += longs[22]; + longs[24] += longs[23]; + longs[25] += longs[24]; + longs[26] += longs[25]; + longs[27] += longs[26]; + longs[28] += longs[27]; + longs[29] += longs[28]; + longs[30] += longs[29]; + longs[31] += longs[30]; + longs[32] += longs[31]; + longs[33] += longs[32]; + longs[34] += longs[33]; + longs[35] += longs[34]; + longs[36] += longs[35]; + longs[37] += longs[36]; + longs[38] += longs[37]; + longs[39] += longs[38]; + longs[40] += longs[39]; + longs[41] += longs[40]; + longs[42] += longs[41]; + longs[43] += longs[42]; + longs[44] += longs[43]; + longs[45] += longs[44]; + longs[46] += longs[45]; + longs[47] += longs[46]; + longs[48] += longs[47]; + longs[49] += longs[48]; + longs[50] += longs[49]; + longs[51] += longs[50]; + longs[52] += longs[51]; + longs[53] += longs[52]; + longs[54] += longs[53]; + longs[55] += longs[54]; + longs[56] += longs[55]; + longs[57] += longs[56]; + longs[58] += longs[57]; + longs[59] += longs[58]; + longs[60] += longs[59]; + longs[61] += longs[60]; + longs[62] += longs[61]; + longs[63] += longs[62]; + } +} diff --git a/server/src/main/java/org/elasticsearch/index/engine/Engine.java b/server/src/main/java/org/elasticsearch/index/engine/Engine.java index 43437529cd301..815ec11af923f 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -1160,7 +1160,19 @@ public void flush(boolean force, boolean waitIfOngoing) throws EngineException { * request is detected, no flush will have occurred and the listener will be completed with a marker * indicating no flush and unknown generation. 
*/ - public abstract void flush(boolean force, boolean waitIfOngoing, ActionListener<FlushResult> listener) throws EngineException; + public final void flush(boolean force, boolean waitIfOngoing, ActionListener<FlushResult> listener) throws EngineException { + try (var ignored = readLock.acquire()) { + ensureOpen(); + flushHoldingLock(force, waitIfOngoing, listener); + } + } + + /** + * The actual implementation of {@link #flush(boolean, boolean, ActionListener)}, which should only be called when holding either {@link + * #readLock} (the normal case) or {@link #writeLock} (if this flush is happening because the shard is closing gracefully) + */ + protected abstract void flushHoldingLock(boolean force, boolean waitIfOngoing, ActionListener<FlushResult> listener) + throws EngineException; /** * Flushes the state of the engine including the transaction log, clearing memory and persisting @@ -1871,10 +1883,8 @@ public void flushAndClose() throws IOException { try { logger.debug("flushing shard on close - this might take some time to sync files to disk"); try { - // TODO we might force a flush in the future since we have the write lock already even though recoveries - // are running. // TODO: We are not waiting for full durability here atm because we are on the cluster state update thread - flush(false, false, ActionListener.noop()); + flushHoldingLock(false, false, ActionListener.noop()); } catch (AlreadyClosedException ex) { logger.debug("engine already closed - skipping flushAndClose"); } diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 58a3c02316430..ca9ed06d4d266 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -2179,7 +2179,7 @@ private boolean shouldPeriodicallyFlush(long flushThresholdSizeInBytes, long flu } @Override - public void flush(boolean force, boolean waitIfOngoing, ActionListener<FlushResult> listener) throws EngineException { + protected void flushHoldingLock(boolean force, boolean waitIfOngoing, ActionListener<FlushResult> listener) throws EngineException { ensureOpen(); if (force && waitIfOngoing == false) { assert false : "wait_if_ongoing must be true for a force flush: force=" + force + " wait_if_ongoing=" + waitIfOngoing; @@ -2188,77 +2188,75 @@ public void flush(boolean force, boolean waitIfOngoing, ActionListener<FlushResult> listener) throws EngineException { - try { - // Only flush if (1) Lucene has uncommitted docs, or (2) forced by caller, or (3) the - // newly created commit points to a different translog generation (can free translog), - // or (4) the local checkpoint information in the last commit is stale, which slows down future recoveries. - boolean hasUncommittedChanges = hasUncommittedChanges(); - if (hasUncommittedChanges - || force - || shouldPeriodicallyFlush() - || getProcessedLocalCheckpoint() > Long.parseLong( - lastCommittedSegmentInfos.userData.get(SequenceNumbers.LOCAL_CHECKPOINT_KEY) - )) { - ensureCanFlush(); - Translog.Location commitLocation = getTranslogLastWriteLocation(); - try { - translog.rollGeneration(); - logger.trace("starting commit for flush; commitTranslog=true"); - long lastFlushTimestamp = relativeTimeInNanosSupplier.getAsLong(); - // Pre-emptively recording the upcoming segment generation so that the live version map archive records - // the correct segment generation for doc IDs that go to the archive while a flush is happening. Otherwise, - // if right after committing the IndexWriter new docs get indexed/updated and a refresh moves them to the archive, - // we clear them from the archive once we see that segment generation on the search shards, but those changes - // were not included in the commit since they happened right after it.
- preCommitSegmentGeneration.set(lastCommittedSegmentInfos.getGeneration() + 1); - commitIndexWriter(indexWriter, translog); - logger.trace("finished commit for flush"); - // we need to refresh in order to clear older version values - refresh("version_table_flush", SearcherScope.INTERNAL, true); - translog.trimUnreferencedReaders(); - // Use the timestamp from when the flush started, but only update it in case of success, so that any exception in - // the above lines would not lead the engine to think that it recently flushed, when it did not. - this.lastFlushTimestamp = lastFlushTimestamp; - } catch (AlreadyClosedException e) { - failOnTragicEvent(e); - throw e; - } catch (Exception e) { - throw new FlushFailedEngineException(shardId, e); - } - refreshLastCommittedSegmentInfos(); - generation = lastCommittedSegmentInfos.getGeneration(); - flushListener.afterFlush(generation, commitLocation); - } else { - generation = lastCommittedSegmentInfos.getGeneration(); + try { + // Only flush if (1) Lucene has uncommitted docs, or (2) forced by caller, or (3) the + // newly created commit points to a different translog generation (can free translog), + // or (4) the local checkpoint information in the last commit is stale, which slows down future recoveries. + boolean hasUncommittedChanges = hasUncommittedChanges(); + if (hasUncommittedChanges + || force + || shouldPeriodicallyFlush() + || getProcessedLocalCheckpoint() > Long.parseLong( + lastCommittedSegmentInfos.userData.get(SequenceNumbers.LOCAL_CHECKPOINT_KEY) + )) { + ensureCanFlush(); + Translog.Location commitLocation = getTranslogLastWriteLocation(); + try { + translog.rollGeneration(); + logger.trace("starting commit for flush; commitTranslog=true"); + long lastFlushTimestamp = relativeTimeInNanosSupplier.getAsLong(); + // Pre-emptively recording the upcoming segment generation so that the live version map archive records + // the correct segment generation for doc IDs that go to the archive while a flush is happening. Otherwise, + // if right after committing the IndexWriter new docs get indexed/updated and a refresh moves them to the archive, + // we clear them from the archive once we see that segment generation on the search shards, but those changes + // were not included in the commit since they happened right after it. + preCommitSegmentGeneration.set(lastCommittedSegmentInfos.getGeneration() + 1); + commitIndexWriter(indexWriter, translog); + logger.trace("finished commit for flush"); + // we need to refresh in order to clear older version values + refresh("version_table_flush", SearcherScope.INTERNAL, true); + translog.trimUnreferencedReaders(); + // Use the timestamp from when the flush started, but only update it in case of success, so that any exception in + // the above lines would not lead the engine to think that it recently flushed, when it did not. 
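The comment above captures a small but easy-to-miss idiom: the timestamp is sampled when the flush starts, but only stored once every subsequent step has succeeded, so a failed flush cannot make the engine look recently flushed. A self-contained sketch of the idiom, with hypothetical names:

final class FlushTimestampSketch {
    private volatile long lastFlushNanos = -1; // "never flushed" sentinel, illustration only

    void flush(Runnable commit) {
        long startNanos = System.nanoTime(); // sample the clock when the flush starts...
        commit.run();                        // ...this may throw, leaving lastFlushNanos untouched...
        lastFlushNanos = startNanos;         // ...and publish the timestamp only after success
    }

    long lastFlushNanos() {
        return lastFlushNanos;
    }
}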
+ this.lastFlushTimestamp = lastFlushTimestamp; + } catch (AlreadyClosedException e) { + failOnTragicEvent(e); + throw e; + } catch (Exception e) { + throw new FlushFailedEngineException(shardId, e); } - } catch (FlushFailedEngineException ex) { - maybeFailEngine("flush", ex); - listener.onFailure(ex); - return; - } catch (Exception e) { - listener.onFailure(e); - return; - } finally { - flushLock.unlock(); - logger.trace("released flush lock"); + refreshLastCommittedSegmentInfos(); + generation = lastCommittedSegmentInfos.getGeneration(); + flushListener.afterFlush(generation, commitLocation); + } else { + generation = lastCommittedSegmentInfos.getGeneration(); } + } catch (FlushFailedEngineException ex) { + maybeFailEngine("flush", ex); + listener.onFailure(ex); + return; + } catch (Exception e) { + listener.onFailure(e); + return; + } finally { + flushLock.unlock(); + logger.trace("released flush lock"); } + // We don't have to do this here; we do it defensively to make sure that even if wall clock time is misbehaving // (e.g., moves backwards) we will at least still sometimes prune deleted tombstones: if (engineConfig.isEnableGcDeletes()) { diff --git a/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java b/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java index aa9bddf414296..2efba314540ee 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java @@ -450,7 +450,7 @@ public boolean shouldPeriodicallyFlush() { } @Override - public void flush(boolean force, boolean waitIfOngoing, ActionListener listener) throws EngineException { + protected void flushHoldingLock(boolean force, boolean waitIfOngoing, ActionListener listener) throws EngineException { listener.onResponse(new FlushResult(true, lastCommittedSegmentInfos.getGeneration())); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java index cbf2dd872da2f..b714eabbd2636 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -130,6 +130,7 @@ public enum MergeReason { private final Supplier mappingParserContextSupplier; private volatile DocumentMapper mapper; + private volatile long mappingVersion; public MapperService( ClusterService clusterService, @@ -298,6 +299,7 @@ public void updateMapping(final IndexMetadata currentIndexMetadata, final IndexM previousMapper = this.mapper; assert assertRefreshIsNotNeeded(previousMapper, type, incomingMapping); this.mapper = newDocumentMapper(incomingMapping, MergeReason.MAPPING_RECOVERY, incomingMappingSource); + this.mappingVersion = newIndexMetadata.getMappingVersion(); } String op = previousMapper != null ? "updated" : "added"; if (logger.isDebugEnabled() && incomingMappingSource.compressed().length < 512) { @@ -590,6 +592,10 @@ public DocumentMapper documentMapper() { return mapper; } + public long mappingVersion() { + return mappingVersion; + } + /** * Returns {@code true} if the given {@code mappingSource} includes a type * as a top-level object. 
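The new volatile mappingVersion field added to MapperService above relies on the safe-publication pattern: the (already volatile) mapper is written first and the version stamp last, so a reader that observes the new version is guaranteed, by the volatile happens-before edges, to also observe the mapper it describes. A stripped-down sketch with placeholder types:

final class MappingHolderSketch {
    record MapperStub(String mappingSource) {}

    private volatile MapperStub mapper;
    private volatile long mappingVersion;

    // Mirrors the update path: assign the mapper first and the version stamp last.
    synchronized void update(MapperStub newMapper, long newVersion) {
        this.mapper = newMapper;
        this.mappingVersion = newVersion;
    }

    MapperStub mapper() {
        return mapper;
    }

    long mappingVersion() {
        return mappingVersion;
    }
}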
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/SourceLoader.java b/server/src/main/java/org/elasticsearch/index/mapper/SourceLoader.java index 607ba2b261f5d..c07821f3c9ae7 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/SourceLoader.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/SourceLoader.java @@ -20,6 +20,7 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.function.Supplier; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -79,12 +80,15 @@ public Set requiredStoredFields() { * Load {@code _source} from doc values. */ class Synthetic implements SourceLoader { - private final SyntheticFieldLoader loader; - private final Map storedFieldLoaders; + private final Supplier syntheticFieldLoaderLeafSupplier; + private final Set requiredStoredFields; public Synthetic(Mapping mapping) { - loader = mapping.syntheticFieldLoader(); - storedFieldLoaders = Map.copyOf(loader.storedFieldLoaders().collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue))); + this.syntheticFieldLoaderLeafSupplier = mapping::syntheticFieldLoader; + this.requiredStoredFields = syntheticFieldLoaderLeafSupplier.get() + .storedFieldLoaders() + .map(Map.Entry::getKey) + .collect(Collectors.toSet()); } @Override @@ -94,19 +98,26 @@ public boolean reordersFieldValues() { @Override public Set requiredStoredFields() { - return storedFieldLoaders.keySet(); + return requiredStoredFields; } @Override public Leaf leaf(LeafReader reader, int[] docIdsInLeaf) throws IOException { - return new SyntheticLeaf(loader.docValuesLoader(reader, docIdsInLeaf)); + SyntheticFieldLoader loader = syntheticFieldLoaderLeafSupplier.get(); + return new SyntheticLeaf(loader, loader.docValuesLoader(reader, docIdsInLeaf)); } - private class SyntheticLeaf implements Leaf { + private static class SyntheticLeaf implements Leaf { + private final SyntheticFieldLoader loader; private final SyntheticFieldLoader.DocValuesLoader docValuesLoader; + private final Map storedFieldLoaders; - private SyntheticLeaf(SyntheticFieldLoader.DocValuesLoader docValuesLoader) { + private SyntheticLeaf(SyntheticFieldLoader loader, SyntheticFieldLoader.DocValuesLoader docValuesLoader) { + this.loader = loader; this.docValuesLoader = docValuesLoader; + this.storedFieldLoaders = Map.copyOf( + loader.storedFieldLoaders().collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)) + ); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/query/MatchNoneQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/MatchNoneQueryBuilder.java index fae3dd4069076..04ae0bb498841 100644 --- a/server/src/main/java/org/elasticsearch/index/query/MatchNoneQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/MatchNoneQueryBuilder.java @@ -39,14 +39,14 @@ public MatchNoneQueryBuilder(String rewriteReason) { */ public MatchNoneQueryBuilder(StreamInput in) throws IOException { super(in); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_040)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { rewriteReason = in.readOptionalString(); } } @Override protected void doWriteTo(StreamOutput out) throws IOException { - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_040)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { out.writeOptionalString(rewriteReason); } } diff --git 
a/server/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java b/server/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java index b2067549fab67..5a2b01838e27b 100644 --- a/server/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java @@ -91,7 +91,7 @@ public final class SimpleQueryStringBuilder extends AbstractQueryBuilder throw new AbortedSnapshotException(); + case PAUSING -> throw new PausedSnapshotException(); default -> { final var message = Strings.format( "Unable to move the shard snapshot status to [FINALIZE]: expecting [STARTED] but got [%s]", @@ -176,8 +187,21 @@ public void addAbortListener(ActionListener listener) { abortListeners.addListener(listener); } - public synchronized void abortIfNotCompleted(final String failure, Consumer> notifyRunner) { - if (stage.compareAndSet(Stage.INIT, Stage.ABORTED) || stage.compareAndSet(Stage.STARTED, Stage.ABORTED)) { + public void abortIfNotCompleted(final String failure, Consumer> notifyRunner) { + abortAndMoveToStageIfNotCompleted(Stage.ABORTED, failure, notifyRunner); + } + + public void pauseIfNotCompleted(Consumer> notifyRunner) { + abortAndMoveToStageIfNotCompleted(Stage.PAUSING, "paused for removal of node holding primary", notifyRunner); + } + + private synchronized void abortAndMoveToStageIfNotCompleted( + final Stage newStage, + final String failure, + final Consumer> notifyRunner + ) { + assert newStage == Stage.ABORTED || newStage == Stage.PAUSING : newStage; + if (stage.compareAndSet(Stage.INIT, newStage) || stage.compareAndSet(Stage.STARTED, newStage)) { this.failure = failure; notifyRunner.accept(abortListeners.map(r -> { Releasables.closeExpectNoException(r); @@ -186,6 +210,18 @@ public synchronized void abortIfNotCompleted(final String failure, Consumer throw new AbortedSnapshotException(); + case PAUSING -> throw new PausedSnapshotException(); } } + public boolean isPaused() { + return stage.get() == Stage.PAUSED; + } + /** * Increments number of processed files */ diff --git a/server/src/main/java/org/elasticsearch/index/store/StoreStats.java b/server/src/main/java/org/elasticsearch/index/store/StoreStats.java index cda87a421bd32..f0df51d4cb78b 100644 --- a/server/src/main/java/org/elasticsearch/index/store/StoreStats.java +++ b/server/src/main/java/org/elasticsearch/index/store/StoreStats.java @@ -33,7 +33,7 @@ public class StoreStats implements Writeable, ToXContentFragment { private long sizeInBytes; private long totalDataSetSizeInBytes; - private long reservedSize; + private long reservedSizeInBytes; public StoreStats() { @@ -47,9 +47,9 @@ public StoreStats(StreamInput in) throws IOException { totalDataSetSizeInBytes = sizeInBytes; } if (in.getTransportVersion().onOrAfter(RESERVED_BYTES_VERSION)) { - reservedSize = in.readZLong(); + reservedSizeInBytes = in.readZLong(); } else { - reservedSize = UNKNOWN_RESERVED_BYTES; + reservedSizeInBytes = UNKNOWN_RESERVED_BYTES; } } @@ -63,7 +63,7 @@ public StoreStats(long sizeInBytes, long totalDataSetSizeInBytes, long reservedS assert reservedSize == UNKNOWN_RESERVED_BYTES || reservedSize >= 0 : reservedSize; this.sizeInBytes = sizeInBytes; this.totalDataSetSizeInBytes = totalDataSetSizeInBytes; - this.reservedSize = reservedSize; + this.reservedSizeInBytes = reservedSize; } public void add(StoreStats stats) { @@ -72,7 +72,7 @@ public void add(StoreStats stats) { } sizeInBytes += stats.sizeInBytes; totalDataSetSizeInBytes += 
stats.totalDataSetSizeInBytes; - reservedSize = ignoreIfUnknown(reservedSize) + ignoreIfUnknown(stats.reservedSize); + reservedSizeInBytes = ignoreIfUnknown(reservedSizeInBytes) + ignoreIfUnknown(stats.reservedSizeInBytes); } private static long ignoreIfUnknown(long reservedSize) { @@ -83,28 +83,20 @@ public long sizeInBytes() { return sizeInBytes; } - public long getSizeInBytes() { - return sizeInBytes; - } - public ByteSizeValue size() { return ByteSizeValue.ofBytes(sizeInBytes); } - public ByteSizeValue getSize() { - return size(); + public long totalDataSetSizeInBytes() { + return totalDataSetSizeInBytes; } public ByteSizeValue totalDataSetSize() { return ByteSizeValue.ofBytes(totalDataSetSizeInBytes); } - public ByteSizeValue getTotalDataSetSize() { - return totalDataSetSize(); - } - - public long totalDataSetSizeInBytes() { - return totalDataSetSizeInBytes; + public long reservedSizeInBytes() { + return reservedSizeInBytes; } /** @@ -113,7 +105,7 @@ public long totalDataSetSizeInBytes() { * the reserved size is unknown. */ public ByteSizeValue getReservedSize() { - return ByteSizeValue.ofBytes(reservedSize); + return ByteSizeValue.ofBytes(reservedSizeInBytes); } @Override @@ -123,7 +115,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(totalDataSetSizeInBytes); } if (out.getTransportVersion().onOrAfter(RESERVED_BYTES_VERSION)) { - out.writeZLong(reservedSize); + out.writeZLong(reservedSizeInBytes); } } @@ -144,12 +136,12 @@ public boolean equals(Object o) { StoreStats that = (StoreStats) o; return sizeInBytes == that.sizeInBytes && totalDataSetSizeInBytes == that.totalDataSetSizeInBytes - && reservedSize == that.reservedSize; + && reservedSizeInBytes == that.reservedSizeInBytes; } @Override public int hashCode() { - return Objects.hash(sizeInBytes, totalDataSetSizeInBytes, reservedSize); + return Objects.hash(sizeInBytes, totalDataSetSizeInBytes, reservedSizeInBytes); } static final class Fields { diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesService.java b/server/src/main/java/org/elasticsearch/indices/IndicesService.java index dbbf2bb98212a..ebe4652230327 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -64,6 +64,7 @@ import org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor; import org.elasticsearch.common.util.iterable.Iterables; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.AbstractRefCounted; import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.core.CheckedFunction; @@ -138,14 +139,12 @@ import org.elasticsearch.search.query.QueryPhase; import org.elasticsearch.search.query.QuerySearchResult; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.XContentType; import java.io.Closeable; import java.io.IOException; -import java.io.InputStream; import java.io.UncheckedIOException; import java.nio.file.Files; import java.util.ArrayList; @@ -1654,8 +1653,7 @@ public AliasFilter buildAliasFilter(ClusterState state, String index, Set filterParser = bytes -> { try ( - InputStream inputStream = bytes.streamInput(); - XContentParser parser = 
XContentFactory.xContentType(inputStream).xContent().createParser(parserConfig, inputStream) + XContentParser parser = XContentHelper.createParserNotCompressed(parserConfig, bytes, XContentHelper.xContentType(bytes)) ) { return parseTopLevelQuery(parser); } diff --git a/server/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java b/server/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java index 9e995c084a555..43e6c02ebe3c6 100644 --- a/server/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java +++ b/server/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java @@ -22,6 +22,7 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.concurrent.ReleasableLock; import org.elasticsearch.core.Booleans; +import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.core.TimeValue; import org.elasticsearch.monitor.jvm.GcNames; import org.elasticsearch.monitor.jvm.JvmInfo; @@ -509,9 +510,12 @@ static OverLimitStrategy createOverLimitStrategy(boolean trackRealMemoryUsage) { if (trackRealMemoryUsage && jvmInfo.useG1GC().equals("true") // messing with GC is "dangerous" so we apply an escape hatch. Not intended to be used. && Booleans.parseBoolean(System.getProperty("es.real_memory_circuit_breaker.g1_over_limit_strategy.enabled"), true)) { - TimeValue lockTimeout = TimeValue.timeValueMillis( - Integer.parseInt(System.getProperty("es.real_memory_circuit_breaker.g1_over_limit_strategy.lock_timeout_ms", "500")) + + long lockTimeoutInMillis = Integer.parseInt( + System.getProperty("es.real_memory_circuit_breaker.g1_over_limit_strategy.lock_timeout_ms", "500") ); + TimeValue lockTimeout = TimeValue.timeValueMillis(lockTimeoutInMillis); + TimeValue fullGCLockTimeout = TimeValue.timeValueMillis(lockTimeoutInMillis); // hardcode interval, do not want any tuning of it outside code changes. 
return new G1OverLimitStrategy( jvmInfo, @@ -519,7 +523,9 @@ static OverLimitStrategy createOverLimitStrategy(boolean trackRealMemoryUsage) { createYoungGcCountSupplier(), System::currentTimeMillis, 500, - lockTimeout + 5000, + lockTimeout, + fullGCLockTimeout ); } else { return memoryUsed -> memoryUsed; @@ -552,10 +558,18 @@ static class G1OverLimitStrategy implements OverLimitStrategy { private final LongSupplier gcCountSupplier; private final LongSupplier timeSupplier; private final TimeValue lockTimeout; + + // The lock acquisition timeout when we are running a full GC + private final TimeValue fullGCLockTimeout; private final long maxHeap; private long lastCheckTime = Long.MIN_VALUE; + private long lastFullGCTime = Long.MIN_VALUE; private final long minimumInterval; + private volatile boolean performingFullGC = false; + + // Minimum interval before triggering another full GC + private final long fullGCMinimumInterval; private long blackHole; private final ReleasableLock lock = new ReleasableLock(new ReentrantLock()); @@ -568,14 +582,18 @@ static class G1OverLimitStrategy implements OverLimitStrategy { LongSupplier gcCountSupplier, LongSupplier timeSupplier, long minimumInterval, - TimeValue lockTimeout + long fullGCMinimumInterval, + TimeValue lockTimeout, + TimeValue fullGCLockTimeout ) { this.lockTimeout = lockTimeout; + this.fullGCLockTimeout = fullGCLockTimeout; assert minimumInterval > 0; this.currentMemoryUsageSupplier = currentMemoryUsageSupplier; this.gcCountSupplier = gcCountSupplier; this.timeSupplier = timeSupplier; this.minimumInterval = minimumInterval; + this.fullGCMinimumInterval = fullGCMinimumInterval; this.maxHeap = jvmInfo.getMem().getHeapMax().getBytes(); long g1RegionSize = jvmInfo.getG1RegionSize(); if (g1RegionSize <= 0) { @@ -602,50 +620,23 @@ static long fallbackRegionSize(JvmInfo jvmInfo) { return regionSize; } + @SuppressForbidden(reason = "Prefer full GC to OOM or CBE") + private static void performFullGC() { + System.gc(); + } + @Override public MemoryUsage overLimit(MemoryUsage memoryUsed) { - boolean leader = false; - int allocationIndex = 0; - long allocationDuration = 0; - long begin = 0; + + TriggerGCResult result = TriggerGCResult.EMPTY; int attemptNoCopy = 0; + try (ReleasableLock locked = lock.tryAcquire(lockTimeout)) { if (locked != null) { attemptNoCopy = ++this.attemptNo; - begin = timeSupplier.getAsLong(); - leader = begin >= lastCheckTime + minimumInterval; - overLimitTriggered(leader); - if (leader) { - long initialCollectionCount = gcCountSupplier.getAsLong(); - logger.info("attempting to trigger G1GC due to high heap usage [{}]", memoryUsed.baseUsage); - long localBlackHole = 0; - // number of allocations, corresponding to (approximately) number of free regions + 1 - int allocationCount = Math.toIntExact((maxHeap - memoryUsed.baseUsage) / g1RegionSize + 1); - // allocations of half-region size becomes single humongous alloc, thus taking up a full region. 
- int allocationSize = (int) (g1RegionSize >> 1); - long maxUsageObserved = memoryUsed.baseUsage; - for (; allocationIndex < allocationCount; ++allocationIndex) { - long current = currentMemoryUsageSupplier.getAsLong(); - if (current >= maxUsageObserved) { - maxUsageObserved = current; - } else { - // we observed a memory drop, so some GC must have occurred - break; - } - if (initialCollectionCount != gcCountSupplier.getAsLong()) { - break; - } - localBlackHole += new byte[allocationSize].hashCode(); - } - - blackHole += localBlackHole; - logger.trace("black hole [{}]", blackHole); - - long now = timeSupplier.getAsLong(); - this.lastCheckTime = now; - allocationDuration = now - begin; - this.attemptNo = 0; - } + result = tryTriggerGC(memoryUsed); + } else { + logger.info("could not acquire lock within {} when attempting to trigger G1GC due to high heap usage", lockTimeout); } } catch (InterruptedException e) { logger.info("could not acquire lock when attempting to trigger G1GC due to high heap usage"); @@ -653,20 +644,45 @@ public MemoryUsage overLimit(MemoryUsage memoryUsed) { // fallthrough } + if (performingFullGC && attemptNoCopy == 0) { + // Another thread is currently performing a full GC, and we were not able to try (lock acquire timeout) + // Since the full GC thread may hold the lock for longer, try again for an additional timeout + logger.info( + "could not acquire lock within {} while another thread was performing a full GC, waiting again for {}", + lockTimeout, + fullGCLockTimeout + ); + try (ReleasableLock locked = lock.tryAcquire(fullGCLockTimeout)) { + if (locked != null) { + attemptNoCopy = ++this.attemptNo; + result = tryTriggerGC(memoryUsed); + } else { + logger.info( + "could not acquire lock within {} when attempting to trigger G1GC due to high heap usage", + fullGCLockTimeout + ); + } + } catch (InterruptedException e) { + logger.info("could not acquire lock when attempting to trigger G1GC due to high heap usage"); + Thread.currentThread().interrupt(); + // fallthrough + } + } + final long current = currentMemoryUsageSupplier.getAsLong(); if (current < memoryUsed.baseUsage) { - if (leader) { + if (result.gcAttempted()) { logger.info( "GC did bring memory usage down, before [{}], after [{}], allocations [{}], duration [{}]", memoryUsed.baseUsage, current, - allocationIndex, - allocationDuration + result.allocationIndex(), + result.allocationDuration() ); } else if (attemptNoCopy < 10 || Long.bitCount(attemptNoCopy) == 1) { logger.info( "memory usage down after [{}], before [{}], after [{}]", - begin - lastCheckTime, + result.timeSinceLastCheck(), memoryUsed.baseUsage, current ); @@ -678,18 +694,18 @@ public MemoryUsage overLimit(MemoryUsage memoryUsed) { memoryUsed.permanentChildUsage ); } else { - if (leader) { + if (result.gcAttempted()) { logger.info( "GC did not bring memory usage down, before [{}], after [{}], allocations [{}], duration [{}]", memoryUsed.baseUsage, current, - allocationIndex, - allocationDuration + result.allocationIndex(), + result.allocationDuration() ); } else if (attemptNoCopy < 10 || Long.bitCount(attemptNoCopy) == 1) { logger.info( "memory usage not down after [{}], before [{}], after [{}]", - begin - lastCheckTime, + result.timeSinceLastCheck(), memoryUsed.baseUsage, current ); @@ -699,6 +715,66 @@ public MemoryUsage overLimit(MemoryUsage memoryUsed) { } } + private TriggerGCResult tryTriggerGC(MemoryUsage memoryUsed) { + long begin = timeSupplier.getAsLong(); + boolean canPerformGC = begin >= lastCheckTime + minimumInterval; + int 
allocationIndex = 0; + + overLimitTriggered(canPerformGC); + + if (canPerformGC) { + long initialCollectionCount = gcCountSupplier.getAsLong(); + logger.info("attempting to trigger G1GC due to high heap usage [{}]", memoryUsed.baseUsage); + long localBlackHole = 0; + // number of allocations, corresponding to (approximately) number of free regions + 1 + int allocationCount = Math.toIntExact((maxHeap - memoryUsed.baseUsage) / g1RegionSize + 1); + // allocations of half-region size becomes single humongous alloc, thus taking up a full region. + int allocationSize = (int) (g1RegionSize >> 1); + long maxUsageObserved = memoryUsed.baseUsage; + for (; allocationIndex < allocationCount; ++allocationIndex) { + long current = currentMemoryUsageSupplier.getAsLong(); + if (current >= maxUsageObserved) { + maxUsageObserved = current; + } else { + // we observed a memory drop, so some GC must have occurred + break; + } + if (initialCollectionCount != gcCountSupplier.getAsLong()) { + break; + } + localBlackHole += new byte[allocationSize].hashCode(); + } + + blackHole += localBlackHole; + logger.trace("black hole [{}]", blackHole); + + this.lastCheckTime = timeSupplier.getAsLong(); + this.attemptNo = 0; + } + + long reclaimedMemory = memoryUsed.baseUsage - currentMemoryUsageSupplier.getAsLong(); + // TODO: use a threshold? Relative to % of memory? + if (reclaimedMemory <= 0) { + long now = timeSupplier.getAsLong(); + boolean canPerformFullGC = now >= lastFullGCTime + fullGCMinimumInterval; + if (canPerformFullGC) { + // Enough time passed between 2 full GC fallbacks + performingFullGC = true; + logger.info("attempt to trigger young GC failed to bring memory down, triggering full GC"); + performFullGC(); + performingFullGC = false; + this.lastFullGCTime = timeSupplier.getAsLong(); + } + } + + long allocationDuration = timeSupplier.getAsLong() - begin; + return new TriggerGCResult(canPerformGC, allocationIndex, allocationDuration, begin - lastCheckTime); + } + + private record TriggerGCResult(boolean gcAttempted, int allocationIndex, long allocationDuration, long timeSinceLastCheck) { + private static final TriggerGCResult EMPTY = new TriggerGCResult(false, 0, 0, 0); + } + void overLimitTriggered(boolean leader) { // for tests to override. } diff --git a/server/src/main/java/org/elasticsearch/ingest/IngestDocument.java b/server/src/main/java/org/elasticsearch/ingest/IngestDocument.java index 1b2d8056ac437..31d947d548ccf 100644 --- a/server/src/main/java/org/elasticsearch/ingest/IngestDocument.java +++ b/server/src/main/java/org/elasticsearch/ingest/IngestDocument.java @@ -19,6 +19,7 @@ import org.elasticsearch.index.mapper.SourceFieldMapper; import org.elasticsearch.index.mapper.VersionFieldMapper; import org.elasticsearch.script.CtxMap; +import org.elasticsearch.script.ScriptService; import org.elasticsearch.script.TemplateScript; import java.time.ZoneOffset; @@ -189,18 +190,6 @@ public T getFieldValue(String path, Class clazz, boolean ignoreMissing) { return cast(path, context, clazz); } - /** - * Returns the value contained in the document with the provided templated path - * @param pathTemplate The path within the document in dot-notation - * @param clazz The expected class of the field value - * @return the value for the provided path if existing, null otherwise - * @throws IllegalArgumentException if the pathTemplate is null, empty, invalid, if the field doesn't exist, - * or if the field that is found at the provided path is not of the expected type. 
- */ - public T getFieldValue(TemplateScript.Factory pathTemplate, Class clazz) { - return getFieldValue(renderTemplate(pathTemplate), clazz); - } - /** * Returns the value contained in the document for the provided path as a byte array. * If the path value is a string, a base64 decode operation will happen. @@ -239,16 +228,6 @@ public byte[] getFieldValueAsBytes(String path, boolean ignoreMissing) { } } - /** - * Checks whether the document contains a value for the provided templated path - * @param fieldPathTemplate the template for the path within the document in dot-notation - * @return true if the document contains a value for the field, false otherwise - * @throws IllegalArgumentException if the path is null, empty or invalid - */ - public boolean hasField(TemplateScript.Factory fieldPathTemplate) { - return hasField(renderTemplate(fieldPathTemplate)); - } - /** * Checks whether the document contains a value for the provided path * @param path The path within the document in dot-notation @@ -329,15 +308,6 @@ public boolean hasField(String path, boolean failOutOfRange) { return false; } - /** - * Removes the field identified by the provided path. - * @param fieldPathTemplate Resolves to the path with dot-notation within the document - * @throws IllegalArgumentException if the path is null, empty, invalid or if the field doesn't exist. - */ - public void removeField(TemplateScript.Factory fieldPathTemplate) { - removeField(renderTemplate(fieldPathTemplate)); - } - /** * Removes the field identified by the provided path. * @param path the path of the field to be removed @@ -468,17 +438,13 @@ public void appendFieldValue(String path, Object value, boolean allowDuplicates) * the provided value will be added to the newly created list. * Supports multiple values too provided in forms of list, in that case all the values will be appended to the * existing (or newly created) list. - * @param fieldPathTemplate Resolves to the path with dot-notation within the document + * @param path The path within the document in dot-notation * @param valueSource The value source that will produce the value or values to append to the existing ones * @param allowDuplicates When false, any values that already exist in the field will not be added * @throws IllegalArgumentException if the path is null, empty or invalid. */ - public void appendFieldValue(TemplateScript.Factory fieldPathTemplate, ValueSource valueSource, boolean allowDuplicates) { - appendFieldValue( - fieldPathTemplate.newInstance(templateModel).execute(), - valueSource.copyAndResolve(templateModel), - allowDuplicates - ); + public void appendFieldValue(String path, ValueSource valueSource, boolean allowDuplicates) { + appendFieldValue(path, valueSource.copyAndResolve(templateModel), allowDuplicates); } /** @@ -499,26 +465,26 @@ public void setFieldValue(String path, Object value) { * Sets the provided value to the provided path in the document. * Any non existing path element will be created. If the last element is a list, * the value will replace the existing list. - * @param fieldPathTemplate Resolves to the path with dot-notation within the document + * @param path The path within the document in dot-notation * @param valueSource The value source that will produce the value to put in for the path key * @throws IllegalArgumentException if the path is null, empty, invalid or if the value cannot be set to the * item identified by the provided path. 
*/ - public void setFieldValue(TemplateScript.Factory fieldPathTemplate, ValueSource valueSource) { - setFieldValue(fieldPathTemplate.newInstance(templateModel).execute(), valueSource.copyAndResolve(templateModel)); + public void setFieldValue(String path, ValueSource valueSource) { + setFieldValue(path, valueSource.copyAndResolve(templateModel)); } /** * Sets the provided value to the provided path in the document. * Any non existing path element will be created. If the last element is a list, * the value will replace the existing list. - * @param fieldPathTemplate Resolves to the path with dot-notation within the document + * @param path The path within the document in dot-notation * @param valueSource The value source that will produce the value to put in for the path key * @param ignoreEmptyValue The flag to determine whether to exit quietly when the value produced by TemplatedValue is null or empty * @throws IllegalArgumentException if the path is null, empty, invalid or if the value cannot be set to the * item identified by the provided path. */ - public void setFieldValue(TemplateScript.Factory fieldPathTemplate, ValueSource valueSource, boolean ignoreEmptyValue) { + public void setFieldValue(String path, ValueSource valueSource, boolean ignoreEmptyValue) { Object value = valueSource.copyAndResolve(templateModel); if (ignoreEmptyValue && valueSource instanceof ValueSource.TemplatedValue) { if (value == null) { @@ -530,20 +496,20 @@ public void setFieldValue(TemplateScript.Factory fieldPathTemplate, ValueSource } } - setFieldValue(fieldPathTemplate.newInstance(templateModel).execute(), value); + setFieldValue(path, value); } /** * Sets the provided value to the provided path in the document. * Any non existing path element will be created. If the last element is a list, * the value will replace the existing list. - * @param fieldPathTemplate Resolves to the path with dot-notation within the document + * @param path The path within the document in dot-notation * @param value The value to put in for the path key * @param ignoreEmptyValue The flag to determine whether to exit quietly when the value produced by TemplatedValue is null or empty * @throws IllegalArgumentException if the path is null, empty, invalid or if the value cannot be set to the * item identified by the provided path. */ - public void setFieldValue(TemplateScript.Factory fieldPathTemplate, Object value, boolean ignoreEmptyValue) { + public void setFieldValue(String path, Object value, boolean ignoreEmptyValue) { if (ignoreEmptyValue) { if (value == null) { return; @@ -555,7 +521,7 @@ public void setFieldValue(TemplateScript.Factory fieldPathTemplate, Object value } } - setFieldValue(fieldPathTemplate.newInstance(templateModel).execute(), value); + setFieldValue(path, value); } private void setFieldValue(String path, Object value, boolean append, boolean allowDuplicates) { @@ -724,6 +690,21 @@ private static T cast(String path, Object object, Class clazz) { ); } + /** + * Renders a template into a string. This allows field access via both literal fields like {@code "foo.bar.baz"} and dynamic fields + * like {@code "{{other_field}}"} (that is, look up the value of the 'other_field' in the document and then use the resulting string as + * the field to operate on). + *
* <p>
+ * See {@link ConfigurationUtils#compileTemplate(String, String, String, String, ScriptService)} and associated methods, which + * create these {@link TemplateScript.Factory} instances. + *
* <p>
+ * Note: for clarity and efficiency reasons, it is advisable to invoke this method outside IngestDocument itself -- fields should be + * rendered by a caller (once), and then passed to an ingest document repeatedly. There are enough methods on IngestDocument that + * operate on String paths already, we don't want to mirror all of them with twin methods that accept a template. + * + * @param template the template or literal string to evaluate + * @return a literal string field path + */ public String renderTemplate(TemplateScript.Factory template) { return template.newInstance(templateModel).execute(); } diff --git a/server/src/main/java/org/elasticsearch/monitor/metrics/NodeMetrics.java b/server/src/main/java/org/elasticsearch/monitor/metrics/NodeMetrics.java index 16b0b9a10d914..a7d93ec7e7d80 100644 --- a/server/src/main/java/org/elasticsearch/monitor/metrics/NodeMetrics.java +++ b/server/src/main/java/org/elasticsearch/monitor/metrics/NodeMetrics.java @@ -62,7 +62,7 @@ private void registerAsyncMetrics(MeterRegistry registry) { this.stats = new NodeStatsCache(TimeValue.timeValueMinutes(1)); metrics.add( registry.registerLongAsyncCounter( - "es.node.stats.indices.get.total", + "es.indices.get.total", "Total number of get operations", "operation", () -> new LongWithAttributes(stats.getOrRefresh().getIndices().getGet().getCount()) @@ -71,7 +71,7 @@ private void registerAsyncMetrics(MeterRegistry registry) { metrics.add( registry.registerLongAsyncCounter( - "es.node.stats.indices.get.time", + "es.indices.get.time", "Time in milliseconds spent performing get operations.", "milliseconds", () -> new LongWithAttributes(stats.getOrRefresh().getIndices().getGet().getTimeInMillis()) @@ -80,7 +80,7 @@ private void registerAsyncMetrics(MeterRegistry registry) { metrics.add( registry.registerLongAsyncCounter( - "es.node.stats.indices.search.fetch.total", + "es.indices.search.fetch.total", "Total number of fetch operations.", "operation", () -> new LongWithAttributes(stats.getOrRefresh().getIndices().getSearch().getTotal().getFetchCount()) @@ -89,7 +89,7 @@ private void registerAsyncMetrics(MeterRegistry registry) { metrics.add( registry.registerLongAsyncCounter( - "es.node.stats.indices.search.fetch.time", + "es.indices.search.fetch.time", "Time in milliseconds spent performing fetch operations.", "milliseconds", () -> new LongWithAttributes(stats.getOrRefresh().getIndices().getSearch().getTotal().getFetchTimeInMillis()) @@ -98,7 +98,7 @@ private void registerAsyncMetrics(MeterRegistry registry) { metrics.add( registry.registerLongAsyncCounter( - "es.node.stats.indices.merge.total", + "es.indices.merge.total", "Total number of merge operations.", "operation", () -> new LongWithAttributes(stats.getOrRefresh().getIndices().getMerge().getTotal()) @@ -107,7 +107,7 @@ private void registerAsyncMetrics(MeterRegistry registry) { metrics.add( registry.registerLongAsyncCounter( - "es.node.stats.indices.merge.time", + "es.indices.merge.time", "Time in milliseconds spent performing merge operations.", "milliseconds", () -> new LongWithAttributes(stats.getOrRefresh().getIndices().getMerge().getTotalTimeInMillis()) @@ -116,7 +116,7 @@ private void registerAsyncMetrics(MeterRegistry registry) { metrics.add( registry.registerLongGauge( - "es.node.stats.indices.translog.operations", + "es.translog.operations.count", "Number of transaction log operations.", "operation", () -> new LongWithAttributes(stats.getOrRefresh().getIndices().getTranslog().estimatedNumberOfOperations()) @@ -125,7 +125,7 @@ private void 
registerAsyncMetrics(MeterRegistry registry) { metrics.add( registry.registerLongGauge( - "es.node.stats.indices.translog.size", + "es.translog.size", "Size, in bytes, of the transaction log.", "bytes", () -> new LongWithAttributes(stats.getOrRefresh().getIndices().getTranslog().getTranslogSizeInBytes()) @@ -134,7 +134,7 @@ private void registerAsyncMetrics(MeterRegistry registry) { metrics.add( registry.registerLongGauge( - "es.node.stats.indices.translog.uncommitted_operations", + "es.translog.uncommitted_operations.count", "Number of uncommitted transaction log operations.", "operations", () -> new LongWithAttributes(stats.getOrRefresh().getIndices().getTranslog().getUncommittedOperations()) @@ -143,7 +143,7 @@ private void registerAsyncMetrics(MeterRegistry registry) { metrics.add( registry.registerLongGauge( - "es.node.stats.indices.translog.uncommitted_size", + "es.translog.uncommitted_operations.size", "Size, in bytes, of uncommitted transaction log operations.", "bytes", () -> new LongWithAttributes(stats.getOrRefresh().getIndices().getTranslog().getUncommittedSizeInBytes()) @@ -152,7 +152,7 @@ private void registerAsyncMetrics(MeterRegistry registry) { metrics.add( registry.registerLongAsyncCounter( - "es.node.stats.indices.translog.earliest_last_modified_age", + "es.translog.earliest_last_modified.time", "Earliest last modified age for the transaction log.", "time", () -> new LongWithAttributes(stats.getOrRefresh().getIndices().getTranslog().getEarliestLastModifiedAge()) @@ -161,7 +161,7 @@ private void registerAsyncMetrics(MeterRegistry registry) { metrics.add( registry.registerLongAsyncCounter( - "es.node.stats.transport.rx_size", + "es.transport.rx.size", "Size, in bytes, of RX packets received by the node during internal cluster communication.", "bytes", () -> new LongWithAttributes(stats.getOrRefresh().getTransport().getRxSize().getBytes()) @@ -170,7 +170,7 @@ private void registerAsyncMetrics(MeterRegistry registry) { metrics.add( registry.registerLongAsyncCounter( - "es.node.stats.transport.tx_size", + "es.transport.tx.size", "Size, in bytes, of TX packets sent by the node during internal cluster communication.", "bytes", () -> new LongWithAttributes(stats.getOrRefresh().getTransport().getTxSize().getBytes()) @@ -179,7 +179,7 @@ private void registerAsyncMetrics(MeterRegistry registry) { metrics.add( registry.registerLongGauge( - "es.node.stats.jvm.mem.pools.young.used", + "es.jvm.mem.pools.young.size", "Memory, in bytes, used by the young generation heap.", "bytes", () -> new LongWithAttributes(bytesUsedByGCGen(stats.getOrRefresh().getJvm().getMem(), GcNames.YOUNG)) @@ -188,7 +188,7 @@ private void registerAsyncMetrics(MeterRegistry registry) { metrics.add( registry.registerLongGauge( - "es.node.stats.jvm.mem.pools.survivor.used", + "es.jvm.mem.pools.survivor.size", "Memory, in bytes, used by the survivor space.", "bytes", () -> new LongWithAttributes(bytesUsedByGCGen(stats.getOrRefresh().getJvm().getMem(), GcNames.SURVIVOR)) @@ -197,7 +197,7 @@ private void registerAsyncMetrics(MeterRegistry registry) { metrics.add( registry.registerLongGauge( - "es.node.stats.jvm.mem.pools.old.used", + "es.jvm.mem.pools.old.size", "Memory, in bytes, used by the old generation heap.", "bytes", () -> new LongWithAttributes(bytesUsedByGCGen(stats.getOrRefresh().getJvm().getMem(), GcNames.OLD)) @@ -206,7 +206,7 @@ private void registerAsyncMetrics(MeterRegistry registry) { metrics.add( registry.registerLongAsyncCounter( - "es.node.stats.fs.io_stats.io_time.total", + 
"es.fs.io_stats.time.total", "The total time in millis spent performing I/O operations across all devices used by Elasticsearch.", "milliseconds", () -> new LongWithAttributes(stats.getOrRefresh().getFs().getIoStats().getTotalIOTimeMillis()) @@ -215,7 +215,7 @@ private void registerAsyncMetrics(MeterRegistry registry) { metrics.add( registry.registerLongAsyncCounter( - "es.node.stats.indices.indexing.docs.total", + "es.indexing.docs.total", "Total number of indexed documents", "documents", () -> new LongWithAttributes(stats.getOrRefresh().getIndices().getIndexing().getTotal().getIndexCount()) @@ -224,7 +224,7 @@ private void registerAsyncMetrics(MeterRegistry registry) { metrics.add( registry.registerLongGauge( - "es.node.stats.indices.indexing.docs.current", + "es.indexing.docs.count", "Current number of indexing documents", "documents", () -> new LongWithAttributes(stats.getOrRefresh().getIndices().getIndexing().getTotal().getIndexCurrent()) @@ -233,7 +233,7 @@ private void registerAsyncMetrics(MeterRegistry registry) { metrics.add( registry.registerLongAsyncCounter( - "es.node.stats.indices.indexing.failed.total", + "es.indices.indexing.failed.total", "Total number of failed indexing operations", "operations", () -> new LongWithAttributes(stats.getOrRefresh().getIndices().getIndexing().getTotal().getIndexFailedCount()) @@ -242,7 +242,7 @@ private void registerAsyncMetrics(MeterRegistry registry) { metrics.add( registry.registerLongAsyncCounter( - "es.node.stats.indices.deletion.docs.total", + "es.indices.deletion.docs.total", "Total number of deleted documents", "documents", () -> new LongWithAttributes(stats.getOrRefresh().getIndices().getIndexing().getTotal().getDeleteCount()) @@ -251,7 +251,7 @@ private void registerAsyncMetrics(MeterRegistry registry) { metrics.add( registry.registerLongGauge( - "es.node.stats.indices.deletion.docs.current", + "es.indices.deletion.docs.count", "Current number of deleting documents", "documents", () -> new LongWithAttributes(stats.getOrRefresh().getIndices().getIndexing().getTotal().getDeleteCurrent()) @@ -260,7 +260,7 @@ private void registerAsyncMetrics(MeterRegistry registry) { metrics.add( registry.registerLongAsyncCounter( - "es.node.stats.indices.indexing.time", + "es.indices.indexing.time", "Total indices indexing time", "milliseconds", () -> new LongWithAttributes(stats.getOrRefresh().getIndices().getIndexing().getTotal().getIndexTime().millis()) @@ -269,7 +269,7 @@ private void registerAsyncMetrics(MeterRegistry registry) { metrics.add( registry.registerLongAsyncCounter( - "es.node.stats.indices.deletion.time", + "es.indices.deletion.time", "Total indices deletion time", "milliseconds", () -> new LongWithAttributes(stats.getOrRefresh().getIndices().getIndexing().getTotal().getDeleteTime().millis()) @@ -278,7 +278,7 @@ private void registerAsyncMetrics(MeterRegistry registry) { metrics.add( registry.registerLongAsyncCounter( - "es.node.stats.indices.throttle.time", + "es.indices.throttle.time", "Total indices throttle time", "milliseconds", () -> new LongWithAttributes(stats.getOrRefresh().getIndices().getIndexing().getTotal().getThrottleTime().millis()) @@ -287,7 +287,7 @@ private void registerAsyncMetrics(MeterRegistry registry) { metrics.add( registry.registerLongAsyncCounter( - "es.node.stats.indices.noop.total", + "es.indices.noop.total", "Total number of noop shard operations", "operations", () -> new LongWithAttributes(stats.getOrRefresh().getIndices().getIndexing().getTotal().getNoopUpdateCount()) @@ -296,7 +296,7 @@ private void 
registerAsyncMetrics(MeterRegistry registry) { metrics.add( registry.registerLongAsyncCounter( - "es.node.stats.indices.indexing.coordinating_operations.memory.size.total", + "es.indexing.coordinating_operations.size", "Total number of memory bytes consumed by coordinating operations", "bytes", () -> new LongWithAttributes(stats.getOrRefresh().getIndexingPressureStats().getTotalCoordinatingBytes()) @@ -305,7 +305,7 @@ private void registerAsyncMetrics(MeterRegistry registry) { metrics.add( registry.registerLongAsyncCounter( - "es.node.stats.indices.indexing.coordinating_operations.count.total", + "es.indexing.coordinating_operations.total", "Total number of coordinating operations", "operations", () -> new LongWithAttributes(stats.getOrRefresh().getIndexingPressureStats().getTotalCoordinatingOps()) @@ -314,7 +314,7 @@ private void registerAsyncMetrics(MeterRegistry registry) { metrics.add( registry.registerLongGauge( - "es.node.stats.indices.indexing.coordinating_operations.memory.size.current", + "es.indexing.coordinating_operations.current.size", "Current number of memory bytes consumed by coordinating operations", "bytes", () -> new LongWithAttributes(stats.getOrRefresh().getIndexingPressureStats().getCurrentCoordinatingBytes()) @@ -323,7 +323,7 @@ private void registerAsyncMetrics(MeterRegistry registry) { metrics.add( registry.registerLongGauge( - "es.node.stats.indices.indexing.coordinating_operations.count.current", + "es.indexing.coordinating_operations.count", "Current number of coordinating operations", "operations", () -> new LongWithAttributes(stats.getOrRefresh().getIndexingPressureStats().getCurrentCoordinatingOps()) @@ -332,7 +332,7 @@ private void registerAsyncMetrics(MeterRegistry registry) { metrics.add( registry.registerLongAsyncCounter( - "es.node.stats.indices.indexing.coordinating_operations.rejections.total", + "es.indexing.coordinating_operations.rejections.total", "Total number of coordinating operations rejections", "operations", () -> new LongWithAttributes(stats.getOrRefresh().getIndexingPressureStats().getCoordinatingRejections()) @@ -341,7 +341,7 @@ private void registerAsyncMetrics(MeterRegistry registry) { metrics.add( registry.registerLongAsyncCounter( - "es.node.stats.indices.indexing.primary_operations.memory.size.total", + "es.indexing.primary_operations.size", "Total number of memory bytes consumed by primary operations", "bytes", () -> new LongWithAttributes(stats.getOrRefresh().getIndexingPressureStats().getTotalPrimaryBytes()) @@ -350,7 +350,7 @@ private void registerAsyncMetrics(MeterRegistry registry) { metrics.add( registry.registerLongAsyncCounter( - "es.node.stats.indices.indexing.primary_operations.count.total", + "es.indexing.primary_operations.total", "Total number of primary operations", "operations", () -> new LongWithAttributes(stats.getOrRefresh().getIndexingPressureStats().getTotalPrimaryOps()) @@ -359,7 +359,7 @@ private void registerAsyncMetrics(MeterRegistry registry) { metrics.add( registry.registerLongGauge( - "es.node.stats.indices.indexing.primary_operations.memory.size.current", + "es.indexing.primary_operations.current.size", "Current number
of primary operations", "operations", () -> new LongWithAttributes(stats.getOrRefresh().getIndexingPressureStats().getCurrentPrimaryOps()) @@ -377,7 +377,7 @@ private void registerAsyncMetrics(MeterRegistry registry) { metrics.add( registry.registerLongAsyncCounter( - "es.node.stats.indices.indexing.primary_operations.rejections.total", + "es.indexing.primary_operations.rejections.total", "Total number of primary operations rejections", "operations", () -> new LongWithAttributes(stats.getOrRefresh().getIndexingPressureStats().getPrimaryRejections()) @@ -386,7 +386,7 @@ private void registerAsyncMetrics(MeterRegistry registry) { metrics.add( registry.registerLongGauge( - "es.node.stats.indices.indexing.memory.limit.current", + "es.indexing.memory.limit.size", "Current memory limit for primary and coordinating operations", "bytes", () -> new LongWithAttributes(stats.getOrRefresh().getIndexingPressureStats().getMemoryLimit()) @@ -441,7 +441,7 @@ private NodeStats getNodeStats() { false, false, false, - false, + true, false ); } diff --git a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java index 10d3d89d617fa..0de5657c0cb1a 100644 --- a/server/src/main/java/org/elasticsearch/node/NodeConstruction.java +++ b/server/src/main/java/org/elasticsearch/node/NodeConstruction.java @@ -25,6 +25,7 @@ import org.elasticsearch.action.ingest.ReservedPipelineAction; import org.elasticsearch.action.search.SearchExecutionStatsCollector; import org.elasticsearch.action.search.SearchPhaseController; +import org.elasticsearch.action.search.SearchTransportAPMMetrics; import org.elasticsearch.action.search.SearchTransportService; import org.elasticsearch.action.support.TransportAction; import org.elasticsearch.action.update.UpdateHelper; @@ -201,6 +202,7 @@ import java.util.Collection; import java.util.LinkedHashSet; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.Objects; import java.util.Optional; @@ -383,6 +385,7 @@ private Settings createEnvironment(Environment initialEnvironment, NodeServicePr ); logger.info("JVM home [{}], using bundled JDK [{}]", System.getProperty("java.home"), jvmInfo.getUsingBundledJdk()); logger.info("JVM arguments {}", Arrays.toString(jvmInfo.getInputArguments())); + logger.info("Default Locale [{}]", Locale.getDefault()); if (Build.current().isProductionRelease() == false) { logger.warn( "version [{}] is a pre-release version of Elasticsearch and is not suitable for production", @@ -870,6 +873,7 @@ record PluginServiceInstances( telemetryProvider.getTracer() ); final ResponseCollectorService responseCollectorService = new ResponseCollectorService(clusterService); + final SearchTransportAPMMetrics searchTransportAPMMetrics = new SearchTransportAPMMetrics(telemetryProvider.getMeterRegistry()); final SearchTransportService searchTransportService = new SearchTransportService( transportService, client, @@ -1046,6 +1050,7 @@ record PluginServiceInstances( b.bind(MetadataCreateIndexService.class).toInstance(metadataCreateIndexService); b.bind(MetadataUpdateSettingsService.class).toInstance(metadataUpdateSettingsService); b.bind(SearchService.class).toInstance(searchService); + b.bind(SearchTransportAPMMetrics.class).toInstance(searchTransportAPMMetrics); b.bind(SearchTransportService.class).toInstance(searchTransportService); b.bind(SearchPhaseController.class).toInstance(new SearchPhaseController(searchService::aggReduceContextBuilder)); 
b.bind(Transport.class).toInstance(transport); diff --git a/server/src/main/java/org/elasticsearch/plugins/ExtensionLoader.java b/server/src/main/java/org/elasticsearch/plugins/ExtensionLoader.java index 7dfb64c989ea2..5cf5f1b92e472 100644 --- a/server/src/main/java/org/elasticsearch/plugins/ExtensionLoader.java +++ b/server/src/main/java/org/elasticsearch/plugins/ExtensionLoader.java @@ -8,9 +8,10 @@ package org.elasticsearch.plugins; -import java.util.Locale; +import org.elasticsearch.core.Strings; + +import java.util.Optional; import java.util.ServiceLoader; -import java.util.function.Supplier; /** * A utility for loading SPI extensions. @@ -20,8 +21,7 @@ public class ExtensionLoader { /** * Loads a single SPI extension. * - * There should be no more than one extension found. If no service providers - * are found, the supplied fallback is used. + * There should be no more than one extension found. * * Note: A ServiceLoader is needed rather than the service class because ServiceLoaders * must be loaded by a module with the {@code uses} declaration. Since this @@ -29,21 +29,22 @@ public class ExtensionLoader { * service classes it may load. Thus, the caller must load the ServiceLoader. * * @param loader a service loader instance to find the singleton extension in - * @param fallback a supplier for an instance if no extensions are found * @return an instance of the extension * @param the SPI extension type */ - public static T loadSingleton(ServiceLoader loader, Supplier fallback) { - var extensions = loader.stream().toList(); - if (extensions.size() > 1) { + public static Optional loadSingleton(ServiceLoader loader) { + var extensions = loader.iterator(); + if (extensions.hasNext() == false) { + return Optional.empty(); + } + var ext = extensions.next(); + if (extensions.hasNext()) { // It would be really nice to give the actual extension class here directly, but that would require passing it // in effectively twice in the call site, once to ServiceLoader, and then to this method directly as well. // It's annoying that ServiceLoader hangs onto the service class, but does not expose it. 
It does at least + print the service class from its toString, which is better than nothing - throw new IllegalStateException(String.format(Locale.ROOT, "More than one extension found for %s", loader)); - } else if (extensions.isEmpty()) { - return fallback.get(); + throw new IllegalStateException(Strings.format("More than one extension found for %s", loader)); } - return extensions.get(0).get(); + return Optional.of(ext); } } diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index c45a048480383..48caafc6bfab8 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -114,6 +114,7 @@ import org.elasticsearch.repositories.ShardSnapshotResult; import org.elasticsearch.repositories.SnapshotShardContext; import org.elasticsearch.snapshots.AbortedSnapshotException; +import org.elasticsearch.snapshots.PausedSnapshotException; import org.elasticsearch.snapshots.SnapshotDeleteListener; import org.elasticsearch.snapshots.SnapshotException; import org.elasticsearch.snapshots.SnapshotId; @@ -177,9 +178,9 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp protected final ThreadPool threadPool; - public static final String STATELESS_SHARD_THREAD_NAME = "stateless_shard"; + public static final String STATELESS_SHARD_READ_THREAD_NAME = "stateless_shard_read"; public static final String STATELESS_TRANSLOG_THREAD_NAME = "stateless_translog"; - public static final String STATELESS_UPLOAD_THREAD_NAME = "stateless_upload"; + public static final String STATELESS_SHARD_WRITE_THREAD_NAME = "stateless_shard_write"; public static final String SNAPSHOT_PREFIX = "snap-"; @@ -1984,9 +1985,9 @@ protected void assertSnapshotOrGenericThread() { ThreadPool.Names.SNAPSHOT, ThreadPool.Names.SNAPSHOT_META, ThreadPool.Names.GENERIC, - STATELESS_SHARD_THREAD_NAME, + STATELESS_SHARD_READ_THREAD_NAME, STATELESS_TRANSLOG_THREAD_NAME, - STATELESS_UPLOAD_THREAD_NAME + STATELESS_SHARD_WRITE_THREAD_NAME ); } @@ -3254,7 +3255,7 @@ private static void ensureNotAborted(ShardId shardId, SnapshotId snapshotId, Ind snapshotStatus.ensureNotAborted(); } catch (Exception e) { logger.debug("[{}] [{}] {} on the file [{}], exiting", shardId, snapshotId, e.getMessage(), fileName); - assert e instanceof AbortedSnapshotException : e; + assert e instanceof AbortedSnapshotException || e instanceof PausedSnapshotException : e; throw e; } } diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/ChecksumBlobStoreFormat.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/ChecksumBlobStoreFormat.java index ca3ff799436c2..e7ea234eae310 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/ChecksumBlobStoreFormat.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/ChecksumBlobStoreFormat.java @@ -25,6 +25,7 @@ import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.lucene.store.IndexOutputOutputStream; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParserUtils; import org.elasticsearch.core.Nullable; import org.elasticsearch.gateway.CorruptStateException; @@ -33,6 +34,7 @@ import org.elasticsearch.xcontent.XContentBuilder; import
diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index c45a048480383..48caafc6bfab8 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -114,6 +114,7 @@ import org.elasticsearch.repositories.ShardSnapshotResult; import org.elasticsearch.repositories.SnapshotShardContext; import org.elasticsearch.snapshots.AbortedSnapshotException; +import org.elasticsearch.snapshots.PausedSnapshotException; import org.elasticsearch.snapshots.SnapshotDeleteListener; import org.elasticsearch.snapshots.SnapshotException; import org.elasticsearch.snapshots.SnapshotId; @@ -177,9 +178,9 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp protected final ThreadPool threadPool; - public static final String STATELESS_SHARD_THREAD_NAME = "stateless_shard"; + public static final String STATELESS_SHARD_READ_THREAD_NAME = "stateless_shard_read"; public static final String STATELESS_TRANSLOG_THREAD_NAME = "stateless_translog"; - public static final String STATELESS_UPLOAD_THREAD_NAME = "stateless_upload"; + public static final String STATELESS_SHARD_WRITE_THREAD_NAME = "stateless_shard_write"; public static final String SNAPSHOT_PREFIX = "snap-"; @@ -1984,9 +1985,9 @@ protected void assertSnapshotOrGenericThread() { ThreadPool.Names.SNAPSHOT, ThreadPool.Names.SNAPSHOT_META, ThreadPool.Names.GENERIC, - STATELESS_SHARD_THREAD_NAME, + STATELESS_SHARD_READ_THREAD_NAME, STATELESS_TRANSLOG_THREAD_NAME, - STATELESS_UPLOAD_THREAD_NAME + STATELESS_SHARD_WRITE_THREAD_NAME ); } @@ -3254,7 +3255,7 @@ private static void ensureNotAborted(ShardId shardId, SnapshotId snapshotId, Ind snapshotStatus.ensureNotAborted(); } catch (Exception e) { logger.debug("[{}] [{}] {} on the file [{}], exiting", shardId, snapshotId, e.getMessage(), fileName); - assert e instanceof AbortedSnapshotException : e; + assert e instanceof AbortedSnapshotException || e instanceof PausedSnapshotException : e; throw e; } } diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/ChecksumBlobStoreFormat.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/ChecksumBlobStoreFormat.java index ca3ff799436c2..e7ea234eae310 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/ChecksumBlobStoreFormat.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/ChecksumBlobStoreFormat.java @@ -25,6 +25,7 @@ import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.lucene.store.IndexOutputOutputStream; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParserUtils; import org.elasticsearch.core.Nullable; import org.elasticsearch.gateway.CorruptStateException; @@ -33,6 +34,7 @@ import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.XContentType; import java.io.FilterInputStream; @@ -144,15 +146,23 @@ public T deserialize(String repoName, NamedXContentRegistry namedXContentRegistr BytesReference bytesReference = Streams.readFully(wrappedStream); deserializeMetaBlobInputStream.verifyFooter(); try ( - XContentParser parser = XContentType.SMILE.xContent() .createParser(namedXContentRegistry, LoggingDeprecationHandler.INSTANCE, bytesReference.streamInput()) + XContentParser parser = XContentHelper.createParserNotCompressed( + XContentParserConfiguration.EMPTY.withRegistry(namedXContentRegistry) + .withDeprecationHandler(LoggingDeprecationHandler.INSTANCE), + bytesReference, + XContentType.SMILE + ) ) { result = reader.apply(repoName, parser); XContentParserUtils.ensureExpectedToken(null, parser.nextToken(), parser); } catch (Exception e) { try ( - XContentParser parser = XContentType.SMILE.xContent() .createParser(namedXContentRegistry, LoggingDeprecationHandler.INSTANCE, bytesReference.streamInput()) + XContentParser parser = XContentHelper.createParserNotCompressed( + XContentParserConfiguration.EMPTY.withRegistry(namedXContentRegistry) + .withDeprecationHandler(LoggingDeprecationHandler.INSTANCE), + bytesReference, + XContentType.SMILE + ) ) { result = fallbackReader.apply(repoName, parser); XContentParserUtils.ensureExpectedToken(null, parser.nextToken(), parser); diff --git a/server/src/main/java/org/elasticsearch/rest/BaseRestHandler.java b/server/src/main/java/org/elasticsearch/rest/BaseRestHandler.java index 77cb51d821843..5ea80ac608b8f 100644 --- a/server/src/main/java/org/elasticsearch/rest/BaseRestHandler.java +++ b/server/src/main/java/org/elasticsearch/rest/BaseRestHandler.java @@ -14,6 +14,8 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.core.CheckedConsumer; +import org.elasticsearch.core.RefCounted; +import org.elasticsearch.core.Releasable; import org.elasticsearch.core.RestApiVersion; import org.elasticsearch.core.Tuple; import org.elasticsearch.plugins.ActionPlugin; @@ -77,30 +79,33 @@ public final long getUsageCount() { @Override public final void handleRequest(RestRequest request, RestChannel channel, NodeClient client) throws Exception { // prepare the request for execution; has the side effect of touching the request parameters - final RestChannelConsumer action = prepareRequest(request, client); - - // validate unconsumed params, but we must exclude params used to format the response - // use a sorted set so the unconsumed parameters appear in a reliable sorted order - final SortedSet<String> unconsumedParams = request.unconsumedParams() - .stream() - .filter(p -> responseParams(request.getRestApiVersion()).contains(p) == false) - .collect(Collectors.toCollection(TreeSet::new)); - - // validate the non-response params - if (unconsumedParams.isEmpty() == false) { - final Set<String> candidateParams = new HashSet<>(); - candidateParams.addAll(request.consumedParams()); - candidateParams.addAll(responseParams(request.getRestApiVersion())); - throw new IllegalArgumentException(unrecognized(request, unconsumedParams, candidateParams, "parameter")); - } + try (var action = prepareRequest(request, client)) { + + // validate unconsumed params, but we must exclude params used to format the response + // use a sorted set so the unconsumed
parameters appear in a reliable sorted order + final SortedSet<String> unconsumedParams = request.unconsumedParams() + .stream() + .filter(p -> responseParams(request.getRestApiVersion()).contains(p) == false) + .collect(Collectors.toCollection(TreeSet::new)); + + // validate the non-response params + if (unconsumedParams.isEmpty() == false) { + final Set<String> candidateParams = new HashSet<>(); + candidateParams.addAll(request.consumedParams()); + candidateParams.addAll(responseParams(request.getRestApiVersion())); + throw new IllegalArgumentException(unrecognized(request, unconsumedParams, candidateParams, "parameter")); + } - if (request.hasContent() && request.isContentConsumed() == false) { - throw new IllegalArgumentException("request [" + request.method() + " " + request.path() + "] does not support having a body"); - } + if (request.hasContent() && request.isContentConsumed() == false) { + throw new IllegalArgumentException( + "request [" + request.method() + " " + request.path() + "] does not support having a body" + ); + } - usageCount.increment(); - // execute the action - action.accept(channel); + usageCount.increment(); + // execute the action + action.accept(channel); + } } protected static String unrecognized( @@ -149,11 +154,18 @@ protected static String unrecognized( } /** - * REST requests are handled by preparing a channel consumer that represents the execution of - * the request against a channel. + * REST requests are handled by preparing a channel consumer that represents the execution of the request against a channel. */ @FunctionalInterface - protected interface RestChannelConsumer extends CheckedConsumer<RestChannel, Exception> {} + protected interface RestChannelConsumer extends CheckedConsumer<RestChannel, Exception>, Releasable { + /** + * Called just after the execution has started (or failed, if the request was invalid), but typically well before the execution has + * completed. This callback should be used to release (refs to) resources that were acquired when constructing this consumer, for + * instance by calling {@link RefCounted#decRef()} on any newly-created transport requests with nontrivial lifecycles. + */ + @Override + default void close() {} + }
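To make the new close() contract concrete, here is a hedged sketch of a handler that acquires a ref-counted transport request while preparing the consumer and releases it in close(); ExampleAction and ExampleRequest are hypothetical stand-ins, not real Elasticsearch classes.

@Override
public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) {
    // Hypothetical RefCounted transport request, acquired while preparing the consumer.
    final ExampleRequest transportRequest = new ExampleRequest(request.param("name"));
    return new RestChannelConsumer() {
        @Override
        public void accept(RestChannel channel) throws Exception {
            // The transport layer takes its own refs while the action is in flight.
            client.execute(ExampleAction.TYPE, transportRequest, new RestToXContentListener<>(channel));
        }

        @Override
        public void close() {
            // Release the ref acquired at construction; handleRequest's try-with-resources
            // invokes this whether accept() ran, threw, or was never reached because
            // parameter validation failed.
            transportRequest.decRef();
        }
    };
}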
/** * Prepare the request for execution. Implementations should consume all request params before diff --git a/server/src/main/java/org/elasticsearch/rest/RestRequest.java b/server/src/main/java/org/elasticsearch/rest/RestRequest.java index eac13e5ef87a6..09eb83d109e3e 100644 --- a/server/src/main/java/org/elasticsearch/rest/RestRequest.java +++ b/server/src/main/java/org/elasticsearch/rest/RestRequest.java @@ -15,6 +15,7 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.Booleans; import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.core.Nullable; @@ -23,6 +24,7 @@ import org.elasticsearch.core.Tuple; import org.elasticsearch.http.HttpChannel; import org.elasticsearch.http.HttpRequest; +import org.elasticsearch.telemetry.tracing.Traceable; import org.elasticsearch.xcontent.ParsedMediaType; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContent; @@ -31,7 +33,6 @@ import org.elasticsearch.xcontent.XContentType; import java.io.IOException; -import java.io.InputStream; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; @@ -46,7 +47,7 @@ import static org.elasticsearch.common.unit.ByteSizeValue.parseBytesSizeValue; import static org.elasticsearch.core.TimeValue.parseTimeValue; -public class RestRequest implements ToXContent.Params { +public class RestRequest implements ToXContent.Params, Traceable { public static final String RESPONSE_RESTRICTED = "responseRestricted"; // tchar pattern as defined by RFC7230 section 3.2.6 @@ -541,12 +542,7 @@ public final XContentParser contentOrSourceParamParser() throws IOException { public final void withContentOrSourceParamParserOrNull(CheckedConsumer<XContentParser, IOException> withParser) throws IOException { if (hasContentOrSourceParam()) { Tuple<XContentType, BytesReference> tuple = contentOrSourceParam(); - BytesReference content = tuple.v2(); - XContentType xContentType = tuple.v1(); - try ( - InputStream stream = content.streamInput(); - XContentParser parser = xContentType.xContent().createParser(parserConfig, stream) - ) { + try (XContentParser parser = XContentHelper.createParserNotCompressed(parserConfig, tuple.v2(), tuple.v1())) { withParser.accept(parser); } } else { @@ -631,6 +627,11 @@ public void markResponseRestricted(String restriction) { consumedParams.add(RESPONSE_RESTRICTED); } + @Override + public String getSpanId() { + return "rest-" + getRequestId(); + } + public static class MediaTypeHeaderException extends RuntimeException { private final String message; diff --git a/server/src/main/java/org/elasticsearch/rest/action/RestFieldCapabilitiesAction.java b/server/src/main/java/org/elasticsearch/rest/action/RestFieldCapabilitiesAction.java index eae8af0601557..e62fdf33db456 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/RestFieldCapabilitiesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/RestFieldCapabilitiesAction.java @@ -72,7 +72,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC } return channel -> { RestCancellableNodeClient cancelClient = new RestCancellableNodeClient(client, request.getHttpChannel()); - cancelClient.fieldCaps(fieldRequest, new RestChunkedToXContentListener<>(channel)); + cancelClient.fieldCaps(fieldRequest, new RestRefCountedChunkedToXContentListener<>(channel)); }; }
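The parser changes here and in the handlers below all apply one pattern: the XContentParser returned by request.contentParser() is now closed by the code that opened it. Sketched in isolation, with ExampleBody and ExampleAction as hypothetical placeholders:

@Override
public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException {
    final ExampleBody body;
    // try-with-resources releases the parser, and any pooled buffers behind it, even if parsing throws
    try (XContentParser parser = request.contentParser()) {
        body = ExampleBody.parse(parser);
    }
    return channel -> client.execute(ExampleAction.TYPE, body.toRequest(), new RestToXContentListener<>(channel));
}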
diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCloneSnapshotAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCloneSnapshotAction.java index c6790e7de21e6..7785680a3ca8d 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCloneSnapshotAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestCloneSnapshotAction.java @@ -43,15 +43,17 @@ public String getName() { @Override public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { - final Map<String, Object> source = request.contentParser().map(); - final CloneSnapshotRequest cloneSnapshotRequest = new CloneSnapshotRequest( - request.param("repository"), - request.param("snapshot"), - request.param("target_snapshot"), - XContentMapValues.nodeStringArrayValue(source.getOrDefault("indices", Collections.emptyList())) - ); - cloneSnapshotRequest.masterNodeTimeout(request.paramAsTime("master_timeout", cloneSnapshotRequest.masterNodeTimeout())); - cloneSnapshotRequest.indicesOptions(IndicesOptions.fromMap(source, cloneSnapshotRequest.indicesOptions())); - return channel -> client.admin().cluster().cloneSnapshot(cloneSnapshotRequest, new RestToXContentListener<>(channel)); + try (var parser = request.contentParser()) { + final Map<String, Object> source = parser.map(); + final CloneSnapshotRequest cloneSnapshotRequest = new CloneSnapshotRequest( + request.param("repository"), + request.param("snapshot"), + request.param("target_snapshot"), + XContentMapValues.nodeStringArrayValue(source.getOrDefault("indices", Collections.emptyList())) + ); + cloneSnapshotRequest.masterNodeTimeout(request.paramAsTime("master_timeout", cloneSnapshotRequest.masterNodeTimeout())); + cloneSnapshotRequest.indicesOptions(IndicesOptions.fromMap(source, cloneSnapshotRequest.indicesOptions())); + return channel -> client.admin().cluster().cloneSnapshot(cloneSnapshotRequest, new RestToXContentListener<>(channel)); + } } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterAllocationExplainAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterAllocationExplainAction.java index 896c341953e73..607ae3f554fe8 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterAllocationExplainAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterAllocationExplainAction.java @@ -14,7 +14,7 @@ import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; -import org.elasticsearch.rest.action.RestChunkedToXContentListener; +import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener; import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; @@ -58,6 +58,6 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC req.includeYesDecisions(request.paramAsBoolean("include_yes_decisions", false)); req.includeDiskInfo(request.paramAsBoolean("include_disk_info", false)); - return channel -> client.admin().cluster().allocationExplain(req, new RestChunkedToXContentListener<>(channel)); + return channel -> client.admin().cluster().allocationExplain(req, new RestRefCountedChunkedToXContentListener<>(channel)); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterRerouteAction.java
b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterRerouteAction.java index 9058df2336cc5..468cf30c8de54 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterRerouteAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestClusterRerouteAction.java @@ -19,7 +19,7 @@ import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; -import org.elasticsearch.rest.action.RestChunkedToXContentListener; +import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener; import org.elasticsearch.xcontent.ObjectParser; import org.elasticsearch.xcontent.ObjectParser.ValueType; import org.elasticsearch.xcontent.ParseField; @@ -84,7 +84,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC if (metric == null) { request.params().put("metric", DEFAULT_METRICS); } - return channel -> client.admin().cluster().reroute(clusterRerouteRequest, new RestChunkedToXContentListener<>(channel)); + return channel -> client.admin().cluster().reroute(clusterRerouteRequest, new RestRefCountedChunkedToXContentListener<>(channel)); } @Override diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetDesiredBalanceAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetDesiredBalanceAction.java index 312ce353b6d42..a93c1e3d04fd6 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetDesiredBalanceAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetDesiredBalanceAction.java @@ -13,7 +13,7 @@ import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.action.RestChunkedToXContentListener; +import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener; import java.io.IOException; import java.util.List; @@ -35,7 +35,7 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli return restChannel -> client.execute( TransportGetDesiredBalanceAction.TYPE, new DesiredBalanceRequest(), - new RestChunkedToXContentListener<>(restChannel) + new RestRefCountedChunkedToXContentListener<>(restChannel) ); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetSnapshotsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetSnapshotsAction.java index ae1cfcd7371fb..5cc77d3d50a01 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetSnapshotsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestGetSnapshotsAction.java @@ -16,7 +16,7 @@ import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestCancellableNodeClient; -import org.elasticsearch.rest.action.RestChunkedToXContentListener; +import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener; import org.elasticsearch.search.sort.SortOrder; import java.io.IOException; @@ -82,6 +82,6 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC getSnapshotsRequest.masterNodeTimeout(request.paramAsTime("master_timeout", getSnapshotsRequest.masterNodeTimeout())); return channel -> new RestCancellableNodeClient(client, request.getHttpChannel()).admin() .cluster() - 
.getSnapshots(getSnapshotsRequest, new RestChunkedToXContentListener<>(channel)); + .getSnapshots(getSnapshotsRequest, new RestRefCountedChunkedToXContentListener<>(channel)); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestNodesStatsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestNodesStatsAction.java index fef7dc0cbdd37..d311f39f42f7a 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestNodesStatsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestNodesStatsAction.java @@ -22,7 +22,7 @@ import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestCancellableNodeClient; -import org.elasticsearch.rest.action.RestChunkedToXContentListener; +import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener; import java.io.IOException; import java.util.Collections; @@ -189,7 +189,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC return channel -> new RestCancellableNodeClient(client, request.getHttpChannel()).admin() .cluster() - .nodesStats(nodesStatsRequest, new RestChunkedToXContentListener<>(channel)); + .nodesStats(nodesStatsRequest, new RestRefCountedChunkedToXContentListener<>(channel)); private final Set<String> RESPONSE_PARAMS = Collections.singleton("level"); diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPendingClusterTasksAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPendingClusterTasksAction.java index e9f9b9bf4327d..e5745ec89533c 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPendingClusterTasksAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestPendingClusterTasksAction.java @@ -15,7 +15,7 @@ import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; -import org.elasticsearch.rest.action.RestChunkedToXContentListener; +import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener; import java.io.IOException; import java.util.List; @@ -43,7 +43,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC return channel -> client.execute( TransportPendingClusterTasksAction.TYPE, pendingClusterTasksRequest, - new RestChunkedToXContentListener<>(channel) + new RestRefCountedChunkedToXContentListener<>(channel) ); } }
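The listener swaps in this and the surrounding files share one motivation: chunked responses are serialized incrementally after the action returns, so the listener has to keep the ref-counted response alive until the last chunk has been flushed, which is roughly what RestRefCountedChunkedToXContentListener adds over RestChunkedToXContentListener. The call-site shape is unchanged; SomeAction and someRequest below are placeholders:

return channel -> client.execute(
    SomeAction.TYPE,
    someRequest,
    // retains the response while its chunks are streamed, then releases it
    new RestRefCountedChunkedToXContentListener<>(channel)
);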
diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestReloadSecureSettingsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestReloadSecureSettingsAction.java index 86ac7088642d1..b5b0a65a98d6b 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestReloadSecureSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestReloadSecureSettingsAction.java @@ -9,8 +9,8 @@ package org.elasticsearch.rest.action.admin.cluster; import org.elasticsearch.action.admin.cluster.node.reload.NodesReloadSecureSettingsRequest; -import org.elasticsearch.action.admin.cluster.node.reload.NodesReloadSecureSettingsRequestBuilder; import org.elasticsearch.action.admin.cluster.node.reload.NodesReloadSecureSettingsResponse; +import org.elasticsearch.action.admin.cluster.node.reload.TransportNodesReloadSecureSettingsAction; import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.SecureString; @@ -58,30 +58,31 @@ public List<Route> routes() { @Override public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { final String[] nodesIds = Strings.splitStringByCommaToArray(request.param("nodeId")); - final NodesReloadSecureSettingsRequestBuilder nodesRequestBuilder = client.admin() - .cluster() - .prepareReloadSecureSettings() - .setTimeout(request.param("timeout")) - .setNodesIds(nodesIds); + final NodesReloadSecureSettingsRequest reloadSecureSettingsRequest = new NodesReloadSecureSettingsRequest(nodesIds); + reloadSecureSettingsRequest.timeout(request.param("timeout")); request.withContentOrSourceParamParserOrNull(parser -> { if (parser != null) { final NodesReloadSecureSettingsRequest nodesRequest = PARSER.parse(parser, null); - nodesRequestBuilder.setSecureStorePassword(nodesRequest.getSecureSettingsPassword()); + reloadSecureSettingsRequest.setSecureStorePassword(nodesRequest.getSecureSettingsPassword()); } }); - return channel -> nodesRequestBuilder.execute(new RestBuilderListener<NodesReloadSecureSettingsResponse>(channel) { - @Override - public RestResponse buildResponse(NodesReloadSecureSettingsResponse response, XContentBuilder builder) throws Exception { - builder.startObject(); - RestActions.buildNodesHeader(builder, channel.request(), response); - builder.field("cluster_name", response.getClusterName().value()); - response.toXContent(builder, channel.request()); - builder.endObject(); - nodesRequestBuilder.request().close(); - return new RestResponse(RestStatus.OK, builder); + return channel -> client.execute( + TransportNodesReloadSecureSettingsAction.TYPE, + reloadSecureSettingsRequest, + new RestBuilderListener<>(channel) { + @Override + public RestResponse buildResponse(NodesReloadSecureSettingsResponse response, XContentBuilder builder) throws Exception { + builder.startObject(); + RestActions.buildNodesHeader(builder, channel.request(), response); + builder.field("cluster_name", response.getClusterName().value()); + response.toXContent(builder, channel.request()); + builder.endObject(); + reloadSecureSettingsRequest.close(); + return new RestResponse(RestStatus.OK, builder); + } } - }); + ); } @Override diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestSnapshotsStatusAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestSnapshotsStatusAction.java index b5e25c32824ed..3baebb25c4dc2 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestSnapshotsStatusAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestSnapshotsStatusAction.java @@ -16,7 +16,7 @@ import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestCancellableNodeClient; -import org.elasticsearch.rest.action.RestChunkedToXContentListener; +import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener; import java.io.IOException; import java.util.List; @@ -56,6 +56,6 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC snapshotsStatusRequest.masterNodeTimeout(request.paramAsTime("master_timeout", snapshotsStatusRequest.masterNodeTimeout())); return channel -> new RestCancellableNodeClient(client, request.getHttpChannel()).admin() .cluster() - .snapshotsStatus(snapshotsStatusRequest, new
RestRefCountedChunkedToXContentListener<>(channel)); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/dangling/RestDeleteDanglingIndexAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/dangling/RestDeleteDanglingIndexAction.java index 4213f42549cd7..b8a7179f8cfb7 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/dangling/RestDeleteDanglingIndexAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/dangling/RestDeleteDanglingIndexAction.java @@ -9,6 +9,7 @@ package org.elasticsearch.rest.action.admin.cluster.dangling; import org.elasticsearch.action.admin.indices.dangling.delete.DeleteDanglingIndexRequest; +import org.elasticsearch.action.admin.indices.dangling.delete.TransportDeleteDanglingIndexAction; import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; @@ -42,6 +43,10 @@ public RestChannelConsumer prepareRequest(final RestRequest request, NodeClient deleteRequest.timeout(request.paramAsTime("timeout", deleteRequest.timeout())); deleteRequest.masterNodeTimeout(request.paramAsTime("master_timeout", deleteRequest.masterNodeTimeout())); - return channel -> client.admin().cluster().deleteDanglingIndex(deleteRequest, new RestToXContentListener<>(channel, r -> ACCEPTED)); + return channel -> client.execute( + TransportDeleteDanglingIndexAction.TYPE, + deleteRequest, + new RestToXContentListener<>(channel, r -> ACCEPTED) + ); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/dangling/RestImportDanglingIndexAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/dangling/RestImportDanglingIndexAction.java index 7f481c16118bd..9fa46fd9b0a3c 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/dangling/RestImportDanglingIndexAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/dangling/RestImportDanglingIndexAction.java @@ -9,6 +9,7 @@ package org.elasticsearch.rest.action.admin.cluster.dangling; import org.elasticsearch.action.admin.indices.dangling.import_index.ImportDanglingIndexRequest; +import org.elasticsearch.action.admin.indices.dangling.import_index.TransportImportDanglingIndexAction; import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; @@ -41,6 +42,10 @@ public RestChannelConsumer prepareRequest(final RestRequest request, NodeClient importRequest.timeout(request.paramAsTime("timeout", importRequest.timeout())); importRequest.masterNodeTimeout(request.paramAsTime("master_timeout", importRequest.masterNodeTimeout())); - return channel -> client.admin().cluster().importDanglingIndex(importRequest, new RestToXContentListener<>(channel, r -> ACCEPTED)); + return channel -> client.execute( + TransportImportDanglingIndexAction.TYPE, + importRequest, + new RestToXContentListener<>(channel, r -> ACCEPTED) + ); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/dangling/RestListDanglingIndicesAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/dangling/RestListDanglingIndicesAction.java index 8be7b68624bb4..8f7d9893019a4 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/dangling/RestListDanglingIndicesAction.java +++ 
b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/dangling/RestListDanglingIndicesAction.java @@ -9,6 +9,7 @@ package org.elasticsearch.rest.action.admin.cluster.dangling; import org.elasticsearch.action.admin.indices.dangling.list.ListDanglingIndicesRequest; +import org.elasticsearch.action.admin.indices.dangling.list.TransportListDanglingIndicesAction; import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; @@ -32,9 +33,10 @@ public String getName() { @Override public BaseRestHandler.RestChannelConsumer prepareRequest(final RestRequest request, NodeClient client) throws IOException { - final ListDanglingIndicesRequest danglingIndicesRequest = new ListDanglingIndicesRequest(); - return channel -> client.admin() - .cluster() - .listDanglingIndices(danglingIndicesRequest, new RestActions.NodesResponseRestListener<>(channel)); + return channel -> client.execute( + TransportListDanglingIndicesAction.TYPE, + new ListDanglingIndicesRequest(), + new RestActions.NodesResponseRestListener<>(channel) + ); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestFieldUsageStatsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestFieldUsageStatsAction.java index ea2a867d35e7c..02785c8ab43eb 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestFieldUsageStatsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestFieldUsageStatsAction.java @@ -19,7 +19,7 @@ import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestCancellableNodeClient; -import org.elasticsearch.rest.action.RestChunkedToXContentListener; +import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener; import java.io.IOException; import java.util.List; @@ -45,7 +45,7 @@ public BaseRestHandler.RestChannelConsumer prepareRequest(final RestRequest requ fusRequest.fields(request.paramAsStringArray("fields", fusRequest.fields())); return channel -> { final RestCancellableNodeClient cancelClient = new RestCancellableNodeClient(client, request.getHttpChannel()); - cancelClient.execute(FieldUsageStatsAction.INSTANCE, fusRequest, new RestChunkedToXContentListener<>(channel)); + cancelClient.execute(FieldUsageStatsAction.INSTANCE, fusRequest, new RestRefCountedChunkedToXContentListener<>(channel)); }; } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetIndicesAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetIndicesAction.java index 3550a7151ce43..db10bdd985d59 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetIndicesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetIndicesAction.java @@ -20,7 +20,7 @@ import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestCancellableNodeClient; -import org.elasticsearch.rest.action.RestChunkedToXContentListener; +import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener; import java.io.IOException; import java.util.List; @@ -72,7 +72,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC final var httpChannel = request.getHttpChannel(); return channel -> new RestCancellableNodeClient(client, httpChannel).admin() .indices() - 
.getIndex(getIndexRequest, new RestChunkedToXContentListener<>(channel)); + .getIndex(getIndexRequest, new RestRefCountedChunkedToXContentListener<>(channel)); } /** diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetMappingAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetMappingAction.java index 0a8ae5ae90c66..065399076c12a 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetMappingAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetMappingAction.java @@ -21,7 +21,7 @@ import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestCancellableNodeClient; -import org.elasticsearch.rest.action.RestChunkedToXContentListener; +import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener; import java.io.IOException; import java.util.List; @@ -90,6 +90,6 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC final HttpChannel httpChannel = request.getHttpChannel(); return channel -> new RestCancellableNodeClient(client, httpChannel).admin() .indices() - .getMappings(getMappingsRequest, new RestChunkedToXContentListener<>(channel)); + .getMappings(getMappingsRequest, new RestRefCountedChunkedToXContentListener<>(channel)); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetSettingsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetSettingsAction.java index 4512b84de6af5..af72e66f6127d 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetSettingsAction.java @@ -17,7 +17,7 @@ import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; -import org.elasticsearch.rest.action.RestChunkedToXContentListener; +import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener; import java.io.IOException; import java.util.List; @@ -56,6 +56,6 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC .names(names); getSettingsRequest.local(request.paramAsBoolean("local", getSettingsRequest.local())); getSettingsRequest.masterNodeTimeout(request.paramAsTime("master_timeout", getSettingsRequest.masterNodeTimeout())); - return channel -> client.admin().indices().getSettings(getSettingsRequest, new RestChunkedToXContentListener<>(channel)); + return channel -> client.admin().indices().getSettings(getSettingsRequest, new RestRefCountedChunkedToXContentListener<>(channel)); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesSegmentsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesSegmentsAction.java index e293cf86d455a..21982c113ac3b 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesSegmentsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesSegmentsAction.java @@ -19,7 +19,7 @@ import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestCancellableNodeClient; -import org.elasticsearch.rest.action.RestChunkedToXContentListener; +import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener; import java.io.IOException; import 
java.util.List; @@ -58,6 +58,6 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC indicesSegmentsRequest.indicesOptions(IndicesOptions.fromRequest(request, indicesSegmentsRequest.indicesOptions())); return channel -> new RestCancellableNodeClient(client, request.getHttpChannel()).admin() .indices() - .segments(indicesSegmentsRequest, new RestChunkedToXContentListener<>(channel)); + .segments(indicesSegmentsRequest, new RestRefCountedChunkedToXContentListener<>(channel)); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesShardStoresAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesShardStoresAction.java index 11831fdae80fb..854ac937113d8 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesShardStoresAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesShardStoresAction.java @@ -8,15 +8,15 @@ package org.elasticsearch.rest.action.admin.indices; -import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresAction; import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresRequest; +import org.elasticsearch.action.admin.indices.shards.TransportIndicesShardStoresAction; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.common.Strings; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestCancellableNodeClient; -import org.elasticsearch.rest.action.RestChunkedToXContentListener; +import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener; import java.io.IOException; import java.util.List; @@ -24,7 +24,7 @@ import static org.elasticsearch.rest.RestRequest.Method.GET; /** - * Rest action for {@link IndicesShardStoresAction} + * Rest action for {@link TransportIndicesShardStoresAction} */ public class RestIndicesShardStoresAction extends BaseRestHandler { @@ -55,8 +55,10 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC request.paramAsInt("max_concurrent_shard_requests", indicesShardStoresRequest.maxConcurrentShardRequests()) ); indicesShardStoresRequest.indicesOptions(IndicesOptions.fromRequest(request, indicesShardStoresRequest.indicesOptions())); - return channel -> new RestCancellableNodeClient(client, request.getHttpChannel()).admin() - .indices() - .shardStores(indicesShardStoresRequest, new RestChunkedToXContentListener<>(channel)); + return channel -> new RestCancellableNodeClient(client, request.getHttpChannel()).execute( + TransportIndicesShardStoresAction.TYPE, + indicesShardStoresRequest, + new RestRefCountedChunkedToXContentListener<>(channel) + ); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesStatsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesStatsAction.java index 90aa366d4ecdf..f0aa614d73677 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesStatsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestIndicesStatsAction.java @@ -22,7 +22,7 @@ import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestCancellableNodeClient; -import org.elasticsearch.rest.action.RestChunkedToXContentListener; +import 
org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener; import org.elasticsearch.rest.action.document.RestMultiTermVectorsAction; import java.io.IOException; @@ -146,7 +146,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC return channel -> new RestCancellableNodeClient(client, request.getHttpChannel()).admin() .indices() - .stats(indicesStatsRequest, new RestChunkedToXContentListener<>(channel)); + .stats(indicesStatsRequest, new RestRefCountedChunkedToXContentListener<>(channel)); } @Override diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutComponentTemplateAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutComponentTemplateAction.java index 337283ebf1958..fd6f529d876a2 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutComponentTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutComponentTemplateAction.java @@ -43,7 +43,9 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC putRequest.masterNodeTimeout(request.paramAsTime("master_timeout", putRequest.masterNodeTimeout())); putRequest.create(request.paramAsBoolean("create", false)); putRequest.cause(request.param("cause", "api")); - putRequest.componentTemplate(ComponentTemplate.parse(request.contentParser())); + try (var parser = request.contentParser()) { + putRequest.componentTemplate(ComponentTemplate.parse(parser)); + } return channel -> client.execute(PutComponentTemplateAction.INSTANCE, putRequest, new RestToXContentListener<>(channel)); } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutComposableIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutComposableIndexTemplateAction.java index afc291bc6dc26..937022f54dca3 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutComposableIndexTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestPutComposableIndexTemplateAction.java @@ -43,7 +43,9 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC putRequest.masterNodeTimeout(request.paramAsTime("master_timeout", putRequest.masterNodeTimeout())); putRequest.create(request.paramAsBoolean("create", false)); putRequest.cause(request.param("cause", "api")); - putRequest.indexTemplate(ComposableIndexTemplate.parse(request.contentParser())); + try (var parser = request.contentParser()) { + putRequest.indexTemplate(ComposableIndexTemplate.parse(parser)); + } return channel -> client.execute(PutComposableIndexTemplateAction.INSTANCE, putRequest, new RestToXContentListener<>(channel)); } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestRecoveryAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestRecoveryAction.java index 036d91c000ffe..8db8366f0b9f8 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestRecoveryAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestRecoveryAction.java @@ -17,7 +17,7 @@ import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestCancellableNodeClient; -import org.elasticsearch.rest.action.RestChunkedToXContentListener; +import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener; import 
java.io.IOException; import java.util.List; @@ -54,6 +54,6 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC recoveryRequest.indicesOptions(IndicesOptions.fromRequest(request, recoveryRequest.indicesOptions())); return channel -> new RestCancellableNodeClient(client, request.getHttpChannel()).admin() .indices() - .recoveries(recoveryRequest, new RestChunkedToXContentListener<>(channel)); + .recoveries(recoveryRequest, new RestRefCountedChunkedToXContentListener<>(channel)); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestSimulateIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestSimulateIndexTemplateAction.java index 420d7a8d70f58..e140628e9bc0d 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestSimulateIndexTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestSimulateIndexTemplateAction.java @@ -48,7 +48,9 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC PutComposableIndexTemplateAction.Request indexTemplateRequest = new PutComposableIndexTemplateAction.Request( "simulating_template" ); - indexTemplateRequest.indexTemplate(ComposableIndexTemplate.parse(request.contentParser())); + try (var parser = request.contentParser()) { + indexTemplateRequest.indexTemplate(ComposableIndexTemplate.parse(parser)); + } indexTemplateRequest.create(request.paramAsBoolean("create", false)); indexTemplateRequest.cause(request.param("cause", "api")); diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestSimulateTemplateAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestSimulateTemplateAction.java index d458c309933a8..cb513f737f3d0 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestSimulateTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestSimulateTemplateAction.java @@ -44,7 +44,9 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli PutComposableIndexTemplateAction.Request indexTemplateRequest = new PutComposableIndexTemplateAction.Request( "simulating_template" ); - indexTemplateRequest.indexTemplate(ComposableIndexTemplate.parse(request.contentParser())); + try (var parser = request.contentParser()) { + indexTemplateRequest.indexTemplate(ComposableIndexTemplate.parse(parser)); + } indexTemplateRequest.create(request.paramAsBoolean("create", false)); indexTemplateRequest.cause(request.param("cause", "api")); diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpdateSettingsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpdateSettingsAction.java index 74eddca033398..779cb229ca48b 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpdateSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestUpdateSettingsAction.java @@ -47,7 +47,9 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC updateSettingsRequest.masterNodeTimeout(request.paramAsTime("master_timeout", updateSettingsRequest.masterNodeTimeout())); updateSettingsRequest.indicesOptions(IndicesOptions.fromRequest(request, updateSettingsRequest.indicesOptions())); updateSettingsRequest.reopen(request.paramAsBoolean("reopen", false)); - updateSettingsRequest.fromXContent(request.contentParser()); + try (var 
parser = request.contentParser()) { + updateSettingsRequest.fromXContent(parser); + } return channel -> client.admin().indices().updateSettings(updateSettingsRequest, new RestToXContentListener<>(channel)); } diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestAllocationAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestAllocationAction.java index 5e9b2c8452579..068c809554631 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestAllocationAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestAllocationAction.java @@ -130,7 +130,7 @@ private Table buildTable(RestRequest request, final ClusterStateResponse state, table.startRow(); table.addCell(shardCount); - table.addCell(nodeStats.getIndices().getStore().getSize()); + table.addCell(nodeStats.getIndices().getStore().size()); table.addCell(used < 0 ? null : ByteSizeValue.ofBytes(used)); table.addCell(avail.getBytes() < 0 ? null : avail); table.addCell(total.getBytes() < 0 ? null : total); diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestCatComponentTemplateAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestCatComponentTemplateAction.java index b761c7e3ca054..4a238451bcc69 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestCatComponentTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestCatComponentTemplateAction.java @@ -127,24 +127,27 @@ private static int countMappingInTemplate(Template template) throws Exception { } int count = 0; XContentType xContentType = XContentType.JSON; - XContentParser parser = xContentType.xContent() .createParser(XContentParserConfiguration.EMPTY, template.mappings().uncompressed().array()); - XContentParser.Token token = parser.nextToken(); - String currentFieldName = null; - while (token != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - currentFieldName = parser.currentName(); - } else if (token == XContentParser.Token.START_OBJECT) { - if ("_doc".equals(currentFieldName)) { - List<Object> list = parser.mapOrdered().values().stream().toList(); - for (Object mapping : list) { - count = count + countSubAttributes(mapping); + try ( + XContentParser parser = xContentType.xContent() + .createParser(XContentParserConfiguration.EMPTY, template.mappings().uncompressed().array()) + ) { + XContentParser.Token token = parser.nextToken(); + String currentFieldName = null; + while (token != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token == XContentParser.Token.START_OBJECT) { + if ("_doc".equals(currentFieldName)) { + List<Object> list = parser.mapOrdered().values().stream().toList(); + for (Object mapping : list) { + count = count + countSubAttributes(mapping); + } } + } else { + parser.skipChildren(); } - } else { - parser.skipChildren(); + token = parser.nextToken(); } - token = parser.nextToken(); } return count; } diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java index 94fffd6582155..a57d45e07fd15 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestShardsAction.java @@ -293,8 +293,8 @@ Table buildTable(RestRequest request, ClusterStateResponse state, IndicesStatsRe } table.addCell(shard.state());
table.addCell(getOrNull(commonStats, CommonStats::getDocs, DocsStats::getCount)); - table.addCell(getOrNull(commonStats, CommonStats::getStore, StoreStats::getSize)); - table.addCell(getOrNull(commonStats, CommonStats::getStore, StoreStats::getTotalDataSetSize)); + table.addCell(getOrNull(commonStats, CommonStats::getStore, StoreStats::size)); + table.addCell(getOrNull(commonStats, CommonStats::getStore, StoreStats::totalDataSetSize)); if (shard.assignedToNode()) { String ip = state.getState().nodes().get(shard.currentNodeId()).getHostAddress(); String nodeId = shard.currentNodeId(); diff --git a/server/src/main/java/org/elasticsearch/rest/action/search/RestKnnSearchAction.java b/server/src/main/java/org/elasticsearch/rest/action/search/RestKnnSearchAction.java index 681474403eb14..3dd8269126552 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/search/RestKnnSearchAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/search/RestKnnSearchAction.java @@ -14,7 +14,7 @@ import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestCancellableNodeClient; -import org.elasticsearch.rest.action.RestChunkedToXContentListener; +import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener; import org.elasticsearch.search.vectors.KnnSearchRequestParser; import java.io.IOException; @@ -55,6 +55,6 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient SearchRequestBuilder searchRequestBuilder = cancellableNodeClient.prepareSearch(); parser.toSearchRequest(searchRequestBuilder); - return channel -> searchRequestBuilder.execute(new RestChunkedToXContentListener<>(channel)); + return channel -> searchRequestBuilder.execute(new RestRefCountedChunkedToXContentListener<>(channel)); } } diff --git a/server/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java b/server/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java index c232e1a30c553..a881b2497b26c 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/search/RestMultiSearchAction.java @@ -142,7 +142,7 @@ public static MultiSearchRequest parseRequest( searchRequest.source(new SearchSourceBuilder().parseXContent(parser, false, searchUsageHolder)); RestSearchAction.validateSearchRequest(restRequest, searchRequest); if (searchRequest.pointInTimeBuilder() != null) { - RestSearchAction.preparePointInTime(searchRequest, restRequest, namedWriteableRegistry); + RestSearchAction.preparePointInTime(searchRequest, restRequest); } else { searchRequest.setCcsMinimizeRoundtrips( restRequest.paramAsBoolean("ccs_minimize_roundtrips", searchRequest.isCcsMinimizeRoundtrips()) diff --git a/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java b/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java index 41102a3568e30..711aec182525e 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java @@ -11,7 +11,6 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.search.SearchContextId; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.TransportSearchAction; 
import org.elasticsearch.action.support.IndicesOptions; @@ -50,7 +49,6 @@ import java.util.function.IntConsumer; import static org.elasticsearch.action.ValidateActions.addValidationError; -import static org.elasticsearch.action.search.SearchRequest.DEFAULT_INDICES_OPTIONS; import static org.elasticsearch.core.TimeValue.parseTimeValue; import static org.elasticsearch.rest.RestRequest.Method.GET; import static org.elasticsearch.rest.RestRequest.Method.POST; @@ -220,7 +218,7 @@ public static void parseSearchRequest( validateSearchRequest(request, searchRequest); if (searchRequest.pointInTimeBuilder() != null) { - preparePointInTime(searchRequest, request, namedWriteableRegistry); + preparePointInTime(searchRequest, request); } else { searchRequest.setCcsMinimizeRoundtrips( request.paramAsBoolean("ccs_minimize_roundtrips", searchRequest.isCcsMinimizeRoundtrips()) @@ -373,44 +371,14 @@ static SuggestBuilder parseSuggestUrlParameters(RestRequest request) { return null; } - static void preparePointInTime(SearchRequest request, RestRequest restRequest, NamedWriteableRegistry namedWriteableRegistry) { + static void preparePointInTime(SearchRequest request, RestRequest restRequest) { assert request.pointInTimeBuilder() != null; ActionRequestValidationException validationException = null; - if (request.indices().length > 0) { - validationException = addValidationError( - "[indices] cannot be used with point in time. Do not specify any index with point in time.", - validationException - ); - } - if (request.indicesOptions().equals(DEFAULT_INDICES_OPTIONS) == false) { - validationException = addValidationError("[indicesOptions] cannot be used with point in time", validationException); - } - if (request.routing() != null) { - validationException = addValidationError("[routing] cannot be used with point in time", validationException); - } - if (request.preference() != null) { - validationException = addValidationError("[preference] cannot be used with point in time", validationException); - } if (restRequest.paramAsBoolean("ccs_minimize_roundtrips", false)) { validationException = addValidationError("[ccs_minimize_roundtrips] cannot be used with point in time", validationException); request.setCcsMinimizeRoundtrips(false); } ExceptionsHelper.reThrowIfNotNull(validationException); - - final IndicesOptions indicesOptions = request.indicesOptions(); - final IndicesOptions stricterIndicesOptions = IndicesOptions.fromOptions( - indicesOptions.ignoreUnavailable(), - indicesOptions.allowNoIndices(), - false, - false, - false, - true, - true, - indicesOptions.ignoreThrottled() - ); - request.indicesOptions(stricterIndicesOptions); - final SearchContextId searchContextId = request.pointInTimeBuilder().getSearchContextId(namedWriteableRegistry); - request.indices(searchContextId.getActualIndices()); } /** diff --git a/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchScrollAction.java b/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchScrollAction.java index de2101f94de2a..acf2818dd8902 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchScrollAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchScrollAction.java @@ -14,7 +14,7 @@ import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; -import org.elasticsearch.rest.action.RestChunkedToXContentListener; +import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener; import 
org.elasticsearch.search.Scroll; import org.elasticsearch.xcontent.XContentParseException; @@ -66,7 +66,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC } } }); - return channel -> client.searchScroll(searchScrollRequest, new RestChunkedToXContentListener<>(channel)); + return channel -> client.searchScroll(searchScrollRequest, new RestRefCountedChunkedToXContentListener<>(channel)); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java b/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java index a8721503c7454..773934615e051 100644 --- a/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java +++ b/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java @@ -32,7 +32,6 @@ import org.elasticsearch.index.fielddata.FieldDataContext; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.IndexOrdinalsFieldData; -import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.IdLoader; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.NestedLookup; @@ -167,20 +166,13 @@ final class DefaultSearchContext extends SearchContext { this.indexShard = readerContext.indexShard(); Engine.Searcher engineSearcher = readerContext.acquireSearcher("search"); - int maximumNumberOfSlices; - if (hasSyntheticSource(indexService)) { - // accessing synthetic source is not thread safe - maximumNumberOfSlices = 1; - } else { - maximumNumberOfSlices = determineMaximumNumberOfSlices( - executor, - request, - resultsType, - enableQueryPhaseParallelCollection, - field -> getFieldCardinality(field, readerContext.indexService(), engineSearcher.getDirectoryReader()) - ); - - } + int maximumNumberOfSlices = determineMaximumNumberOfSlices( + executor, + request, + resultsType, + enableQueryPhaseParallelCollection, + field -> getFieldCardinality(field, readerContext.indexService(), engineSearcher.getDirectoryReader()) + ); if (executor == null) { this.searcher = new ContextIndexSearcher( engineSearcher.getIndexReader(), @@ -222,14 +214,6 @@ final class DefaultSearchContext extends SearchContext { } } - private static boolean hasSyntheticSource(IndexService indexService) { - DocumentMapper documentMapper = indexService.mapperService().documentMapper(); - if (documentMapper != null) { - return documentMapper.sourceMapper().isSynthetic(); - } - return false; - } - static long getFieldCardinality(String field, IndexService indexService, DirectoryReader directoryReader) { MappedFieldType mappedFieldType = indexService.mapperService().fieldType(field); if (mappedFieldType == null) { diff --git a/server/src/main/java/org/elasticsearch/search/SearchService.java b/server/src/main/java/org/elasticsearch/search/SearchService.java index 9e59bfda96d19..8a03c7e9f08ba 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchService.java +++ b/server/src/main/java/org/elasticsearch/search/SearchService.java @@ -115,7 +115,6 @@ import org.elasticsearch.search.suggest.Suggest; import org.elasticsearch.search.suggest.completion.CompletionSuggestion; import org.elasticsearch.tasks.TaskCancelledException; -import org.elasticsearch.telemetry.tracing.SpanId; import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.threadpool.Scheduler; import org.elasticsearch.threadpool.Scheduler.Cancellable; @@ -493,7 +492,7 @@ public void executeDfsPhase(ShardSearchRequest request, SearchShardTask 
task, Ac private DfsSearchResult executeDfsPhase(ShardSearchRequest request, SearchShardTask task) throws IOException { ReaderContext readerContext = createOrGetReaderContext(request); try (@SuppressWarnings("unused") // withScope call is necessary to instrument search execution - Releasable scope = tracer.withScope(SpanId.forTask(task)); + Releasable scope = tracer.withScope(task); Releasable ignored = readerContext.markAsUsed(getKeepAlive(request)); SearchContext context = createContext(readerContext, request, task, ResultsType.DFS, false) ) { @@ -665,9 +664,8 @@ private static void runAsync( */ private SearchPhaseResult executeQueryPhase(ShardSearchRequest request, SearchShardTask task) throws Exception { final ReaderContext readerContext = createOrGetReaderContext(request); - SpanId spanId = SpanId.forTask(task); try ( - Releasable scope = tracer.withScope(spanId); + Releasable scope = tracer.withScope(task); Releasable ignored = readerContext.markAsUsed(getKeepAlive(request)); SearchContext context = createContext(readerContext, request, task, ResultsType.QUERY, true) ) { @@ -680,7 +678,7 @@ private SearchPhaseResult executeQueryPhase(ShardSearchRequest request, SearchSh } afterQueryTime = executor.success(); } finally { - tracer.stopTrace(spanId); + tracer.stopTrace(task); } if (request.numberOfShards() == 1 && (request.source() == null || request.source().rankBuilder() == null)) { // we already have query results, but we can run fetch at the same time @@ -711,7 +709,7 @@ private SearchPhaseResult executeQueryPhase(ShardSearchRequest request, SearchSh private QueryFetchSearchResult executeFetchPhase(ReaderContext reader, SearchContext context, long afterQueryTime) { try ( - Releasable scope = tracer.withScope(SpanId.forTask(context.getTask())); + Releasable scope = tracer.withScope(context.getTask()); SearchOperationListenerExecutor executor = new SearchOperationListenerExecutor(context, true, afterQueryTime) ) { fetchPhase.execute(context, shortcutDocIdsToLoad(context)); diff --git a/server/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java b/server/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java index c47f53ec503b9..d6a3334dd035b 100644 --- a/server/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java +++ b/server/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java @@ -22,6 +22,7 @@ import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.LeafCollector; +import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.QueryCache; import org.apache.lucene.search.QueryCachingPolicy; @@ -196,9 +197,11 @@ public Query rewrite(Query original) throws IOException { if (profiler != null) { profiler.startRewriteTime(); } - try { return super.rewrite(original); + } catch (TimeExceededException e) { + timeExceeded = true; + return new MatchNoDocsQuery("rewrite timed out"); } finally { if (profiler != null) { profiler.stopAndAddRewriteTime(); } } @@ -297,10 +300,10 @@ private static LeafSlice[] computeSlices(List leaves, int min @Override public T search(Query query, CollectorManager collectorManager) throws IOException { final C firstCollector = collectorManager.newCollector(); + // Take advantage of the few extra rewrite rules of ConstantScoreQuery when scores are not needed. + query = firstCollector.scoreMode().needsScores() ?
rewrite(query) : rewrite(new ConstantScoreQuery(query)); final Weight weight; try { - // Take advantage of the few extra rewrite rules of ConstantScoreQuery when score are not needed. - query = firstCollector.scoreMode().needsScores() ? rewrite(query) : rewrite(new ConstantScoreQuery(query)); weight = createWeight(query, firstCollector.scoreMode(), 1); } catch (@SuppressWarnings("unused") TimeExceededException e) { timeExceeded = true; diff --git a/server/src/main/java/org/elasticsearch/search/internal/InternalSearchResponse.java b/server/src/main/java/org/elasticsearch/search/internal/InternalSearchResponse.java deleted file mode 100644 index 392f60ba36cd0..0000000000000 --- a/server/src/main/java/org/elasticsearch/search/internal/InternalSearchResponse.java +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.search.internal; - -import org.elasticsearch.action.search.SearchResponseSections; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.search.SearchHits; -import org.elasticsearch.search.aggregations.InternalAggregations; -import org.elasticsearch.search.profile.SearchProfileResults; -import org.elasticsearch.search.suggest.Suggest; - -import java.io.IOException; - -/** - * {@link SearchResponseSections} subclass that can be serialized over the wire. - */ -public class InternalSearchResponse extends SearchResponseSections implements Writeable { - public static final InternalSearchResponse EMPTY_WITH_TOTAL_HITS = new InternalSearchResponse( - SearchHits.EMPTY_WITH_TOTAL_HITS, - null, - null, - null, - false, - null, - 1 - ); - - public static final InternalSearchResponse EMPTY_WITHOUT_TOTAL_HITS = new InternalSearchResponse( - SearchHits.EMPTY_WITHOUT_TOTAL_HITS, - null, - null, - null, - false, - null, - 1 - ); - - public InternalSearchResponse( - SearchHits hits, - InternalAggregations aggregations, - Suggest suggest, - SearchProfileResults profileResults, - boolean timedOut, - Boolean terminatedEarly, - int numReducePhases - ) { - super(hits, aggregations, suggest, timedOut, terminatedEarly, profileResults, numReducePhases); - } - - public InternalSearchResponse(StreamInput in) throws IOException { - super( - new SearchHits(in), - in.readBoolean() ? InternalAggregations.readFrom(in) : null, - in.readBoolean() ? 
new Suggest(in) : null, - in.readBoolean(), - in.readOptionalBoolean(), - in.readOptionalWriteable(SearchProfileResults::new), - in.readVInt() - ); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - hits.writeTo(out); - out.writeOptionalWriteable((InternalAggregations) aggregations); - out.writeOptionalWriteable(suggest); - out.writeBoolean(timedOut); - out.writeOptionalBoolean(terminatedEarly); - out.writeOptionalWriteable(profileResults); - out.writeVInt(numReducePhases); - } -} diff --git a/server/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java b/server/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java index 01988003f4dd0..18ae708d8fec3 100644 --- a/server/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java +++ b/server/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java @@ -182,6 +182,10 @@ public static long computeWaitForCheckpoint(Map indexToWaitForCh } public ShardSearchRequest(ShardId shardId, long nowInMillis, AliasFilter aliasFilter) { + this(shardId, nowInMillis, aliasFilter, null); + } + + public ShardSearchRequest(ShardId shardId, long nowInMillis, AliasFilter aliasFilter, String clusterAlias) { this( OriginalIndices.NONE, shardId, @@ -195,7 +199,7 @@ public ShardSearchRequest(ShardId shardId, long nowInMillis, AliasFilter aliasFi true, null, nowInMillis, - null, + clusterAlias, null, null, SequenceNumbers.UNASSIGNED_SEQ_NO, diff --git a/server/src/main/java/org/elasticsearch/search/lookup/SourceFilter.java b/server/src/main/java/org/elasticsearch/search/lookup/SourceFilter.java index a58e7fa7d4a2b..761936b43053c 100644 --- a/server/src/main/java/org/elasticsearch/search/lookup/SourceFilter.java +++ b/server/src/main/java/org/elasticsearch/search/lookup/SourceFilter.java @@ -94,12 +94,13 @@ private Function buildBytesFilter() { BytesStreamOutput streamOutput = new BytesStreamOutput(1024); XContent xContent = in.sourceContentType().xContent(); XContentBuilder builder = new XContentBuilder(xContent, streamOutput); - XContentParser parser = xContent.createParser(parserConfig, in.internalSourceRef().streamInput()); - if ((parser.currentToken() == null) && (parser.nextToken() == null)) { - return Source.empty(in.sourceContentType()); + try (XContentParser parser = xContent.createParser(parserConfig, in.internalSourceRef().streamInput())) { + if ((parser.currentToken() == null) && (parser.nextToken() == null)) { + return Source.empty(in.sourceContentType()); + } + builder.copyCurrentStructure(parser); + return Source.fromBytes(BytesReference.bytes(builder)); } - builder.copyCurrentStructure(parser); - return Source.fromBytes(BytesReference.bytes(builder)); } catch (IOException e) { throw new UncheckedIOException(e); } diff --git a/server/src/main/java/org/elasticsearch/snapshots/PausedSnapshotException.java b/server/src/main/java/org/elasticsearch/snapshots/PausedSnapshotException.java new file mode 100644 index 0000000000000..8a268a5d52078 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/snapshots/PausedSnapshotException.java @@ -0,0 +1,15 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.snapshots; + +public final class PausedSnapshotException extends RuntimeException { + public PausedSnapshotException() { + super("paused"); + } +} diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java index 0f7c4f71a089c..7b3a83dfc9bb3 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java @@ -136,7 +136,11 @@ public void clusterChanged(ClusterChangedEvent event) { cancelRemoved(currentSnapshots); for (final var oneRepoSnapshotsInProgress : currentSnapshots.entriesByRepo()) { for (final var snapshotsInProgressEntry : oneRepoSnapshotsInProgress) { - handleUpdatedSnapshotsInProgressEntry(localNodeId, snapshotsInProgressEntry); + handleUpdatedSnapshotsInProgressEntry( + localNodeId, + currentSnapshots.isNodeIdForRemoval(localNodeId), + snapshotsInProgressEntry + ); } } } @@ -223,7 +227,7 @@ private void cancelRemoved(SnapshotsInProgress snapshotsInProgress) { } } - private void handleUpdatedSnapshotsInProgressEntry(String localNodeId, SnapshotsInProgress.Entry entry) { + private void handleUpdatedSnapshotsInProgressEntry(String localNodeId, boolean removingLocalNode, SnapshotsInProgress.Entry entry) { if (entry.isClone()) { // This is a snapshot clone, it will be executed on the current master return; @@ -236,7 +240,11 @@ private void handleUpdatedSnapshotsInProgressEntry(String localNodeId, Snapshots return; } - startNewShardSnapshots(localNodeId, entry); + if (removingLocalNode) { + pauseShardSnapshots(localNodeId, entry); + } else { + startNewShardSnapshots(localNodeId, entry); + } } case ABORTED -> { // Abort all running shards for this snapshot @@ -249,7 +257,13 @@ private void handleUpdatedSnapshotsInProgressEntry(String localNodeId, Snapshots // due to CS batching we might have missed the INIT state and straight went into ABORTED // notify master that abort has completed by moving to FAILED if (shard.getValue().state() == ShardState.ABORTED && localNodeId.equals(shard.getValue().nodeId())) { - notifyUnsuccessfulSnapshotShard(snapshot, sid, shard.getValue().reason(), shard.getValue().generation()); + notifyUnsuccessfulSnapshotShard( + snapshot, + sid, + ShardState.FAILED, + shard.getValue().reason(), + shard.getValue().generation() + ); } } else { snapshotStatus.abortIfNotCompleted("snapshot has been aborted", notifyOnAbortTaskRunner::enqueueTask); @@ -263,19 +277,20 @@ private void handleUpdatedSnapshotsInProgressEntry(String localNodeId, Snapshots private void startNewShardSnapshots(String localNodeId, SnapshotsInProgress.Entry entry) { Map shardsToStart = null; final Snapshot snapshot = entry.snapshot(); - final var runningShardsForSnapshot = shardSnapshots.getOrDefault(snapshot, emptyMap()).keySet(); + final var runningShardsForSnapshot = shardSnapshots.getOrDefault(snapshot, emptyMap()); for (var scheduledShard : entry.shards().entrySet()) { // Add all new shards to start processing on final var shardId = scheduledShard.getKey(); final var shardSnapshotStatus = scheduledShard.getValue(); - if (shardSnapshotStatus.state() == ShardState.INIT - && localNodeId.equals(shardSnapshotStatus.nodeId()) - && runningShardsForSnapshot.contains(shardId) == false) { - logger.trace("[{}] adding shard to the queue", shardId); - if (shardsToStart == null) { - shardsToStart = new HashMap<>(); + if (shardSnapshotStatus.state() == 
ShardState.INIT && localNodeId.equals(shardSnapshotStatus.nodeId())) { + final var runningShard = runningShardsForSnapshot.get(shardId); + if (runningShard == null || runningShard.isPaused()) { + logger.trace("[{}] adding [{}] shard to the queue", shardId, runningShard == null ? "new" : "paused"); + if (shardsToStart == null) { + shardsToStart = new HashMap<>(); + } + shardsToStart.put(shardId, shardSnapshotStatus.generation()); } - shardsToStart.put(shardId, shardSnapshotStatus.generation()); } } if (shardsToStart == null) { @@ -303,6 +318,40 @@ private void startNewShardSnapshots(String localNodeId, SnapshotsInProgress.Entr threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(() -> shardSnapshotTasks.forEach(Runnable::run)); } + private void pauseShardSnapshots(String localNodeId, SnapshotsInProgress.Entry entry) { + final var localShardSnapshots = shardSnapshots.getOrDefault(entry.snapshot(), Map.of()); + + for (final Map.Entry shardEntry : entry.shards().entrySet()) { + final ShardId shardId = shardEntry.getKey(); + final ShardSnapshotStatus masterShardSnapshotStatus = shardEntry.getValue(); + + if (masterShardSnapshotStatus.state() != ShardState.INIT) { + // shard snapshot not currently scheduled by master + continue; + } + + if (localNodeId.equals(masterShardSnapshotStatus.nodeId()) == false) { + // shard snapshot scheduled on a different node + continue; + } + + final var localShardSnapshotStatus = localShardSnapshots.get(shardId); + if (localShardSnapshotStatus == null) { + // shard snapshot scheduled but not currently running, pause immediately without starting + notifyUnsuccessfulSnapshotShard( + entry.snapshot(), + shardId, + ShardState.PAUSED_FOR_NODE_REMOVAL, + "paused", + masterShardSnapshotStatus.generation() + ); + } else { + // shard snapshot currently running, mark for pause + localShardSnapshotStatus.pauseIfNotCompleted(notifyOnAbortTaskRunner::enqueueTask); + } + } + } + private Runnable newShardSnapshotTask( final ShardId shardId, final Snapshot snapshot, @@ -335,15 +384,22 @@ public void onResponse(ShardSnapshotResult shardSnapshotResult) { @Override public void onFailure(Exception e) { final String failure; + final Stage nextStage; if (e instanceof AbortedSnapshotException) { + nextStage = Stage.FAILURE; failure = "aborted"; logger.debug(() -> format("[%s][%s] aborted shard snapshot", shardId, snapshot), e); + } else if (e instanceof PausedSnapshotException) { + nextStage = Stage.PAUSED; + failure = "paused for removal of node holding primary"; + logger.debug(() -> format("[%s][%s] pausing shard snapshot", shardId, snapshot), e); } else { + nextStage = Stage.FAILURE; failure = summarizeFailure(e); logger.warn(() -> format("[%s][%s] failed to snapshot shard", shardId, snapshot), e); } - snapshotStatus.moveToFailed(threadPool.absoluteTimeInMillis(), failure); - notifyUnsuccessfulSnapshotShard(snapshot, shardId, failure, snapshotStatus.generation()); + final var shardState = snapshotStatus.moveToUnsuccessful(nextStage, failure, threadPool.absoluteTimeInMillis()); + notifyUnsuccessfulSnapshotShard(snapshot, shardId, shardState, failure, snapshotStatus.generation()); } }); } @@ -543,6 +599,19 @@ private void syncShardStatsOnNewMaster(List entries) notifyUnsuccessfulSnapshotShard( snapshot.snapshot(), shardId, + ShardState.FAILED, + indexShardSnapshotStatus.getFailure(), + localShard.getValue().generation() + ); + } else if (stage == Stage.PAUSED) { + // but we think the shard has paused - we need to make new master know that + logger.debug(""" + [{}] new master thinks 
the shard [{}] is still running but the shard paused locally, updating status on \ + master""", snapshot.snapshot(), shardId); + notifyUnsuccessfulSnapshotShard( + snapshot.snapshot(), + shardId, + ShardState.PAUSED_FOR_NODE_REMOVAL, indexShardSnapshotStatus.getFailure(), localShard.getValue().generation() ); @@ -569,13 +638,15 @@ private void notifySuccessfulSnapshotShard(final Snapshot snapshot, final ShardI private void notifyUnsuccessfulSnapshotShard( final Snapshot snapshot, final ShardId shardId, + final ShardState shardState, final String failure, final ShardGeneration generation ) { + assert shardState == ShardState.FAILED || shardState == ShardState.PAUSED_FOR_NODE_REMOVAL : shardState; sendSnapshotShardUpdate( snapshot, shardId, - new ShardSnapshotStatus(clusterService.localNode().getId(), ShardState.FAILED, generation, failure) + new ShardSnapshotStatus(clusterService.localNode().getId(), shardState, generation, failure) ); } diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java index f62061f5d5b4b..f973d456a6b79 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java @@ -238,6 +238,11 @@ public SnapshotsService( this.systemIndices = systemIndices; this.masterServiceTaskQueue = clusterService.createTaskQueue("snapshots-service", Priority.NORMAL, new SnapshotTaskExecutor()); + this.updateNodeIdsToRemoveQueue = clusterService.createTaskQueue( + "snapshots-service-node-ids", + Priority.NORMAL, + UpdateNodeIdsForRemovalTask::executeBatch + ); } /** @@ -829,8 +834,19 @@ public void applyClusterState(ClusterChangedEvent event) { final boolean newMaster = event.previousState().nodes().isLocalNodeElectedMaster() == false; processExternalChanges( newMaster || removedNodesCleanupNeeded(snapshotsInProgress, event.nodesDelta().removedNodes()), - event.routingTableChanged() && waitingShardsStartedOrUnassigned(snapshotsInProgress, event) + snapshotsInProgress.nodeIdsForRemovalChanged(SnapshotsInProgress.get(event.previousState())) + || (event.routingTableChanged() && waitingShardsStartedOrUnassigned(snapshotsInProgress, event)) ); + + if (newMaster + || event.state().metadata().nodeShutdowns().equals(event.previousState().metadata().nodeShutdowns()) == false + || supportsNodeRemovalTracking(event.state()) != supportsNodeRemovalTracking(event.previousState())) { + updateNodeIdsToRemoveQueue.submitTask( + "SnapshotsService#updateNodeIdsToRemove", + new UpdateNodeIdsForRemovalTask(), + null + ); + } } else { final List readyToResolveListeners = new ArrayList<>(); // line-up mutating concurrent operations which can be in form of clusterApplierService and masterService tasks @@ -1046,6 +1062,7 @@ public ClusterState execute(ClusterState currentState) { snapshot, routingTable, nodes, + snapshots::isNodeIdForRemoval, knownFailures ); if (shards != null) { @@ -1130,6 +1147,7 @@ private static ImmutableOpenMap processWaitingShar SnapshotsInProgress.Entry entry, RoutingTable routingTable, DiscoveryNodes nodes, + Predicate nodeIdRemovalPredicate, Map knownFailures ) { assert entry.isClone() == false : "clones take a different path"; @@ -1160,19 +1178,30 @@ private static ImmutableOpenMap processWaitingShar snapshotChanged = true; shards.put(shardId, knownFailure); } - } else if (shardStatus.state() == ShardState.WAITING) { + } else if (shardStatus.state() == ShardState.WAITING || shardStatus.state() 
== ShardState.PAUSED_FOR_NODE_REMOVAL) { IndexRoutingTable indexShardRoutingTable = routingTable.index(shardId.getIndex()); if (indexShardRoutingTable != null) { IndexShardRoutingTable shardRouting = indexShardRoutingTable.shard(shardId.id()); if (shardRouting != null && shardRouting.primaryShard() != null) { - if (shardRouting.primaryShard().started()) { + final var primaryNodeId = shardRouting.primaryShard().currentNodeId(); + if (nodeIdRemovalPredicate.test(primaryNodeId)) { + if (shardStatus.state() == ShardState.PAUSED_FOR_NODE_REMOVAL) { + // Shard that we are waiting for is on a node marked for removal, keep it as PAUSED_FOR_REMOVAL + shards.put(shardId, shardStatus); + } else { + // Shard that we are waiting for is on a node marked for removal, move it to PAUSED_FOR_REMOVAL + snapshotChanged = true; + shards.put( + shardId, + new ShardSnapshotStatus(primaryNodeId, ShardState.PAUSED_FOR_NODE_REMOVAL, shardStatus.generation()) + ); + } + continue; + } else if (shardRouting.primaryShard().started()) { // Shard that we were waiting for has started on a node, let's process it snapshotChanged = true; logger.trace("starting shard that we were waiting for [{}] on node [{}]", shardId, shardStatus.nodeId()); - shards.put( - shardId, - new ShardSnapshotStatus(shardRouting.primaryShard().currentNodeId(), shardStatus.generation()) - ); + shards.put(shardId, new ShardSnapshotStatus(primaryNodeId, shardStatus.generation())); continue; } else if (shardRouting.primaryShard().initializing() || shardRouting.primaryShard().relocating()) { // Shard that we were waiting for hasn't started yet or still relocating - will continue to wait @@ -1225,7 +1254,7 @@ private static boolean waitingShardsStartedOrUnassigned(SnapshotsInProgress snap if (entry.state() == State.STARTED && entry.isClone() == false) { for (Map.Entry shardStatus : entry.shardsByRepoShardId().entrySet()) { final ShardState state = shardStatus.getValue().state(); - if (state != ShardState.WAITING && state != ShardState.QUEUED) { + if (state != ShardState.WAITING && state != ShardState.QUEUED && state != ShardState.PAUSED_FOR_NODE_REMOVAL) { continue; } final RepositoryShardId shardId = shardStatus.getKey(); @@ -1234,11 +1263,13 @@ private static boolean waitingShardsStartedOrUnassigned(SnapshotsInProgress snap .getRoutingTable() .index(entry.indexByName(shardId.indexName())); if (indexShardRoutingTable == null) { - // index got removed concurrently and we have to fail WAITING or QUEUED state shards + // index got removed concurrently and we have to fail WAITING, QUEUED and PAUSED_FOR_REMOVAL state shards return true; } ShardRouting shardRouting = indexShardRoutingTable.shard(shardId.shardId()).primaryShard(); - if (shardRouting != null && (shardRouting.started() || shardRouting.unassigned())) { + if (shardRouting != null + && (shardRouting.started() && snapshotsInProgress.isNodeIdForRemoval(shardRouting.currentNodeId()) == false + || shardRouting.unassigned())) { return true; } } @@ -2870,7 +2901,11 @@ private static ImmutableOpenMap nodeIdRemovalPredicate + ) { ShardSnapshotStatus shardSnapshotStatus; if (primary == null || primary.assignedToNode() == false) { shardSnapshotStatus = new ShardSnapshotStatus(null, ShardState.MISSING, shardRepoGeneration, "primary shard is not allocated"); } else if (primary.relocating() || primary.initializing()) { shardSnapshotStatus = new ShardSnapshotStatus(primary.currentNodeId(), ShardState.WAITING, shardRepoGeneration); + } else if (nodeIdRemovalPredicate.test(primary.currentNodeId())) { + 
shardSnapshotStatus = new ShardSnapshotStatus(primary.currentNodeId(), ShardState.PAUSED_FOR_NODE_REMOVAL, shardRepoGeneration); } else if (primary.started() == false) { shardSnapshotStatus = new ShardSnapshotStatus( primary.currentNodeId(), @@ -3044,6 +3086,9 @@ static final class SnapshotShardsUpdateContext { // initial cluster state for update computation private final ClusterState initialState; + // tests whether node IDs are currently marked for removal + private final Predicate nodeIdRemovalPredicate; + // updates outstanding to be applied to existing snapshot entries private final Map> updatesByRepo; @@ -3059,6 +3104,7 @@ static final class SnapshotShardsUpdateContext { ) { this.batchExecutionContext = batchExecutionContext; this.initialState = batchExecutionContext.initialState(); + this.nodeIdRemovalPredicate = SnapshotsInProgress.get(initialState)::isNodeIdForRemoval; this.rerouteRunnable = new RunOnce(rerouteRunnable); // RunOnce to avoid enqueueing O(#shards) listeners this.updatesByRepo = new HashMap<>(); for (final var taskContext : batchExecutionContext.taskContexts()) { @@ -3090,7 +3136,7 @@ SnapshotsInProgress computeUpdatedState() { changedCount, startedCount ); - return updated; + return supportsNodeRemovalTracking(initialState) ? updated.withUpdatedNodeIdsForRemoval(initialState) : updated; } return existing; } @@ -3237,14 +3283,24 @@ private void executeShardSnapshotUpdate( return; } - logger.trace( - "[{}] Updating shard [{}] with status [{}]", - updateSnapshotState.snapshot, - updatedShard, - updateSnapshotState.updatedState.state() - ); + final ShardSnapshotStatus updatedState; + if (existing.state() == ShardState.ABORTED + && updateSnapshotState.updatedState.state() == ShardState.PAUSED_FOR_NODE_REMOVAL) { + // concurrently pausing the shard snapshot due to node shutdown and aborting the snapshot - this shard is no longer + // actively snapshotting but we don't want it to resume, so mark it as FAILED since it didn't complete + updatedState = new ShardSnapshotStatus( + updateSnapshotState.updatedState.nodeId(), + ShardState.FAILED, + updateSnapshotState.updatedState.generation(), + "snapshot aborted" + ); + } else { + updatedState = updateSnapshotState.updatedState; + } + + logger.trace("[{}] Updating shard [{}] with status [{}]", updateSnapshotState.snapshot, updatedShard, updatedState.state()); changedCount++; - newStates.get().put(updatedShard, updateSnapshotState.updatedState); + newStates.get().put(updatedShard, updatedState); executedUpdates.add(updateSnapshotState); } @@ -3308,7 +3364,7 @@ private void startShardSnapshot(RepositoryShardId repoShardId, ShardGeneration g } else { shardRouting = indexRouting.shard(repoShardId.shardId()).primaryShard(); } - final ShardSnapshotStatus shardSnapshotStatus = initShardSnapshotStatus(generation, shardRouting); + final ShardSnapshotStatus shardSnapshotStatus = initShardSnapshotStatus(generation, shardRouting, nodeIdRemovalPredicate); final ShardId routingShardId = shardRouting != null ? 
shardRouting.shardId() : new ShardId(index, repoShardId.shardId()); if (shardSnapshotStatus.isActive()) { startShardOperation(shardsBuilder(), routingShardId, shardSnapshotStatus); @@ -3924,4 +3980,36 @@ private SnapshotsInProgress createSnapshot( return res; } } + + private record UpdateNodeIdsForRemovalTask() implements ClusterStateTaskListener { + @Override + public void onFailure(Exception e) { + // must be a master failover, and the new master will retry so nbd + assert MasterService.isPublishFailureException(e) : e; + } + + static ClusterState executeBatch( + ClusterStateTaskExecutor.BatchExecutionContext batchExecutionContext + ) { + for (ClusterStateTaskExecutor.TaskContext taskContext : batchExecutionContext.taskContexts()) { + taskContext.success(() -> {}); + } + + final var clusterState = batchExecutionContext.initialState(); + if (supportsNodeRemovalTracking(clusterState)) { + final var snapshotsInProgress = SnapshotsInProgress.get(clusterState); + final var newSnapshotsInProgress = snapshotsInProgress.withUpdatedNodeIdsForRemoval(clusterState); + if (newSnapshotsInProgress != snapshotsInProgress) { + return ClusterState.builder(clusterState).putCustom(SnapshotsInProgress.TYPE, newSnapshotsInProgress).build(); + } + } + return clusterState; + } + } + + private static boolean supportsNodeRemovalTracking(ClusterState clusterState) { + return clusterState.getMinTransportVersion().onOrAfter(TransportVersions.SNAPSHOTS_IN_PROGRESS_TRACKING_REMOVING_NODES_ADDED); + } + + private final MasterServiceTaskQueue updateNodeIdsToRemoveQueue; } diff --git a/server/src/main/java/org/elasticsearch/tasks/Task.java b/server/src/main/java/org/elasticsearch/tasks/Task.java index 3726ba265e433..83ee08574df4e 100644 --- a/server/src/main/java/org/elasticsearch/tasks/Task.java +++ b/server/src/main/java/org/elasticsearch/tasks/Task.java @@ -11,6 +11,7 @@ import org.elasticsearch.action.ActionResponse; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.io.stream.NamedWriteable; +import org.elasticsearch.telemetry.tracing.Traceable; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.ToXContentObject; @@ -21,7 +22,7 @@ /** * Current task information */ -public class Task { +public class Task implements Traceable { /** * The request header to mark tasks with specific ids @@ -265,4 +266,9 @@ public TaskResult result(DiscoveryNode node, ActionResponse response) throws IOE throw new IllegalStateException("response has to implement ToXContent to be able to store the results"); } } + + @Override + public String getSpanId() { + return "task-" + getId(); + } } diff --git a/server/src/main/java/org/elasticsearch/tasks/TaskInfo.java b/server/src/main/java/org/elasticsearch/tasks/TaskInfo.java index e0ef4feb0ae35..377c7b3847b0b 100644 --- a/server/src/main/java/org/elasticsearch/tasks/TaskInfo.java +++ b/server/src/main/java/org/elasticsearch/tasks/TaskInfo.java @@ -67,7 +67,7 @@ public static TaskInfo from(StreamInput in) throws IOException { return new TaskInfo( taskId, in.readString(), - in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_055) ? in.readString() : taskId.getNodeId(), + in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061) ? 
in.readString() : taskId.getNodeId(), in.readString(), in.readOptionalString(), in.readOptionalNamedWriteable(Task.Status.class), @@ -84,7 +84,7 @@ public static TaskInfo from(StreamInput in) throws IOException { public void writeTo(StreamOutput out) throws IOException { taskId.writeTo(out); out.writeString(type); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_055)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { out.writeString(node); } out.writeString(action); diff --git a/server/src/main/java/org/elasticsearch/telemetry/tracing/SpanId.java b/server/src/main/java/org/elasticsearch/telemetry/tracing/SpanId.java deleted file mode 100644 index 8a22102baadf9..0000000000000 --- a/server/src/main/java/org/elasticsearch/telemetry/tracing/SpanId.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1. - */ - -package org.elasticsearch.telemetry.tracing; - -import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.tasks.Task; - -import java.util.Objects; - -public class SpanId { - private final String rawId; - - private SpanId(String rawId) { - this.rawId = Objects.requireNonNull(rawId); - } - - public String getRawId() { - return rawId; - } - - @Override - public String toString() { - return "SpanId[" + rawId + "]"; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - SpanId spanId = (SpanId) o; - return rawId.equals(spanId.rawId); - } - - @Override - public int hashCode() { - return Objects.hash(rawId); - } - - public static SpanId forTask(Task task) { - return new SpanId("task-" + task.getId()); - } - - public static SpanId forRestRequest(RestRequest restRequest) { - return new SpanId("rest-" + restRequest.getRequestId()); - } - - public static SpanId forBareString(String rawId) { - return new SpanId(rawId); - } -} diff --git a/server/src/main/java/org/elasticsearch/telemetry/tracing/TraceContext.java b/server/src/main/java/org/elasticsearch/telemetry/tracing/TraceContext.java new file mode 100644 index 0000000000000..197b4f96acd5b --- /dev/null +++ b/server/src/main/java/org/elasticsearch/telemetry/tracing/TraceContext.java @@ -0,0 +1,34 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.telemetry.tracing; + +/** + * Required methods from ThreadContext for Tracer + */ +public interface TraceContext { + /** + * Returns a transient header object or null if there is no header for the given key + */ + T getTransient(String key); + + /** + * Puts a transient header object into this context + */ + void putTransient(String key, Object value); + + /** + * Returns the header for the given key or null if not present + */ + String getHeader(String key); + + /** + * Puts a header into the context + */ + void putHeader(String key, String value); +} diff --git a/server/src/main/java/org/elasticsearch/telemetry/tracing/Traceable.java b/server/src/main/java/org/elasticsearch/telemetry/tracing/Traceable.java new file mode 100644 index 0000000000000..64c8635d75dd8 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/telemetry/tracing/Traceable.java @@ -0,0 +1,19 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.telemetry.tracing; + +/** + * A class that can be traced using the telemetry tracing API + */ +public interface Traceable { + /** + * A consistent id for the span. Should be structured "[short-name]-[unique-id]" ie "request-abc1234" + */ + String getSpanId(); +} diff --git a/server/src/main/java/org/elasticsearch/telemetry/tracing/Tracer.java b/server/src/main/java/org/elasticsearch/telemetry/tracing/Tracer.java index f54857091b778..6f2c98dda4e2b 100644 --- a/server/src/main/java/org/elasticsearch/telemetry/tracing/Tracer.java +++ b/server/src/main/java/org/elasticsearch/telemetry/tracing/Tracer.java @@ -8,10 +8,7 @@ package org.elasticsearch.telemetry.tracing; -import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.Releasable; -import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.tasks.Task; import java.util.Map; @@ -37,27 +34,13 @@ public interface Tracer { /** * Called when a span starts. - * @param threadContext the current context. Required for tracing parent/child span activity. - * @param spanId a unique identifier for the activity, and will not be sent to the tracing system. Add the ID - * to the attributes if it is important + * @param traceContext the current context. Required for tracing parent/child span activity. + * @param traceable provides a unique identifier for the activity, and will not be sent to the tracing system. Add the ID + * to the attributes if it is important * @param name the name of the span. Used to filter out spans, but also sent to the tracing system * @param attributes arbitrary key/value data for the span. 
Sent to the tracing system */ - void startTrace(ThreadContext threadContext, SpanId spanId, String name, Map attributes); - - /** - * @see Tracer#startTrace(ThreadContext, SpanId, String, Map) - */ - default void startTrace(ThreadContext threadContext, Task task, String name, Map attributes) { - startTrace(threadContext, SpanId.forTask(task), name, attributes); - } - - /** - * @see Tracer#startTrace(ThreadContext, SpanId, String, Map) - */ - default void startTrace(ThreadContext threadContext, RestRequest restRequest, String name, Map attributes) { - startTrace(threadContext, SpanId.forRestRequest(restRequest), name, attributes); - } + void startTrace(TraceContext traceContext, Traceable traceable, String name, Map attributes); /** * Called when a span starts. This version of the method relies on context to assign the span a parent. @@ -67,23 +50,9 @@ default void startTrace(ThreadContext threadContext, RestRequest restRequest, St /** * Called when a span ends. - * @param spanId an identifier for the span - */ - void stopTrace(SpanId spanId); - - /** - * @see Tracer#stopTrace(SpanId) - */ - default void stopTrace(Task task) { - stopTrace(SpanId.forTask(task)); - } - - /** - * @see Tracer#stopTrace(SpanId) + * @param traceable provides an identifier for the span */ - default void stopTrace(RestRequest restRequest) { - stopTrace(SpanId.forRestRequest(restRequest)); - } + void stopTrace(Traceable traceable); /** * Called when a span ends. This version of the method relies on context to select the span to stop. @@ -94,58 +63,51 @@ default void stopTrace(RestRequest restRequest) { * Some tracing implementations support the concept of "events" within a span, marking a point in time during the span * when something interesting happened. If the tracing implementation doesn't support events, then nothing will be recorded. * This should only be called when a trace has already been started on the {@code traceable}. - * @param spanId an identifier for the span + * @param traceable provides an identifier for the span * @param eventName the event that happened. This should be something meaningful to people reviewing the data, for example * "send response", "finished processing", "validated request", etc. */ - void addEvent(SpanId spanId, String eventName); + void addEvent(Traceable traceable, String eventName); /** * If an exception occurs during a span, you can add data about the exception to the span where the exception occurred. * This should only be called when a span has been started, otherwise it has no effect. - * @param spanId an identifier for the span + * @param traceable provides an identifier for the span * @param throwable the exception that occurred. */ - void addError(SpanId spanId, Throwable throwable); - - /** - * @see Tracer#addError(SpanId, Throwable) - */ - default void addError(RestRequest restRequest, Throwable throwable) { - addError(SpanId.forRestRequest(restRequest), throwable); - } + void addError(Traceable traceable, Throwable throwable); /** * Adds a boolean attribute to an active span. These will be sent to the endpoint that collects tracing data. - * @param spanId an identifier for the span + * @param traceable provides an identifier for the span * @param key the attribute key * @param value the attribute value */ - void setAttribute(SpanId spanId, String key, boolean value); + void setAttribute(Traceable traceable, String key, boolean value); /** * Adds a double attribute to an active span. These will be sent to the endpoint that collects tracing data.
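For orientation, here is a minimal, hypothetical sketch of how a caller might drive this reworked Traceable-based API. The class, method, span, and event names below are invented; the Tracer signatures and the fact that Task now implements Traceable (with getSpanId() returning "task-" + getId()) come from the hunks in this change.

import org.elasticsearch.core.Releasable;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.telemetry.tracing.TraceContext;
import org.elasticsearch.telemetry.tracing.Tracer;

import java.util.Map;

final class TracedPhaseExample {
    // A Task is a Traceable, so it can be handed straight to the tracer
    // instead of going through the removed SpanId.forTask(...) indirection.
    static void runTracedPhase(Tracer tracer, TraceContext traceContext, Task task, Runnable phase) {
        tracer.startTrace(traceContext, task, "example-phase", Map.of());
        try (Releasable scope = tracer.withScope(task)) {
            phase.run();
            tracer.addEvent(task, "phase finished");
        } catch (RuntimeException e) {
            tracer.addError(task, e);
            throw e;
        } finally {
            // Spans are keyed by Traceable#getSpanId(), i.e. "task-" + getId() for tasks.
            tracer.stopTrace(task);
        }
    }
}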
- * @param spanId an identifier for the span + * @param traceable provides an identifier for the span * @param key the attribute key * @param value the attribute value */ - void setAttribute(SpanId spanId, String key, double value); + void setAttribute(Traceable traceable, String key, double value); /** * Adds a long attribute to an active span. These will be sent to the endpoint that collects tracing data. - * @param spanId an identifier for the span + * @param traceable provides an identifier for the span * @param key the attribute key * @param value the attribute value */ - void setAttribute(SpanId spanId, String key, long value); + void setAttribute(Traceable traceable, String key, long value); /** * Adds a String attribute to an active span. These will be sent to the endpoint that collects tracing data. - * @param spanId an identifier for the span + * @param traceable provides an identifier for the span * @param key the attribute key * @param value the attribute value */ - void setAttribute(SpanId spanId, String key, String value); + void setAttribute(Traceable traceable, String key, String value); /** * Usually you won't need to think about scopes when using tracing. However, @@ -172,10 +134,10 @@ default void addError(RestRequest restRequest, Throwable throwable) { *

Nonetheless, it is possible to manually use scope where more detail is needed by * explicitly opening a scope via the `Tracer`. * - * @param spanId an identifier for the span + * @param traceable provides an identifier for the span * @return a scope. You MUST close it when you are finished with it. */ - Releasable withScope(SpanId spanId); + Releasable withScope(Traceable traceable); /** * A Tracer implementation that does nothing. This is used when no tracer is configured, @@ -183,52 +145,37 @@ default void addError(RestRequest restRequest, Throwable throwable) { */ Tracer NOOP = new Tracer() { @Override - public void startTrace(ThreadContext threadContext, SpanId spanId, String name, Map attributes) {} - - @Override - public void startTrace(ThreadContext threadContext, Task task, String name, Map attributes) {} - - @Override - public void startTrace(ThreadContext threadContext, RestRequest restRequest, String name, Map attributes) {} + public void startTrace(TraceContext traceContext, Traceable traceable, String name, Map attributes) {} @Override public void startTrace(String name, Map attributes) {} @Override - public void stopTrace(SpanId spanId) {} - - @Override - public void stopTrace(Task task) {} - - @Override - public void stopTrace(RestRequest restRequest) {} + public void stopTrace(Traceable traceable) {} @Override public void stopTrace() {} @Override - public void addEvent(SpanId spanId, String eventName) {} - - @Override - public void addError(SpanId spanId, Throwable throwable) {} + public void addEvent(Traceable traceable, String eventName) {} @Override - public void addError(RestRequest restRequest, Throwable throwable) {} + public void addError(Traceable traceable, Throwable throwable) {} @Override - public void setAttribute(SpanId spanId, String key, boolean value) {} + public void setAttribute(Traceable traceable, String key, boolean value) {} @Override - public void setAttribute(SpanId spanId, String key, double value) {} + public void setAttribute(Traceable traceable, String key, double value) {} @Override - public void setAttribute(SpanId spanId, String key, long value) {} + public void setAttribute(Traceable traceable, String key, long value) {} @Override - public void setAttribute(SpanId spanId, String key, String value) {} + public void setAttribute(Traceable traceable, String key, String value) {} @Override - public Releasable withScope(SpanId spanId) { + public Releasable withScope(Traceable traceable) { return () -> {}; } }; diff --git a/server/src/main/java/org/elasticsearch/transport/LeakTracker.java b/server/src/main/java/org/elasticsearch/transport/LeakTracker.java index ea12953e7df12..3be22f6fae53a 100644 --- a/server/src/main/java/org/elasticsearch/transport/LeakTracker.java +++ b/server/src/main/java/org/elasticsearch/transport/LeakTracker.java @@ -77,11 +77,28 @@ public static Releasable wrap(Releasable releasable) { return releasable; } var leak = INSTANCE.track(releasable); - return () -> { - try { - releasable.close(); - } finally { - leak.close(releasable); + return new Releasable() { + @Override + public void close() { + try { + releasable.close(); + } finally { + leak.close(releasable); + } + } + + @Override + public int hashCode() { + // It's legitimate to wrap the resource twice, with two different wrap() calls, which would yield different objects + // if and only if assertions are enabled. So we'd better not ever use these things as map keys etc. 
+ throw new AssertionError("almost certainly a mistake to need the hashCode() of a leak-tracking Releasable"); + } + + @Override + public boolean equals(Object obj) { + // It's legitimate to wrap the resource twice, with two different wrap() calls, which would yield different objects + // if and only if assertions are enabled. So we'd better not ever use these things as map keys etc. + throw new AssertionError("almost certainly a mistake to compare a leak-tracking Releasable for equality"); } }; } @@ -118,6 +135,20 @@ public boolean decRef() { public boolean hasReferences() { return refCounted.hasReferences(); } + + @Override + public int hashCode() { + // It's legitimate to wrap the resource twice, with two different wrap() calls, which would yield different objects + // if and only if assertions are enabled. So we'd better not ever use these things as map keys etc. + throw new AssertionError("almost certainly a mistake to need the hashCode() of a leak-tracking RefCounted"); + } + + @Override + public boolean equals(Object obj) { + // It's legitimate to wrap the resource twice, with two different wrap() calls, which would yield different objects + // if and only if assertions are enabled. So we'd better not ever use these things as map keys etc. + throw new AssertionError("almost certainly a mistake to compare a leak-tracking RefCounted for equality"); + } }; } diff --git a/server/src/main/java/org/elasticsearch/transport/ProxyConnectionStrategy.java b/server/src/main/java/org/elasticsearch/transport/ProxyConnectionStrategy.java index 320b9cfdbf7e6..cfb6f872ce748 100644 --- a/server/src/main/java/org/elasticsearch/transport/ProxyConnectionStrategy.java +++ b/server/src/main/java/org/elasticsearch/transport/ProxyConnectionStrategy.java @@ -179,7 +179,7 @@ public class ProxyConnectionStrategy extends RemoteConnectionStrategy { RemoteConnectionManager.wrapConnectionWithRemoteClusterInfo( newConnection, clusterAlias, - actualProfile.getTransportProfile() + connectionManager.getCredentialsManager() ), actualProfile.getHandshakeTimeout(), cn -> true, diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java index a055e4122257f..3c74e46851504 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java @@ -57,15 +57,28 @@ final class RemoteClusterConnection implements Closeable { * @param settings the nodes settings object * @param clusterAlias the configured alias of the cluster to connect to * @param transportService the local nodes transport service - * @param credentialsProtected Whether the remote cluster is protected by a credentials, i.e. it has a credentials configured - * via secure setting. This means the remote cluster uses the new configurable access RCS model - * (as opposed to the basic model). + * @param credentialsManager object to look up remote cluster credentials by cluster alias. A cluster is protected by a credential + * if it has one configured via a secure setting, which means the remote cluster uses the advanced + * RCS model (as opposed to the basic model).
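For orientation, a minimal, hypothetical sketch of the credential lookup that RemoteClusterConnection now delegates to RemoteClusterCredentialsManager. The alias names and CredentialsLookupExample are invented, and the sketch assumes the cluster.remote.<alias>.credentials secure setting together with the test-only MockSecureSettings helper for populating it.

import org.elasticsearch.common.settings.MockSecureSettings;
import org.elasticsearch.common.settings.SecureString;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.transport.RemoteClusterCredentialsManager;

final class CredentialsLookupExample {
    static void demo() {
        // Assumed setting key: cluster.remote.<alias>.credentials (a secure setting,
        // faked here via the test-only MockSecureSettings helper).
        final MockSecureSettings secureSettings = new MockSecureSettings();
        secureSettings.setString("cluster.remote.my_remote.credentials", "secret");
        final Settings settings = Settings.builder().setSecureSettings(secureSettings).build();

        final RemoteClusterCredentialsManager manager = new RemoteClusterCredentialsManager(settings);
        assert manager.hasCredentials("my_remote");             // advanced RCS model
        assert manager.hasCredentials("basic_remote") == false; // basic model, no credential
        final SecureString credential = manager.resolveCredentials("my_remote");
        assert credential != null; // would be null for "basic_remote"
    }
}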
*/ - RemoteClusterConnection(Settings settings, String clusterAlias, TransportService transportService, boolean credentialsProtected) { + RemoteClusterConnection( + Settings settings, + String clusterAlias, + TransportService transportService, + RemoteClusterCredentialsManager credentialsManager + ) { this.transportService = transportService; this.clusterAlias = clusterAlias; - ConnectionProfile profile = RemoteConnectionStrategy.buildConnectionProfile(clusterAlias, settings, credentialsProtected); - this.remoteConnectionManager = new RemoteConnectionManager(clusterAlias, createConnectionManager(profile, transportService)); + ConnectionProfile profile = RemoteConnectionStrategy.buildConnectionProfile( + clusterAlias, + settings, + credentialsManager.hasCredentials(clusterAlias) + ); + this.remoteConnectionManager = new RemoteConnectionManager( + clusterAlias, + credentialsManager, + createConnectionManager(profile, transportService) + ); this.connectionStrategy = RemoteConnectionStrategy.buildStrategy(clusterAlias, transportService, remoteConnectionManager, settings); // we register the transport service here as a listener to make sure we notify handlers on disconnect etc. this.remoteConnectionManager.addListener(transportService); diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterCredentialsManager.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterCredentialsManager.java new file mode 100644 index 0000000000000..58e84f5e4ef11 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterCredentialsManager.java @@ -0,0 +1,53 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.transport; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.Nullable; + +import java.util.Map; + +import static org.elasticsearch.transport.RemoteClusterService.REMOTE_CLUSTER_CREDENTIALS; + +public class RemoteClusterCredentialsManager { + + private static final Logger logger = LogManager.getLogger(RemoteClusterCredentialsManager.class); + + private volatile Map clusterCredentials; + + @SuppressWarnings("this-escape") + public RemoteClusterCredentialsManager(Settings settings) { + updateClusterCredentials(settings); + } + + public final void updateClusterCredentials(Settings settings) { + clusterCredentials = REMOTE_CLUSTER_CREDENTIALS.getAsMap(settings); + logger.debug( + () -> Strings.format( + "Updated remote cluster credentials for clusters: [%s]", + Strings.collectionToCommaDelimitedString(clusterCredentials.keySet()) + ) + ); + } + + @Nullable + public SecureString resolveCredentials(String clusterAlias) { + return clusterCredentials.get(clusterAlias); + } + + public boolean hasCredentials(String clusterAlias) { + return clusterCredentials.containsKey(clusterAlias); + } + + public static final RemoteClusterCredentialsManager EMPTY = new RemoteClusterCredentialsManager(Settings.EMPTY); +} diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterPortSettings.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterPortSettings.java index 814b17bac95ef..fd5c39ec5fb1f 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterPortSettings.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterPortSettings.java @@ -39,7 +39,7 @@ */ public class RemoteClusterPortSettings { - public static final TransportVersion TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY = TransportVersions.V_8_500_059; + public static final TransportVersion TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY = TransportVersions.V_8_500_061; public static final String REMOTE_CLUSTER_PROFILE = "_remote_cluster"; public static final String REMOTE_CLUSTER_PREFIX = "remote_cluster."; diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java index c38f4b26c665f..6bfbb95cbcfe9 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java @@ -147,15 +147,14 @@ public boolean isRemoteClusterServerEnabled() { private final TransportService transportService; private final Map remoteClusters = ConcurrentCollections.newConcurrentMap(); - private final Set credentialsProtectedRemoteClusters; + private final RemoteClusterCredentialsManager remoteClusterCredentialsManager; RemoteClusterService(Settings settings, TransportService transportService) { super(settings); this.enabled = DiscoveryNode.isRemoteClusterClient(settings); this.remoteClusterServerEnabled = REMOTE_CLUSTER_SERVER_ENABLED.get(settings); this.transportService = transportService; - this.credentialsProtectedRemoteClusters = REMOTE_CLUSTER_CREDENTIALS.getAsMap(settings).keySet(); - + this.remoteClusterCredentialsManager = new RemoteClusterCredentialsManager(settings); if (remoteClusterServerEnabled) { 
registerRemoteClusterHandshakeRequestHandler(transportService); } @@ -305,6 +304,14 @@ private synchronized void updateSkipUnavailable(String clusterAlias, Boolean ski } } + public void updateRemoteClusterCredentials(Settings settings) { + remoteClusterCredentialsManager.updateClusterCredentials(settings); + } + + public RemoteClusterCredentialsManager getRemoteClusterCredentialsManager() { + return remoteClusterCredentialsManager; + } + @Override protected void updateRemoteCluster(String clusterAlias, Settings settings) { CountDownLatch latch = new CountDownLatch(1); @@ -363,12 +370,7 @@ synchronized void updateRemoteCluster( if (remote == null) { // this is a new cluster we have to add a new representation Settings finalSettings = Settings.builder().put(this.settings, false).put(newSettings, false).build(); - remote = new RemoteClusterConnection( - finalSettings, - clusterAlias, - transportService, - credentialsProtectedRemoteClusters.contains(clusterAlias) - ); + remote = new RemoteClusterConnection(finalSettings, clusterAlias, transportService, remoteClusterCredentialsManager); remoteClusters.put(clusterAlias, remote); remote.ensureConnected(listener.map(ignored -> RemoteClusterConnectionStatus.CONNECTED)); } else if (remote.shouldRebuildConnection(newSettings)) { @@ -380,12 +382,7 @@ synchronized void updateRemoteCluster( } remoteClusters.remove(clusterAlias); Settings finalSettings = Settings.builder().put(this.settings, false).put(newSettings, false).build(); - remote = new RemoteClusterConnection( - finalSettings, - clusterAlias, - transportService, - credentialsProtectedRemoteClusters.contains(clusterAlias) - ); + remote = new RemoteClusterConnection(finalSettings, clusterAlias, transportService, remoteClusterCredentialsManager); remoteClusters.put(clusterAlias, remote); remote.ensureConnected(listener.map(ignored -> RemoteClusterConnectionStatus.RECONNECTED)); } else { diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteConnectionManager.java b/server/src/main/java/org/elasticsearch/transport/RemoteConnectionManager.java index b16734b273376..3b531d54fb033 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteConnectionManager.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteConnectionManager.java @@ -12,6 +12,7 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Releasable; @@ -25,18 +26,19 @@ import java.util.Set; import java.util.concurrent.atomic.AtomicLong; -import static org.elasticsearch.transport.RemoteClusterPortSettings.REMOTE_CLUSTER_PROFILE; import static org.elasticsearch.transport.RemoteClusterService.REMOTE_CLUSTER_HANDSHAKE_ACTION_NAME; public class RemoteConnectionManager implements ConnectionManager { private final String clusterAlias; + private final RemoteClusterCredentialsManager credentialsManager; private final ConnectionManager delegate; private final AtomicLong counter = new AtomicLong(); private volatile List connectedNodes = Collections.emptyList(); - RemoteConnectionManager(String clusterAlias, ConnectionManager delegate) { + RemoteConnectionManager(String clusterAlias, RemoteClusterCredentialsManager credentialsManager, ConnectionManager delegate) { this.clusterAlias = clusterAlias; + this.credentialsManager = credentialsManager; 
this.delegate = delegate; this.delegate.addListener(new TransportConnectionListener() { @Override @@ -51,6 +53,10 @@ public void onNodeDisconnected(DiscoveryNode node, Transport.Connection connecti }); } + public RemoteClusterCredentialsManager getCredentialsManager() { + return credentialsManager; + } + /** * Remote cluster connections have a different lifecycle from intra-cluster connections. Use {@link #connectToRemoteClusterNode} * instead of this method. @@ -95,13 +101,7 @@ public void openConnection(DiscoveryNode node, @Nullable ConnectionProfile profi node, profile, listener.delegateFailureAndWrap( - (l, connection) -> l.onResponse( - new InternalRemoteConnection( - connection, - clusterAlias, - profile != null ? profile.getTransportProfile() : getConnectionProfile().getTransportProfile() - ) - ) + (l, connection) -> l.onResponse(wrapConnectionWithRemoteClusterInfo(connection, clusterAlias, credentialsManager)) ) ); } @@ -182,16 +182,35 @@ public void closeNoBlock() { * @return a cluster alias if the connection target a node in the remote cluster, otherwise an empty result */ public static Optional resolveRemoteClusterAlias(Transport.Connection connection) { + return resolveRemoteClusterAliasWithCredentials(connection).map(RemoteClusterAliasWithCredentials::clusterAlias); + } + + public record RemoteClusterAliasWithCredentials(String clusterAlias, @Nullable SecureString credentials) { + @Override + public String toString() { + return "RemoteClusterAliasWithCredentials{clusterAlias='" + clusterAlias + "', credentials='::es_redacted::'}"; + } + } + + /** + * This method returns information (alias and credentials) for the remote cluster of the given transport connection. + * Either or both of alias and credentials can be null depending on the connection.
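For orientation, a minimal, hypothetical sketch of how a caller (for example, a security interceptor) might use the new resolveRemoteClusterAliasWithCredentials method to distinguish basic-model from advanced-model connections; ConnectionInspectionExample and usesAdvancedRcsModel are invented names, while the method and record come from this hunk.

import org.elasticsearch.common.settings.SecureString;
import org.elasticsearch.transport.RemoteConnectionManager;
import org.elasticsearch.transport.RemoteConnectionManager.RemoteClusterAliasWithCredentials;
import org.elasticsearch.transport.Transport;

import java.util.Optional;

final class ConnectionInspectionExample {
    static boolean usesAdvancedRcsModel(Transport.Connection connection) {
        final Optional<RemoteClusterAliasWithCredentials> info =
            RemoteConnectionManager.resolveRemoteClusterAliasWithCredentials(connection);
        // Empty => not a remote-cluster connection; null credentials => basic model.
        final SecureString credentials = info.map(RemoteClusterAliasWithCredentials::credentials).orElse(null);
        return credentials != null;
    }
}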
+ * + * @param connection the transport connection for which to resolve a remote cluster alias + */ + public static Optional<RemoteClusterAliasWithCredentials> resolveRemoteClusterAliasWithCredentials(Transport.Connection connection) { Transport.Connection unwrapped = TransportService.unwrapConnection(connection); if (unwrapped instanceof InternalRemoteConnection remoteConnection) { - return Optional.of(remoteConnection.getClusterAlias()); + return Optional.of( + new RemoteClusterAliasWithCredentials(remoteConnection.getClusterAlias(), remoteConnection.getClusterCredentials()) + ); } return Optional.empty(); } private Transport.Connection getConnectionInternal(DiscoveryNode node) throws NodeNotConnectedException { Transport.Connection connection = delegate.getConnection(node); - return new InternalRemoteConnection(connection, clusterAlias, getConnectionProfile().getTransportProfile()); + return wrapConnectionWithRemoteClusterInfo(connection, clusterAlias, credentialsManager); } private synchronized void addConnectedNode(DiscoveryNode addedNode) { @@ -297,21 +316,27 @@ private static final class InternalRemoteConnection implements Transport.Connect private static final Logger logger = LogManager.getLogger(InternalRemoteConnection.class); private final Transport.Connection connection; private final String clusterAlias; - private final boolean isRemoteClusterProfile; + @Nullable + private final SecureString clusterCredentials; - InternalRemoteConnection(Transport.Connection connection, String clusterAlias, String transportProfile) { + private InternalRemoteConnection(Transport.Connection connection, String clusterAlias, @Nullable SecureString clusterCredentials) { assert false == connection instanceof InternalRemoteConnection : "should not double wrap"; assert false == connection instanceof ProxyConnection : "proxy connection should wrap internal remote connection, not the other way around"; - this.clusterAlias = Objects.requireNonNull(clusterAlias); this.connection = Objects.requireNonNull(connection); - this.isRemoteClusterProfile = REMOTE_CLUSTER_PROFILE.equals(Objects.requireNonNull(transportProfile)); + this.clusterAlias = Objects.requireNonNull(clusterAlias); + this.clusterCredentials = clusterCredentials; } public String getClusterAlias() { return clusterAlias; } + @Nullable + public SecureString getClusterCredentials() { + return clusterCredentials; + } + @Override public DiscoveryNode getNode() { return connection.getNode(); } @@ -321,7 +346,7 @@ public DiscoveryNode getNode() { public void sendRequest(long requestId, String action, TransportRequest request, TransportRequestOptions options) throws IOException, TransportException { final String effectiveAction; - if (isRemoteClusterProfile && TransportService.HANDSHAKE_ACTION_NAME.equals(action)) { + if (clusterCredentials != null && TransportService.HANDSHAKE_ACTION_NAME.equals(action)) { logger.trace("sending remote cluster specific handshake to node [{}] of remote cluster [{}]", getNode(), clusterAlias); effectiveAction = REMOTE_CLUSTER_HANDSHAKE_ACTION_NAME; } else { @@ -389,8 +414,8 @@ public boolean hasReferences() { static InternalRemoteConnection wrapConnectionWithRemoteClusterInfo( Transport.Connection connection, String clusterAlias, - String transportProfile + RemoteClusterCredentialsManager credentialsManager ) { - return new InternalRemoteConnection(connection, clusterAlias, transportProfile); + return new InternalRemoteConnection(connection, clusterAlias, credentialsManager.resolveCredentials(clusterAlias)); } } diff --git
a/server/src/main/java/org/elasticsearch/transport/SniffConnectionStrategy.java b/server/src/main/java/org/elasticsearch/transport/SniffConnectionStrategy.java index 0dcad9cf6864c..0f68a58faf463 100644 --- a/server/src/main/java/org/elasticsearch/transport/SniffConnectionStrategy.java +++ b/server/src/main/java/org/elasticsearch/transport/SniffConnectionStrategy.java @@ -357,7 +357,11 @@ private ConnectionManager.ConnectionValidator getConnectionValidator(DiscoveryNo : "transport profile must be consistent between the connection manager and the actual profile"; transportService.connectionValidator(node) .validate( - RemoteConnectionManager.wrapConnectionWithRemoteClusterInfo(connection, clusterAlias, profile.getTransportProfile()), + RemoteConnectionManager.wrapConnectionWithRemoteClusterInfo( + connection, + clusterAlias, + connectionManager.getCredentialsManager() + ), profile, listener ); diff --git a/server/src/main/resources/META-INF/services/org.apache.lucene.codecs.PostingsFormat b/server/src/main/resources/META-INF/services/org.apache.lucene.codecs.PostingsFormat index 119a708832948..bdb1b75be4843 100644 --- a/server/src/main/resources/META-INF/services/org.apache.lucene.codecs.PostingsFormat +++ b/server/src/main/resources/META-INF/services/org.apache.lucene.codecs.PostingsFormat @@ -1,2 +1,3 @@ org.elasticsearch.index.codec.bloomfilter.ES85BloomFilterPostingsFormat org.elasticsearch.index.codec.bloomfilter.ES87BloomFilterPostingsFormat +org.elasticsearch.index.codec.postings.ES812PostingsFormat diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/DesiredBalanceResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/DesiredBalanceResponseTests.java index 7b452beac0938..a063c590a8c07 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/DesiredBalanceResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/DesiredBalanceResponseTests.java @@ -179,9 +179,14 @@ public void testToXContent() throws IOException { randomClusterInfo() ); - Map json = createParser( - ChunkedToXContent.wrapAsToXContent(response).toXContent(XContentFactory.jsonBuilder(), ToXContent.EMPTY_PARAMS) - ).map(); + Map json; + try ( + var parser = createParser( + ChunkedToXContent.wrapAsToXContent(response).toXContent(XContentFactory.jsonBuilder(), ToXContent.EMPTY_PARAMS) + ) + ) { + json = parser.map(); + } assertThat(json.keySet(), containsInAnyOrder("stats", "cluster_balance_stats", "routing_table", "cluster_info")); // stats diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/TransportDeleteDesiredBalanceActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/TransportDeleteDesiredBalanceActionTests.java index e702446406238..6eb3310623b92 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/TransportDeleteDesiredBalanceActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/TransportDeleteDesiredBalanceActionTests.java @@ -78,7 +78,7 @@ public void testReturnsErrorIfAllocatorIsNotDesiredBalanced() throws Exception { mock(ShardsAllocator.class) ).masterOperation(mock(Task.class), new DesiredBalanceRequest(), ClusterState.EMPTY_STATE, listener); - var exception = expectThrows(ResourceNotFoundException.class, listener::actionGet); + var exception = expectThrows(ResourceNotFoundException.class, listener); assertThat(exception.getMessage(), 
equalTo("Desired balance allocator is not in use, no desired balance found")); } diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java index 8334c98e5fca0..86ccd9807cf9f 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java @@ -697,7 +697,7 @@ protected void taskOperation( cancellationFuture.actionGet(); logger.info("Parent task is now cancelled counting down task latch"); taskLatch.countDown(); - expectThrows(TaskCancelledException.class, taskFuture::actionGet); + expectThrows(TaskCancelledException.class, taskFuture); // Release all node tasks and wait for response checkLatch.countDown(); @@ -775,7 +775,7 @@ protected void taskOperation( reachabilityChecker.ensureUnreachable(); } - expectThrows(TaskCancelledException.class, taskFuture::actionGet); + expectThrows(TaskCancelledException.class, taskFuture); blockedActionLatch.countDown(); NodesResponse responses = future.get(10, TimeUnit.SECONDS); @@ -848,7 +848,7 @@ protected void taskOperation( reachabilityChecker.ensureUnreachable(); } - expectThrows(TaskCancelledException.class, taskFuture::actionGet); + expectThrows(TaskCancelledException.class, taskFuture); } public void testTaskLevelActionFailures() throws Exception { diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestTests.java index a1d2ef33d85f3..774093834e941 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestTests.java @@ -61,10 +61,11 @@ private void doFromXContentTestWithRandomFields(boolean addRandomFields) throws () -> randomAlphaOfLengthBetween(3, 10) ) ); - XContentParseException iae = expectThrows( - XContentParseException.class, - () -> ClusterUpdateSettingsRequest.fromXContent(createParser(xContentType.xContent(), mutated)) - ); + XContentParseException iae = expectThrows(XContentParseException.class, () -> { + try (var parser = createParser(xContentType.xContent(), mutated)) { + ClusterUpdateSettingsRequest.fromXContent(parser); + } + }); assertThat(iae.getMessage(), containsString("[cluster_update_settings_request] unknown field [" + unsupportedField + "]")); } else { try (XContentParser parser = createParser(xContentType.xContent(), originalBytes)) { diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestTests.java index 2f151e516cde4..97a5775f7c69f 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestTests.java @@ -99,15 +99,18 @@ public void testToXContent() throws IOException { } XContentBuilder builder = original.toXContent(XContentFactory.jsonBuilder(), new MapParams(Collections.emptyMap())); - XContentParser parser = XContentType.JSON.xContent() - 
.createParser(NamedXContentRegistry.EMPTY, null, BytesReference.bytes(builder).streamInput()); - Map map = parser.mapOrdered(); - CreateSnapshotRequest processed = new CreateSnapshotRequest((String) map.get("repository"), (String) map.get("snapshot")); - processed.waitForCompletion(original.waitForCompletion()); - processed.masterNodeTimeout(original.masterNodeTimeout()); - processed.source(map); - - assertEquals(original, processed); + try ( + XContentParser parser = XContentType.JSON.xContent() + .createParser(NamedXContentRegistry.EMPTY, null, BytesReference.bytes(builder).streamInput()) + ) { + Map map = parser.mapOrdered(); + CreateSnapshotRequest processed = new CreateSnapshotRequest((String) map.get("repository"), (String) map.get("snapshot")); + processed.waitForCompletion(original.waitForCompletion()); + processed.masterNodeTimeout(original.masterNodeTimeout()); + processed.source(map); + + assertEquals(original, processed); + } } public void testSizeCheck() { diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestTests.java index 922e7e03c7600..56216d2670150 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestTests.java @@ -130,9 +130,13 @@ public void testSource() throws IOException { original.snapshotUuid(null); // cannot be set via the REST API original.quiet(false); // cannot be set via the REST API XContentBuilder builder = original.toXContent(XContentFactory.jsonBuilder(), new ToXContent.MapParams(Collections.emptyMap())); - XContentParser parser = XContentType.JSON.xContent() - .createParser(NamedXContentRegistry.EMPTY, null, BytesReference.bytes(builder).streamInput()); - Map map = parser.mapOrdered(); + Map map; + try ( + XContentParser parser = XContentType.JSON.xContent() + .createParser(NamedXContentRegistry.EMPTY, null, BytesReference.bytes(builder).streamInput()) + ) { + map = parser.mapOrdered(); + } // we will only restore properties from the map that are contained in the request body. 
All other // properties are restored from the original (in the actual REST action this is restored from the @@ -174,8 +178,11 @@ public void testToStringWillIncludeSkipOperatorOnlyState() { private Map convertRequestToMap(RestoreSnapshotRequest request) throws IOException { XContentBuilder builder = request.toXContent(XContentFactory.jsonBuilder(), new ToXContent.MapParams(Collections.emptyMap())); - XContentParser parser = XContentType.JSON.xContent() - .createParser(NamedXContentRegistry.EMPTY, null, BytesReference.bytes(builder).streamInput()); - return parser.mapOrdered(); + try ( + XContentParser parser = XContentType.JSON.xContent() + .createParser(NamedXContentRegistry.EMPTY, null, BytesReference.bytes(builder).streamInput()) + ) { + return parser.mapOrdered(); + } } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/diskusage/IndexDiskUsageAnalyzerTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/diskusage/IndexDiskUsageAnalyzerTests.java index 6c79946cce15f..d6cf90034f5b5 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/diskusage/IndexDiskUsageAnalyzerTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/diskusage/IndexDiskUsageAnalyzerTests.java @@ -14,7 +14,6 @@ import org.apache.lucene.codecs.lucene90.Lucene90DocValuesFormat; import org.apache.lucene.codecs.lucene99.Lucene99Codec; import org.apache.lucene.codecs.lucene99.Lucene99HnswVectorsFormat; -import org.apache.lucene.codecs.lucene99.Lucene99PostingsFormat; import org.apache.lucene.codecs.perfield.PerFieldDocValuesFormat; import org.apache.lucene.codecs.perfield.PerFieldKnnVectorsFormat; import org.apache.lucene.codecs.perfield.PerFieldPostingsFormat; @@ -67,6 +66,7 @@ import org.apache.lucene.util.FixedBitSet; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.core.IOUtils; +import org.elasticsearch.index.codec.postings.ES812PostingsFormat; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.store.LuceneFilesExtensions; import org.elasticsearch.test.ESTestCase; @@ -642,7 +642,7 @@ static void rewriteIndexWithPerFieldCodec(Directory source, CodecMode mode, Dire .setCodec(new Lucene99Codec(mode.mode()) { @Override public PostingsFormat getPostingsFormatForField(String field) { - return new Lucene99PostingsFormat(); + return new ES812PostingsFormat(); } @Override diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequestTests.java index 8b700ecb9fc01..1290729252d0d 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequestTests.java @@ -66,7 +66,9 @@ public void testConditionsParsing() throws Exception { .field("min_primary_shard_docs", 10) .endObject() .endObject(); - request.fromXContent(false, createParser(builder)); + try (var parser = createParser(builder)) { + request.fromXContent(false, parser); + } Map> conditions = request.getConditions().getConditions(); assertThat(conditions.size(), equalTo(10)); MaxAgeCondition maxAgeCondition = (MaxAgeCondition) conditions.get(MaxAgeCondition.NAME); @@ -118,7 +120,9 @@ public void testParsingWithIndexSettings() throws Exception { .endObject() .endObject() .endObject(); - request.fromXContent(false, createParser(builder)); + try (var parser = createParser(builder)) { + 
request.fromXContent(false, parser); + } Map> conditions = request.getConditions().getConditions(); assertThat(conditions.size(), equalTo(3)); assertThat(request.getCreateIndexRequest().mappings(), containsString("not_analyzed")); @@ -139,8 +143,9 @@ public void testTypelessMappingParsing() throws Exception { .endObject() .endObject(); - request.fromXContent(false, createParser(builder)); - + try (var parser = createParser(builder)) { + request.fromXContent(false, parser); + } CreateIndexRequest createIndexRequest = request.getCreateIndexRequest(); String mapping = createIndexRequest.mappings(); assertNotNull(mapping); @@ -198,7 +203,11 @@ public void testUnknownFields() throws IOException { } builder.endObject(); BytesReference mutated = XContentTestUtils.insertRandomFields(xContentType, BytesReference.bytes(builder), null, random()); - expectThrows(XContentParseException.class, () -> request.fromXContent(false, createParser(xContentType.xContent(), mutated))); + expectThrows(XContentParseException.class, () -> { + try (var parser = createParser(xContentType.xContent(), mutated)) { + request.fromXContent(false, parser); + } + }); } public void testValidation() { diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresActionTests.java index dfafcd0662290..ffe42722b308d 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/shards/TransportIndicesShardStoresActionTests.java @@ -63,13 +63,12 @@ void runTest() { request.shardStatuses("green", "red"); // newly-created shards are in yellow health so this matches none of them final var future = new PlainActionFuture(); action.execute( - new CancellableTask(1, "transport", IndicesShardStoresAction.NAME, "", TaskId.EMPTY_TASK_ID, Map.of()), + new CancellableTask(1, "transport", TransportIndicesShardStoresAction.TYPE.name(), "", TaskId.EMPTY_TASK_ID, Map.of()), request, future ); - assertTrue(future.isDone()); - final var response = future.actionGet(0L); + final var response = future.result(); assertThat(response.getFailures(), empty()); assertThat(response.getStoreStatuses(), anEmptyMap()); assertThat(shardsWithFailures, empty()); @@ -86,7 +85,7 @@ void runTest() { request.shardStatuses(randomFrom("yellow", "all")); // newly-created shards are in yellow health so this matches all of them final var future = new PlainActionFuture(); action.execute( - new CancellableTask(1, "transport", IndicesShardStoresAction.NAME, "", TaskId.EMPTY_TASK_ID, Map.of()), + new CancellableTask(1, "transport", TransportIndicesShardStoresAction.TYPE.name(), "", TaskId.EMPTY_TASK_ID, Map.of()), request, future ); @@ -123,7 +122,14 @@ public void testCancellation() { runTest(new TestHarness() { @Override void runTest() { - final var task = new CancellableTask(1, "transport", IndicesShardStoresAction.NAME, "", TaskId.EMPTY_TASK_ID, Map.of()); + final var task = new CancellableTask( + 1, + "transport", + TransportIndicesShardStoresAction.TYPE.name(), + "", + TaskId.EMPTY_TASK_ID, + Map.of() + ); final var request = new IndicesShardStoresRequest(); request.shardStatuses(randomFrom("yellow", "all")); final var future = new PlainActionFuture(); @@ -132,8 +138,7 @@ void runTest() { listExpected = false; assertFalse(future.isDone()); deterministicTaskQueue.runAllTasks(); - 
assertTrue(future.isDone()); - expectThrows(TaskCancelledException.class, () -> future.actionGet(0L)); + expectThrows(TaskCancelledException.class, future::result); } }); } @@ -146,16 +151,15 @@ void runTest() { request.shardStatuses(randomFrom("yellow", "all")); final var future = new PlainActionFuture(); action.execute( - new CancellableTask(1, "transport", IndicesShardStoresAction.NAME, "", TaskId.EMPTY_TASK_ID, Map.of()), + new CancellableTask(1, "transport", TransportIndicesShardStoresAction.TYPE.name(), "", TaskId.EMPTY_TASK_ID, Map.of()), request, future ); assertFalse(future.isDone()); failOneRequest = true; deterministicTaskQueue.runAllTasks(); - assertTrue(future.isDone()); assertFalse(failOneRequest); - assertEquals("simulated", expectThrows(ElasticsearchException.class, () -> future.actionGet(0L)).getMessage()); + assertEquals("simulated", expectThrows(ElasticsearchException.class, future::result).getMessage()); } }); } diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTests.java index 1df6c7e4433a7..aa8c14a4f3f74 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionTests.java @@ -346,7 +346,7 @@ public void testRejectCoordination() throws Exception { threadPool.startForcingRejections(); PlainActionFuture future = new PlainActionFuture<>(); ActionTestUtils.execute(bulkAction, null, bulkRequest, future); - expectThrows(EsRejectedExecutionException.class, future::actionGet); + expectThrows(EsRejectedExecutionException.class, future); } finally { threadPool.stopForcingRejections(); } @@ -360,7 +360,7 @@ public void testRejectionAfterCreateIndexIsPropagated() throws Exception { bulkAction.beforeIndexCreation = threadPool::startForcingRejections; PlainActionFuture future = new PlainActionFuture<>(); ActionTestUtils.execute(bulkAction, null, bulkRequest, future); - expectThrows(EsRejectedExecutionException.class, future::actionGet); + expectThrows(EsRejectedExecutionException.class, future); assertTrue(bulkAction.indexCreated); } finally { threadPool.stopForcingRejections(); diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java index 54296d29d360c..bbd1837730689 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java @@ -26,6 +26,7 @@ import org.elasticsearch.action.update.UpdateResponse; import org.elasticsearch.client.internal.Requests; import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; @@ -36,6 +37,7 @@ import org.elasticsearch.index.bulk.stats.ShardBulkStats; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.VersionConflictEngineException; +import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.Mapping; import org.elasticsearch.index.mapper.MetadataFieldMapper; @@ -274,7 +276,13 @@ public void 
testExecuteBulkIndexRequestWithMappingUpdates() throws Exception { when(shard.applyIndexOperationOnPrimary(anyLong(), any(), any(), anyLong(), anyLong(), anyLong(), anyBoolean())).thenReturn( mappingUpdate ); - when(shard.mapperService()).thenReturn(mock(MapperService.class)); + MapperService mapperService = mock(MapperService.class); + when(shard.mapperService()).thenReturn(mapperService); + + // merged mapping source needs to be different from previous one for the master node to be invoked + DocumentMapper mergedDoc = mock(DocumentMapper.class); + when(mapperService.merge(any(), any(CompressedXContent.class), any())).thenReturn(mergedDoc); + when(mergedDoc.mappingSource()).thenReturn(CompressedXContent.fromJSON("{}")); randomlySetIgnoredPrimaryResponse(items[0]); @@ -922,9 +930,14 @@ public void testRetries() throws Exception { }); when(shard.indexSettings()).thenReturn(indexSettings); when(shard.shardId()).thenReturn(shardId); - when(shard.mapperService()).thenReturn(mock(MapperService.class)); + MapperService mapperService = mock(MapperService.class); + when(shard.mapperService()).thenReturn(mapperService); when(shard.getBulkOperationListener()).thenReturn(mock(ShardBulkStats.class)); + DocumentMapper mergedDocMapper = mock(DocumentMapper.class); + when(mergedDocMapper.mappingSource()).thenReturn(CompressedXContent.fromJSON("{}")); + when(mapperService.merge(any(), any(CompressedXContent.class), any())).thenReturn(mergedDocMapper); + UpdateHelper updateHelper = mock(UpdateHelper.class); when(updateHelper.prepare(any(), eq(shard), any())).thenAnswer( invocation -> new UpdateHelper.Result( @@ -1012,7 +1025,13 @@ public void testForceExecutionOnRejectionAfterMappingUpdate() throws Exception { success2 ); when(shard.getFailedIndexResult(any(EsRejectedExecutionException.class), anyLong(), anyString())).thenCallRealMethod(); - when(shard.mapperService()).thenReturn(mock(MapperService.class)); + MapperService mapperService = mock(MapperService.class); + when(shard.mapperService()).thenReturn(mapperService); + + // merged mapping source needs to be different from previous one for the master node to be invoked + DocumentMapper mergedDoc = mock(DocumentMapper.class); + when(mapperService.merge(any(), any(CompressedXContent.class), any())).thenReturn(mergedDoc); + when(mergedDoc.mappingSource()).thenReturn(CompressedXContent.fromJSON("{}")); randomlySetIgnoredPrimaryResponse(items[0]); items[0].decRef(); @@ -1140,6 +1159,136 @@ public void testPerformOnPrimaryReportsBulkStats() throws Exception { } } + public void testNoopMappingUpdateInfiniteLoopPrevention() throws Exception { + Engine.IndexResult mappingUpdate = new Engine.IndexResult( + new Mapping(mock(RootObjectMapper.class), new MetadataFieldMapper[0], Collections.emptyMap()), + "id" + ); + + IndexShard shard = mockShard(); + when(shard.applyIndexOperationOnPrimary(anyLong(), any(), any(), anyLong(), anyLong(), anyLong(), anyBoolean())).thenReturn( + mappingUpdate + ); + MapperService mapperService = mock(MapperService.class); + when(shard.mapperService()).thenReturn(mapperService); + + DocumentMapper documentMapper = mock(DocumentMapper.class); + when(documentMapper.mappingSource()).thenReturn(CompressedXContent.fromJSON("{}")); + // returning the current document mapper as the merge result to simulate a noop mapping update + when(mapperService.documentMapper()).thenReturn(documentMapper); + when(mapperService.merge(any(), any(CompressedXContent.class), any())).thenReturn(documentMapper); + + UpdateHelper updateHelper = 
mock(UpdateHelper.class); + when(updateHelper.prepare(any(), eq(shard), any())).thenReturn( + new UpdateHelper.Result( + new IndexRequest("index").id("id").source(Requests.INDEX_CONTENT_TYPE, "field", "value"), + randomBoolean() ? DocWriteResponse.Result.CREATED : DocWriteResponse.Result.UPDATED, + Collections.singletonMap("field", "value"), + Requests.INDEX_CONTENT_TYPE + ) + ); + + BulkItemRequest[] items = new BulkItemRequest[] { + new BulkItemRequest(0, new UpdateRequest("index", "id").doc(Requests.INDEX_CONTENT_TYPE, "field", "value")) }; + BulkShardRequest bulkShardRequest = new BulkShardRequest(shardId, RefreshPolicy.NONE, items); + + AssertionError error = expectThrows( + AssertionError.class, + () -> TransportShardBulkAction.performOnPrimary( + bulkShardRequest, + shard, + updateHelper, + threadPool::absoluteTimeInMillis, + (update, shardId, listener) -> fail("the master should not be contacted as the operation yielded a noop mapping update"), + listener -> listener.onResponse(null), + ActionTestUtils.assertNoFailureListener(result -> {}), + threadPool, + Names.WRITE + ) + ); + assertThat( + error.getMessage(), + equalTo( + "On retry, this indexing request resulted in another noop mapping update." + + " Failing the indexing operation to prevent an infinite retry loop." + ) + ); + } + + public void testNoopMappingUpdateSuccessOnRetry() throws Exception { + Engine.IndexResult mappingUpdate = new Engine.IndexResult( + new Mapping(mock(RootObjectMapper.class), new MetadataFieldMapper[0], Collections.emptyMap()), + "id" + ); + Translog.Location resultLocation = new Translog.Location(42, 42, 42); + Engine.IndexResult successfulResult = new FakeIndexResult(1, 1, 10, true, resultLocation, "id"); + + IndexShard shard = mockShard(); + when(shard.applyIndexOperationOnPrimary(anyLong(), any(), any(), anyLong(), anyLong(), anyLong(), anyBoolean())).thenReturn( + // on the first invocation, return a result that attempts a mapping update + // the mapping update will be a noop and the operation is retried without contacting the master + mappingUpdate, + // the second invocation also returns a mapping update result + // this doesn't trigger the infinite loop detection because MapperService#mappingVersion returns a different mapping version + mappingUpdate, + // on the third attempt, return a successful result, indicating that no mapping update needs to be executed + successfulResult + ); + + MapperService mapperService = mock(MapperService.class); + when(shard.mapperService()).thenReturn(mapperService); + + DocumentMapper documentMapper = mock(DocumentMapper.class); + when(documentMapper.mappingSource()).thenReturn(CompressedXContent.fromJSON("{}")); + when(mapperService.documentMapper()).thenReturn(documentMapper); + // returning the current document mapper as the merge result to simulate a noop mapping update + when(mapperService.merge(any(), any(CompressedXContent.class), any())).thenReturn(documentMapper); + // on the second invocation, the mapping version is incremented + // so that the second mapping update attempt doesn't trigger the infinite loop prevention + when(mapperService.mappingVersion()).thenReturn(0L, 1L); + + UpdateHelper updateHelper = mock(UpdateHelper.class); + when(updateHelper.prepare(any(), eq(shard), any())).thenReturn( + new UpdateHelper.Result( + new IndexRequest("index").id("id").source(Requests.INDEX_CONTENT_TYPE, "field", "value"), + randomBoolean() ?
DocWriteResponse.Result.CREATED : DocWriteResponse.Result.UPDATED, + Collections.singletonMap("field", "value"), + Requests.INDEX_CONTENT_TYPE + ) + ); + + BulkItemRequest[] items = new BulkItemRequest[] { + new BulkItemRequest(0, new UpdateRequest("index", "id").doc(Requests.INDEX_CONTENT_TYPE, "field", "value")) }; + BulkShardRequest bulkShardRequest = new BulkShardRequest(shardId, RefreshPolicy.NONE, items); + + final CountDownLatch latch = new CountDownLatch(1); + TransportShardBulkAction.performOnPrimary( + bulkShardRequest, + shard, + updateHelper, + threadPool::absoluteTimeInMillis, + (update, shardId, listener) -> fail("the master should not be contacted as the operation yielded a noop mapping update"), + listener -> listener.onFailure(new IllegalStateException("no failure expected")), + new LatchedActionListener<>(ActionTestUtils.assertNoFailureListener(result -> { + BulkItemResponse primaryResponse = result.replicaRequest().items()[0].getPrimaryResponse(); + assertFalse(primaryResponse.isFailed()); + }), latch), + threadPool, + Names.WRITE + ); + + latch.await(); + verify(mapperService, times(2)).merge(any(), any(CompressedXContent.class), any()); + } + + private IndexShard mockShard() { + IndexShard shard = mock(IndexShard.class); + when(shard.shardId()).thenReturn(shardId); + when(shard.getBulkOperationListener()).thenReturn(mock(ShardBulkStats.class)); + when(shard.getFailedIndexResult(any(Exception.class), anyLong(), anyString())).thenCallRealMethod(); + return shard; + } + private void randomlySetIgnoredPrimaryResponse(BulkItemRequest primaryRequest) { if (randomBoolean()) { // add a response to the request and thereby check that it is ignored for the primary. diff --git a/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequestTests.java b/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequestTests.java index 3af2639538f0d..ae25a5b597ec3 100644 --- a/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequestTests.java @@ -152,15 +152,15 @@ public void testToXContent() throws IOException { } public void testFromXContent() throws IOException { - XContentParser parser = createParser(JsonXContent.jsonXContent, "{ \"fields\" : [\"FOO\"] }"); - FieldCapabilitiesRequest request = new FieldCapabilitiesRequest(); - ObjectParser PARSER = new ObjectParser<>("field_caps_request"); - PARSER.declareStringArray(fromList(String.class, FieldCapabilitiesRequest::fields), new ParseField("fields")); - - PARSER.parse(parser, request, null); + try (XContentParser parser = createParser(JsonXContent.jsonXContent, "{ \"fields\" : [\"FOO\"] }")) { + FieldCapabilitiesRequest request = new FieldCapabilitiesRequest(); + ObjectParser PARSER = new ObjectParser<>("field_caps_request"); + PARSER.declareStringArray(fromList(String.class, FieldCapabilitiesRequest::fields), new ParseField("fields")); - assertArrayEquals(request.fields(), new String[] { "FOO" }); + PARSER.parse(parser, request, null); + assertArrayEquals(request.fields(), new String[] { "FOO" }); + } } public void testValidation() { diff --git a/server/src/test/java/org/elasticsearch/action/get/MultiGetRequestTests.java b/server/src/test/java/org/elasticsearch/action/get/MultiGetRequestTests.java index eb9cfa4a6939c..76fdef3d06c1f 100644 --- a/server/src/test/java/org/elasticsearch/action/get/MultiGetRequestTests.java +++ 
b/server/src/test/java/org/elasticsearch/action/get/MultiGetRequestTests.java @@ -80,24 +80,25 @@ public void testUnexpectedField() throws IOException { } public void testAddWithValidSourceValueIsAccepted() throws Exception { - XContentParser parser = createParser( - XContentFactory.jsonBuilder() - .startObject() - .startArray("docs") - .startObject() - .field("_source", randomFrom("false", "true")) - .endObject() - .startObject() - .field("_source", randomBoolean()) - .endObject() - .endArray() - .endObject() - ); - - MultiGetRequest multiGetRequest = new MultiGetRequest(); - multiGetRequest.add(randomAlphaOfLength(5), null, FetchSourceContext.FETCH_SOURCE, null, parser, true); - - assertEquals(2, multiGetRequest.getItems().size()); + try ( + XContentParser parser = createParser( + XContentFactory.jsonBuilder() + .startObject() + .startArray("docs") + .startObject() + .field("_source", randomFrom("false", "true")) + .endObject() + .startObject() + .field("_source", randomBoolean()) + .endObject() + .endArray() + .endObject() + ) + ) { + MultiGetRequest multiGetRequest = new MultiGetRequest(); + multiGetRequest.add(randomAlphaOfLength(5), null, FetchSourceContext.FETCH_SOURCE, null, parser, true); + assertEquals(2, multiGetRequest.getItems().size()); + } } public void testXContentSerialization() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/action/ingest/GetPipelineResponseTests.java b/server/src/test/java/org/elasticsearch/action/ingest/GetPipelineResponseTests.java index e6efc00209ba5..6f5841f3d2a03 100644 --- a/server/src/test/java/org/elasticsearch/action/ingest/GetPipelineResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/ingest/GetPipelineResponseTests.java @@ -62,11 +62,15 @@ public void testXContentDeserialization() throws IOException { Map pipelinesMap = createPipelineConfigMap(); GetPipelineResponse response = new GetPipelineResponse(new ArrayList<>(pipelinesMap.values())); XContentBuilder builder = response.toXContent(getRandomXContentBuilder(), ToXContent.EMPTY_PARAMS); - XContentParser parser = builder.generator() - .contentType() - .xContent() - .createParser(xContentRegistry(), LoggingDeprecationHandler.INSTANCE, BytesReference.bytes(builder).streamInput()); - GetPipelineResponse parsedResponse = GetPipelineResponse.fromXContent(parser); + GetPipelineResponse parsedResponse; + try ( + XContentParser parser = builder.generator() + .contentType() + .xContent() + .createParser(xContentRegistry(), LoggingDeprecationHandler.INSTANCE, BytesReference.bytes(builder).streamInput()) + ) { + parsedResponse = GetPipelineResponse.fromXContent(parser); + } List actualPipelines = response.pipelines(); List parsedPipelines = parsedResponse.pipelines(); assertEquals(actualPipelines.size(), parsedPipelines.size()); diff --git a/server/src/test/java/org/elasticsearch/action/search/AbstractSearchAsyncActionTests.java b/server/src/test/java/org/elasticsearch/action/search/AbstractSearchAsyncActionTests.java index 7f5b5f7716f3e..8cbcf4962e156 100644 --- a/server/src/test/java/org/elasticsearch/action/search/AbstractSearchAsyncActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/AbstractSearchAsyncActionTests.java @@ -18,10 +18,10 @@ import org.elasticsearch.index.Index; import org.elasticsearch.index.query.MatchAllQueryBuilder; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchShardTarget; import 
org.elasticsearch.search.internal.AliasFilter; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.internal.ShardSearchContextId; import org.elasticsearch.search.internal.ShardSearchRequest; import org.elasticsearch.test.ESTestCase; @@ -194,7 +194,10 @@ public void testSendSearchResponseDisallowPartialFailures() { new IllegalArgumentException() ); } - action.sendSearchResponse(InternalSearchResponse.EMPTY_WITH_TOTAL_HITS, phaseResults.results); + action.sendSearchResponse( + new SearchResponseSections(SearchHits.EMPTY_WITH_TOTAL_HITS, null, null, false, null, null, 1), + phaseResults.results + ); assertThat(exception.get(), instanceOf(SearchPhaseExecutionException.class)); SearchPhaseExecutionException searchPhaseExecutionException = (SearchPhaseExecutionException) exception.get(); assertEquals(0, searchPhaseExecutionException.getSuppressed().length); diff --git a/server/src/test/java/org/elasticsearch/action/search/ExpandSearchPhaseTests.java b/server/src/test/java/org/elasticsearch/action/search/ExpandSearchPhaseTests.java index f8a22ec04fb15..4cac4a8a0445d 100644 --- a/server/src/test/java/org/elasticsearch/action/search/ExpandSearchPhaseTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/ExpandSearchPhaseTests.java @@ -20,7 +20,6 @@ import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.collapse.CollapseBuilder; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.test.ESTestCase; import org.hamcrest.Matchers; @@ -89,16 +88,10 @@ void sendExecuteMultiSearch(MultiSearchRequest request, SearchTask task, ActionL List mSearchResponses = new ArrayList<>(numInnerHits); for (int innerHitNum = 0; innerHitNum < numInnerHits; innerHitNum++) { - InternalSearchResponse internalSearchResponse = new InternalSearchResponse( - collapsedHits.get(innerHitNum), - null, - null, - null, - false, - null, - 1 + mockSearchPhaseContext.sendSearchResponse( + new SearchResponseSections(collapsedHits.get(innerHitNum), null, null, false, null, null, 1), + null ); - mockSearchPhaseContext.sendSearchResponse(internalSearchResponse, null); mSearchResponses.add(new MultiSearchResponse.Item(mockSearchPhaseContext.searchResponse.get(), null)); } @@ -112,11 +105,10 @@ void sendExecuteMultiSearch(MultiSearchRequest request, SearchTask task, ActionL SearchHit hit = new SearchHit(1, "ID"); hit.setDocumentField("someField", new DocumentField("someField", Collections.singletonList(collapseValue))); SearchHits hits = new SearchHits(new SearchHit[] { hit }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1.0F); - InternalSearchResponse internalSearchResponse = new InternalSearchResponse(hits, null, null, null, false, null, 1); - ExpandSearchPhase phase = new ExpandSearchPhase(mockSearchPhaseContext, internalSearchResponse, () -> new SearchPhase("test") { + ExpandSearchPhase phase = new ExpandSearchPhase(mockSearchPhaseContext, hits, () -> new SearchPhase("test") { @Override public void run() { - mockSearchPhaseContext.sendSearchResponse(internalSearchResponse, null); + mockSearchPhaseContext.sendSearchResponse(new SearchResponseSections(hits, null, null, false, null, null, 1), null); } }); @@ -154,9 +146,14 @@ public void testFailOneItemFailsEntirePhase() throws IOException { @Override void sendExecuteMultiSearch(MultiSearchRequest request, SearchTask task, ActionListener listener) { assertTrue(executedMultiSearch.compareAndSet(false, true)); - 
InternalSearchResponse internalSearchResponse = new InternalSearchResponse(collapsedHits, null, null, null, false, null, 1); SearchResponse searchResponse = new SearchResponse( - internalSearchResponse, + collapsedHits, + null, + null, + false, + null, + null, + 1, null, 1, 1, @@ -182,11 +179,10 @@ void sendExecuteMultiSearch(MultiSearchRequest request, SearchTask task, ActionL SearchHit hit2 = new SearchHit(2, "ID2"); hit2.setDocumentField("someField", new DocumentField("someField", Collections.singletonList(collapseValue))); SearchHits hits = new SearchHits(new SearchHit[] { hit1, hit2 }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1.0F); - InternalSearchResponse internalSearchResponse = new InternalSearchResponse(hits, null, null, null, false, null, 1); - ExpandSearchPhase phase = new ExpandSearchPhase(mockSearchPhaseContext, internalSearchResponse, () -> new SearchPhase("test") { + ExpandSearchPhase phase = new ExpandSearchPhase(mockSearchPhaseContext, hits, () -> new SearchPhase("test") { @Override public void run() { - mockSearchPhaseContext.sendSearchResponse(internalSearchResponse, null); + mockSearchPhaseContext.sendSearchResponse(new SearchResponseSections(hits, null, null, false, null, null, 1), null); } }); phase.run(); @@ -210,11 +206,10 @@ void sendExecuteMultiSearch(MultiSearchRequest request, SearchTask task, ActionL SearchHit hit2 = new SearchHit(2, "ID2"); hit2.setDocumentField("someField", new DocumentField("someField", Collections.singletonList(null))); SearchHits hits = new SearchHits(new SearchHit[] { hit1, hit2 }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1.0F); - InternalSearchResponse internalSearchResponse = new InternalSearchResponse(hits, null, null, null, false, null, 1); - ExpandSearchPhase phase = new ExpandSearchPhase(mockSearchPhaseContext, internalSearchResponse, () -> new SearchPhase("test") { + ExpandSearchPhase phase = new ExpandSearchPhase(mockSearchPhaseContext, hits, () -> new SearchPhase("test") { @Override public void run() { - mockSearchPhaseContext.sendSearchResponse(internalSearchResponse, null); + mockSearchPhaseContext.sendSearchResponse(new SearchResponseSections(hits, null, null, false, null, null, 1), null); } }); phase.run(); @@ -238,11 +233,10 @@ void sendExecuteMultiSearch(MultiSearchRequest request, SearchTask task, ActionL ); SearchHits hits = new SearchHits(new SearchHit[0], new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1.0f); - InternalSearchResponse internalSearchResponse = new InternalSearchResponse(hits, null, null, null, false, null, 1); - ExpandSearchPhase phase = new ExpandSearchPhase(mockSearchPhaseContext, internalSearchResponse, () -> new SearchPhase("test") { + ExpandSearchPhase phase = new ExpandSearchPhase(mockSearchPhaseContext, hits, () -> new SearchPhase("test") { @Override public void run() { - mockSearchPhaseContext.sendSearchResponse(internalSearchResponse, null); + mockSearchPhaseContext.sendSearchResponse(new SearchResponseSections(hits, null, null, false, null, null, 1), null); } }); phase.run(); @@ -281,11 +275,10 @@ void sendExecuteMultiSearch(MultiSearchRequest request, SearchTask task, ActionL .routing("baz"); SearchHits hits = new SearchHits(new SearchHit[0], new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1.0f); - InternalSearchResponse internalSearchResponse = new InternalSearchResponse(hits, null, null, null, false, null, 1); - ExpandSearchPhase phase = new ExpandSearchPhase(mockSearchPhaseContext, internalSearchResponse, () -> new SearchPhase("test") { + ExpandSearchPhase phase = new 
ExpandSearchPhase(mockSearchPhaseContext, hits, () -> new SearchPhase("test") { @Override - public void run() throws IOException { - mockSearchPhaseContext.sendSearchResponse(internalSearchResponse, null); + public void run() { + mockSearchPhaseContext.sendSearchResponse(new SearchResponseSections(hits, null, null, false, null, null, 1), null); } }); phase.run(); diff --git a/server/src/test/java/org/elasticsearch/action/search/FetchLookupFieldsPhaseTests.java b/server/src/test/java/org/elasticsearch/action/search/FetchLookupFieldsPhaseTests.java index 38409752c7e7d..01a71fe00b2fe 100644 --- a/server/src/test/java/org/elasticsearch/action/search/FetchLookupFieldsPhaseTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/FetchLookupFieldsPhaseTests.java @@ -17,7 +17,6 @@ import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.fetch.subphase.FieldAndFormat; import org.elasticsearch.search.fetch.subphase.LookupField; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.test.ESTestCase; import java.util.List; @@ -46,8 +45,11 @@ void sendExecuteMultiSearch(MultiSearchRequest request, SearchTask task, ActionL searchHits[i] = SearchHitTests.createTestItem(randomBoolean(), randomBoolean()); } SearchHits hits = new SearchHits(searchHits, new TotalHits(numHits, TotalHits.Relation.EQUAL_TO), 1.0f); - InternalSearchResponse searchResponse = new InternalSearchResponse(hits, null, null, null, false, null, 1); - FetchLookupFieldsPhase phase = new FetchLookupFieldsPhase(searchPhaseContext, searchResponse, null); + FetchLookupFieldsPhase phase = new FetchLookupFieldsPhase( + searchPhaseContext, + new SearchResponseSections(hits, null, null, false, null, null, 1), + null + ); phase.run(); searchPhaseContext.assertNoFailure(); assertNotNull(searchPhaseContext.searchResponse.get()); @@ -95,18 +97,15 @@ void sendExecuteMultiSearch( } else { searchHits = new SearchHits(new SearchHit[0], new TotalHits(0, TotalHits.Relation.EQUAL_TO), 1.0f); } - InternalSearchResponse internalSearchResponse = new InternalSearchResponse( - searchHits, - null, - null, - null, - false, - null, - 1 - ); responses[i] = new MultiSearchResponse.Item( new SearchResponse( - internalSearchResponse, + searchHits, + null, + null, + false, + null, + null, + 1, null, 1, 1, @@ -174,8 +173,11 @@ void sendExecuteMultiSearch( ); } SearchHits searchHits = new SearchHits(new SearchHit[] { leftHit0, leftHit1 }, new TotalHits(2, TotalHits.Relation.EQUAL_TO), 1.0f); - InternalSearchResponse searchResponse = new InternalSearchResponse(searchHits, null, null, null, false, null, 1); - FetchLookupFieldsPhase phase = new FetchLookupFieldsPhase(searchPhaseContext, searchResponse, null); + FetchLookupFieldsPhase phase = new FetchLookupFieldsPhase( + searchPhaseContext, + new SearchResponseSections(searchHits, null, null, false, null, null, 1), + null + ); phase.run(); assertTrue(requestSent.get()); searchPhaseContext.assertNoFailure(); diff --git a/server/src/test/java/org/elasticsearch/action/search/MockSearchPhaseContext.java b/server/src/test/java/org/elasticsearch/action/search/MockSearchPhaseContext.java index 71156517b0306..df3d4d76a14ee 100644 --- a/server/src/test/java/org/elasticsearch/action/search/MockSearchPhaseContext.java +++ b/server/src/test/java/org/elasticsearch/action/search/MockSearchPhaseContext.java @@ -16,7 +16,6 @@ import org.elasticsearch.core.Releasables; import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchShardTarget; 
-import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.internal.ShardSearchContextId; import org.elasticsearch.search.internal.ShardSearchRequest; import org.elasticsearch.transport.Transport; @@ -83,7 +82,7 @@ public OriginalIndices getOriginalIndices(int shardIndex) { } @Override - public void sendSearchResponse(InternalSearchResponse internalSearchResponse, AtomicArray queryResults) { + public void sendSearchResponse(SearchResponseSections internalSearchResponse, AtomicArray queryResults) { String scrollId = getRequest().scroll() != null ? TransportSearchHelper.buildScrollId(queryResults) : null; String searchContextId = getRequest().pointInTimeBuilder() != null ? TransportSearchHelper.buildScrollId(queryResults) : null; searchResponse.set( diff --git a/server/src/test/java/org/elasticsearch/action/search/MultiSearchActionTookTests.java b/server/src/test/java/org/elasticsearch/action/search/MultiSearchActionTookTests.java index 9b1ed6eee1028..f682e75b89a07 100644 --- a/server/src/test/java/org/elasticsearch/action/search/MultiSearchActionTookTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/MultiSearchActionTookTests.java @@ -19,7 +19,7 @@ import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.AtomicArray; -import org.elasticsearch.search.internal.InternalSearchResponse; +import org.elasticsearch.search.SearchResponseUtils; import org.elasticsearch.tasks.Task; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.TestThreadPool; @@ -149,8 +149,7 @@ public void search(final SearchRequest request, final ActionListener queried = new HashSet<>(); TestSearchResponse() { - super(InternalSearchResponse.EMPTY_WITH_TOTAL_HITS, null, 0, 0, 0, 0L, ShardSearchFailure.EMPTY_ARRAY, Clusters.EMPTY, null); + super( + SearchHits.EMPTY_WITH_TOTAL_HITS, + null, + null, + false, + null, + null, + 1, + null, + 0, + 0, + 0, + 0L, + ShardSearchFailure.EMPTY_ARRAY, + Clusters.EMPTY, + null + ); } } diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchContextIdTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchContextIdTests.java index 90ac90738837d..32091780484fa 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchContextIdTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchContextIdTests.java @@ -85,5 +85,11 @@ public void testEncode() { assertThat(node3.getNode(), equalTo("node_3")); assertThat(node3.getSearchContextId().getId(), equalTo(42L)); assertThat(node3.getSearchContextId().getSessionId(), equalTo("c")); + + final String[] indices = SearchContextId.decodeIndices(id); + assertThat(indices.length, equalTo(3)); + assertThat(indices[0], equalTo("cluster_x:idx")); + assertThat(indices[1], equalTo("cluster_y:idy")); + assertThat(indices[2], equalTo("idy")); } } diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java index cd86a2e4f55d6..bfd949606c188 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java @@ -50,7 +50,6 @@ import org.elasticsearch.search.aggregations.metrics.MaxAggregationBuilder; import org.elasticsearch.search.builder.SearchSourceBuilder; import 
org.elasticsearch.search.fetch.FetchSearchResult; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.internal.ShardSearchContextId; import org.elasticsearch.search.profile.ProfileResult; @@ -294,7 +293,7 @@ public void testMerge() { profile ); try { - InternalSearchResponse mergedResponse = SearchPhaseController.merge(false, reducedQueryPhase, fetchResults); + SearchResponseSections mergedResponse = SearchPhaseController.merge(false, reducedQueryPhase, fetchResults); if (trackTotalHits == SearchContext.TRACK_TOTAL_HITS_DISABLED) { assertNull(mergedResponse.hits.getTotalHits()); } else { @@ -412,7 +411,7 @@ protected boolean lessThan(RankDoc a, RankDoc b) { false ); try { - InternalSearchResponse mergedResponse = SearchPhaseController.merge(false, reducedQueryPhase, fetchResults); + SearchResponseSections mergedResponse = SearchPhaseController.merge(false, reducedQueryPhase, fetchResults); if (trackTotalHits == SearchContext.TRACK_TOTAL_HITS_DISABLED) { assertNull(mergedResponse.hits.getTotalHits()); } else { diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java index 0c8496081ff19..8c0ffeabf0ea6 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java @@ -457,6 +457,42 @@ public void testValidate() throws IOException { assertEquals(1, validationErrors.validationErrors().size()); assertEquals("[rank] requires [explain] is [false]", validationErrors.validationErrors().get(0)); } + { + SearchRequest searchRequest = new SearchRequest("test").source( + new SearchSourceBuilder().pointInTimeBuilder(new PointInTimeBuilder("")) + ); + ActionRequestValidationException validationErrors = searchRequest.validate(); + assertNotNull(validationErrors); + assertEquals(1, validationErrors.validationErrors().size()); + assertEquals( + "[indices] cannot be used with point in time. 
Do not specify any index with point in time.", + validationErrors.validationErrors().get(0) + ); + } + { + SearchRequest searchRequest = new SearchRequest().indicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN_CLOSED) + .source(new SearchSourceBuilder().pointInTimeBuilder(new PointInTimeBuilder(""))); + ActionRequestValidationException validationErrors = searchRequest.validate(); + assertNotNull(validationErrors); + assertEquals(1, validationErrors.validationErrors().size()); + assertEquals("[indicesOptions] cannot be used with point in time", validationErrors.validationErrors().get(0)); + } + { + SearchRequest searchRequest = new SearchRequest().routing("route1") + .source(new SearchSourceBuilder().pointInTimeBuilder(new PointInTimeBuilder(""))); + ActionRequestValidationException validationErrors = searchRequest.validate(); + assertNotNull(validationErrors); + assertEquals(1, validationErrors.validationErrors().size()); + assertEquals("[routing] cannot be used with point in time", validationErrors.validationErrors().get(0)); + } + { + SearchRequest searchRequest = new SearchRequest().preference("pref1") + .source(new SearchSourceBuilder().pointInTimeBuilder(new PointInTimeBuilder(""))); + ActionRequestValidationException validationErrors = searchRequest.validate(); + assertNotNull(validationErrors); + assertEquals(1, validationErrors.validationErrors().size()); + assertEquals("[preference] cannot be used with point in time", validationErrors.validationErrors().get(0)); + } } public void testCopyConstructor() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchResponseMergerTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchResponseMergerTests.java index e57b204df0836..0f80572fdb7bc 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchResponseMergerTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchResponseMergerTests.java @@ -21,6 +21,7 @@ import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; +import org.elasticsearch.search.SearchResponseUtils; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.InternalAggregations; @@ -29,7 +30,6 @@ import org.elasticsearch.search.aggregations.bucket.range.Range; import org.elasticsearch.search.aggregations.metrics.Max; import org.elasticsearch.search.aggregations.metrics.MaxAggregationBuilder; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.profile.SearchProfileResults; import org.elasticsearch.search.profile.SearchProfileResultsTests; @@ -108,8 +108,7 @@ public void testMergeTookInMillis() throws InterruptedException { ) ) { for (int i = 0; i < numResponses; i++) { - SearchResponse searchResponse = new SearchResponse( - InternalSearchResponse.EMPTY_WITH_TOTAL_HITS, + SearchResponse searchResponse = SearchResponseUtils.emptyWithTotalHits( null, 1, 1, @@ -169,8 +168,7 @@ public void testMergeShardFailures() throws InterruptedException { shardSearchFailures[j] = failure; priorityQueue.add(Tuple.tuple(searchShardTarget, failure)); } - SearchResponse searchResponse = new SearchResponse( - InternalSearchResponse.EMPTY_WITH_TOTAL_HITS, + SearchResponse searchResponse = SearchResponseUtils.emptyWithTotalHits( null, 1, 1, @@ -231,8 +229,7 @@ public void 
testMergeShardFailuresNullShardTarget() throws InterruptedException shardSearchFailures[j] = failure; priorityQueue.add(Tuple.tuple(shardId, failure)); } - SearchResponse searchResponse = new SearchResponse( - InternalSearchResponse.EMPTY_WITH_TOTAL_HITS, + SearchResponse searchResponse = SearchResponseUtils.emptyWithTotalHits( null, 1, 1, @@ -291,8 +288,7 @@ public void testMergeShardFailuresNullShardId() throws InterruptedException { shardSearchFailures[j] = shardSearchFailure; expectedFailures.add(shardSearchFailure); } - SearchResponse searchResponse = new SearchResponse( - InternalSearchResponse.EMPTY_WITH_TOTAL_HITS, + SearchResponse searchResponse = SearchResponseUtils.emptyWithTotalHits( null, 1, 1, @@ -335,9 +331,14 @@ public void testMergeProfileResults() throws InterruptedException { SearchProfileResults profile = SearchProfileResultsTests.createTestItem(); expectedProfile.putAll(profile.getShardResults()); SearchHits searchHits = new SearchHits(new SearchHit[0], new TotalHits(0, TotalHits.Relation.EQUAL_TO), Float.NaN); - InternalSearchResponse internalSearchResponse = new InternalSearchResponse(searchHits, null, null, profile, false, null, 1); SearchResponse searchResponse = new SearchResponse( - internalSearchResponse, + searchHits, + null, + null, + false, + null, + profile, + 1, null, 1, 1, @@ -408,9 +409,14 @@ public void testMergeCompletionSuggestions() throws InterruptedException { suggestions.add(completionSuggestion); Suggest suggest = new Suggest(suggestions); SearchHits searchHits = new SearchHits(new SearchHit[0], null, Float.NaN); - InternalSearchResponse internalSearchResponse = new InternalSearchResponse(searchHits, null, suggest, null, false, null, 1); SearchResponse searchResponse = new SearchResponse( - internalSearchResponse, + searchHits, + null, + suggest, + false, + null, + null, + 1, null, 1, 1, @@ -489,9 +495,14 @@ public void testMergeCompletionSuggestionsTieBreak() throws InterruptedException suggestions.add(completionSuggestion); Suggest suggest = new Suggest(suggestions); SearchHits searchHits = new SearchHits(new SearchHit[0], null, Float.NaN); - InternalSearchResponse internalSearchResponse = new InternalSearchResponse(searchHits, null, suggest, null, false, null, 1); SearchResponse searchResponse = new SearchResponse( - internalSearchResponse, + searchHits, + null, + suggest, + false, + null, + null, + 1, null, 1, 1, @@ -566,9 +577,14 @@ public void testMergeEmptyFormat() throws InterruptedException { ) { for (Max max : Arrays.asList(max1, max2)) { InternalAggregations aggs = InternalAggregations.from(Arrays.asList(max)); - InternalSearchResponse internalSearchResponse = new InternalSearchResponse(searchHits, aggs, null, null, false, null, 1); SearchResponse searchResponse = new SearchResponse( - internalSearchResponse, + searchHits, + aggs, + null, + false, + null, + null, + 1, null, 1, 1, @@ -630,9 +646,14 @@ public void testMergeAggs() throws InterruptedException { InternalDateRange range = factory.create(rangeAggName, singletonList(bucket), DocValueFormat.RAW, false, emptyMap()); InternalAggregations aggs = InternalAggregations.from(Arrays.asList(range, max)); SearchHits searchHits = new SearchHits(new SearchHit[0], null, Float.NaN); - InternalSearchResponse internalSearchResponse = new InternalSearchResponse(searchHits, aggs, null, null, false, null, 1); SearchResponse searchResponse = new SearchResponse( - internalSearchResponse, + searchHits, + aggs, + null, + false, + null, + null, + 1, null, 1, 1, @@ -787,18 +808,14 @@ public void 
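Every one of these hunks makes the same substitution: the two-step `new SearchResponse(InternalSearchResponse.EMPTY_WITH_TOTAL_HITS, ...)` becomes a single call to the new test utility. The utility's body is not part of this diff, so the following is only a plausible reconstruction from its call sites (seven trailing arguments, empty hits with a zero total); treat it as an illustration, not the source:

```java
// Hypothetical shape of SearchResponseUtils.emptyWithTotalHits, inferred from
// the call sites in this diff; the real helper may differ in detail.
public static SearchResponse emptyWithTotalHits(
    String scrollId,
    int totalShards,
    int successfulShards,
    int skippedShards,
    long tookInMillis,
    ShardSearchFailure[] shardFailures,
    SearchResponse.Clusters clusters
) {
    return new SearchResponse(
        new SearchHits(new SearchHit[0], new TotalHits(0, TotalHits.Relation.EQUAL_TO), Float.NaN),
        null,   // aggregations
        null,   // suggest
        false,  // timedOut
        null,   // terminatedEarly
        null,   // profile results
        1,      // numReducePhases
        scrollId,
        totalShards,
        successfulShards,
        skippedShards,
        tookInMillis,
        shardFailures,
        clusters,
        null    // pointInTimeId
    );
}
```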
testMergeSearchHits() throws InterruptedException { Boolean terminatedEarly = frequently() ? null : true; expectedTerminatedEarly = expectedTerminatedEarly == null ? terminatedEarly : expectedTerminatedEarly; - InternalSearchResponse internalSearchResponse = new InternalSearchResponse( + SearchResponse searchResponse = new SearchResponse( searchHits, null, null, - null, timedOut, terminatedEarly, - numReducePhases - ); - - SearchResponse searchResponse = new SearchResponse( - internalSearchResponse, + null, + numReducePhases, null, total, successful, @@ -937,9 +954,14 @@ public void testMergeEmptySearchHitsWithNonEmpty() { null, null ); - InternalSearchResponse response = new InternalSearchResponse(searchHits, null, null, null, false, false, 1); SearchResponse searchResponse = new SearchResponse( - response, + searchHits, + null, + null, + false, + false, + null, + 1, null, 1, 1, @@ -963,9 +985,14 @@ public void testMergeEmptySearchHitsWithNonEmpty() { null, null ); - InternalSearchResponse response = new InternalSearchResponse(empty, null, null, null, false, false, 1); SearchResponse searchResponse = new SearchResponse( - response, + empty, + null, + null, + false, + false, + null, + 1, null, 1, 1, @@ -1015,9 +1042,14 @@ public void testMergeOnlyEmptyHits() { expectedTotalHits = new TotalHits(Math.min(previousValue + totalHits.value, trackTotalHitsUpTo), totalHitsRelation); } SearchHits empty = new SearchHits(new SearchHit[0], totalHits, Float.NaN, null, null, null); - InternalSearchResponse response = new InternalSearchResponse(empty, null, null, null, false, false, 1); SearchResponse searchResponse = new SearchResponse( - response, + empty, + null, + null, + false, + false, + null, + 1, null, 1, 1, diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchResponseTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchResponseTests.java index b45a04922c187..ef759279e095f 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchResponseTests.java @@ -25,9 +25,9 @@ import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.SearchHitsTests; import org.elasticsearch.search.SearchModule; +import org.elasticsearch.search.SearchResponseUtils; import org.elasticsearch.search.aggregations.AggregationsTests; import org.elasticsearch.search.aggregations.InternalAggregations; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.profile.SearchProfileResults; import org.elasticsearch.search.profile.SearchProfileResultsTests; import org.elasticsearch.search.suggest.Suggest; @@ -107,42 +107,44 @@ private SearchResponse createTestItem(boolean minimal, ShardSearchFailure... 
sha int totalShards = randomIntBetween(1, Integer.MAX_VALUE); int successfulShards = randomIntBetween(0, totalShards); int skippedShards = randomIntBetween(0, totalShards); - InternalSearchResponse internalSearchResponse; + SearchResponse.Clusters clusters; + if (minimal) { + clusters = randomSimpleClusters(); + } else { + clusters = randomClusters(); + } if (minimal == false) { SearchHits hits = SearchHitsTests.createTestItem(true, true); InternalAggregations aggregations = aggregationsTests.createTestInstance(); Suggest suggest = SuggestTests.createTestItem(); SearchProfileResults profileResults = SearchProfileResultsTests.createTestItem(); - internalSearchResponse = new InternalSearchResponse( + return new SearchResponse( hits, aggregations, suggest, - profileResults, timedOut, terminatedEarly, - numReducePhases + profileResults, + numReducePhases, + null, + totalShards, + successfulShards, + skippedShards, + tookInMillis, + shardSearchFailures, + clusters ); } else { - internalSearchResponse = InternalSearchResponse.EMPTY_WITH_TOTAL_HITS; - } - - SearchResponse.Clusters clusters; - if (minimal) { - clusters = randomSimpleClusters(); - } else { - clusters = randomClusters(); + return SearchResponseUtils.emptyWithTotalHits( + null, + totalShards, + successfulShards, + skippedShards, + tookInMillis, + shardSearchFailures, + clusters + ); } - - return new SearchResponse( - internalSearchResponse, - null, - totalShards, - successfulShards, - skippedShards, - tookInMillis, - shardSearchFailures, - clusters - ); } /** @@ -381,15 +383,13 @@ public void testToXContent() throws IOException { SearchHit[] hits = new SearchHit[] { hit }; { SearchResponse response = new SearchResponse( - new InternalSearchResponse( - new SearchHits(hits, new TotalHits(100, TotalHits.Relation.EQUAL_TO), 1.5f), - null, - null, - null, - false, - null, - 1 - ), + new SearchHits(hits, new TotalHits(100, TotalHits.Relation.EQUAL_TO), 1.5f), + null, + null, + false, + null, + null, + 1, null, 0, 0, @@ -425,15 +425,13 @@ public void testToXContent() throws IOException { } { SearchResponse response = new SearchResponse( - new InternalSearchResponse( - new SearchHits(hits, new TotalHits(100, TotalHits.Relation.EQUAL_TO), 1.5f), - null, - null, - null, - false, - null, - 1 - ), + new SearchHits(hits, new TotalHits(100, TotalHits.Relation.EQUAL_TO), 1.5f), + null, + null, + false, + null, + null, + 1, null, 0, 0, @@ -477,15 +475,13 @@ public void testToXContent() throws IOException { } { SearchResponse response = new SearchResponse( - new InternalSearchResponse( - new SearchHits(hits, new TotalHits(100, TotalHits.Relation.EQUAL_TO), 1.5f), - null, - null, - null, - false, - null, - 1 - ), + new SearchHits(hits, new TotalHits(100, TotalHits.Relation.EQUAL_TO), 1.5f), + null, + null, + false, + null, + null, + 1, null, 20, 9, @@ -654,8 +650,7 @@ public void testSerialization() throws IOException { } public void testToXContentEmptyClusters() throws IOException { - SearchResponse searchResponse = new SearchResponse( - InternalSearchResponse.EMPTY_WITH_TOTAL_HITS, + SearchResponse searchResponse = SearchResponseUtils.emptyWithTotalHits( null, 1, 1, diff --git a/server/src/test/java/org/elasticsearch/action/search/TransportMultiSearchActionTests.java b/server/src/test/java/org/elasticsearch/action/search/TransportMultiSearchActionTests.java index 1097174628e58..fb27d824417b1 100644 --- a/server/src/test/java/org/elasticsearch/action/search/TransportMultiSearchActionTests.java +++ 
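Taken together, the SearchResponseTests rewrites above show the constructor change that drives most of this diff. The argument mapping below is inferred purely from the call sites in these hunks, not from the constructor's declaration:

```java
// Before: the response sections were bundled into an intermediate object.
//   new InternalSearchResponse(hits, aggs, suggest, profile, timedOut, terminatedEarly, numReducePhases);
//   new SearchResponse(internal, scrollId, total, successful, skipped, took, failures, clusters);
//
// After: SearchResponse takes the sections directly. Note that profile moves
// from before the two flags to after them, and some call sites append a
// trailing pointInTimeId argument:
//   new SearchResponse(hits, aggs, suggest, timedOut, terminatedEarly, profile, numReducePhases,
//       scrollId, total, successful, skipped, took, failures, clusters);
```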
b/server/src/test/java/org/elasticsearch/action/search/TransportMultiSearchActionTests.java @@ -23,7 +23,7 @@ import org.elasticsearch.common.Randomness; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.search.internal.InternalSearchResponse; +import org.elasticsearch.search.SearchResponseUtils; import org.elasticsearch.tasks.Task; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; @@ -166,8 +166,7 @@ public void search(final SearchRequest request, final ActionListener<SearchResponse> listener) { counter.decrementAndGet(); - var response = new SearchResponse( - InternalSearchResponse.EMPTY_WITH_TOTAL_HITS, + var response = SearchResponseUtils.emptyWithTotalHits( null, 0, 0, diff --git a/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java b/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java index 6230a24a0768f..9707df1a7dfd0 100644 --- a/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java @@ -69,7 +69,6 @@ import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.collapse.CollapseBuilder; import org.elasticsearch.search.internal.AliasFilter; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.internal.ShardSearchContextId; import org.elasticsearch.search.sort.SortBuilders; @@ -77,6 +76,7 @@ import org.elasticsearch.search.suggest.term.TermSuggestionBuilder; import org.elasticsearch.search.vectors.KnnSearchBuilder; import org.elasticsearch.tasks.TaskId; +import org.elasticsearch.telemetry.TelemetryProvider; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.TransportVersionUtils; import org.elasticsearch.test.transport.MockTransportService; @@ -480,16 +480,23 @@ private MockTransportService[] startTransport( } private static SearchResponse emptySearchResponse() { - InternalSearchResponse response = new InternalSearchResponse( + return new SearchResponse( new SearchHits(new SearchHit[0], new TotalHits(0, TotalHits.Relation.EQUAL_TO), Float.NaN), InternalAggregations.EMPTY, null, - null, false, null, - 1 + null, + 1, + null, + 1, + 1, + 0, + 100, + ShardSearchFailure.EMPTY_ARRAY, + SearchResponse.Clusters.EMPTY, + null ); - return new SearchResponse(response, null, 1, 1, 0, 100, ShardSearchFailure.EMPTY_ARRAY, SearchResponse.Clusters.EMPTY, null); } public void testCCSRemoteReduceMergeFails() throws Exception { @@ -1594,7 +1601,8 @@ protected void doWriteTo(StreamOutput out) throws IOException { actionFilters, null, null, - null + null, + new SearchTransportAPMMetrics(TelemetryProvider.NOOP.getMeterRegistry()) ); CountDownLatch latch = new CountDownLatch(1); diff --git a/server/src/test/java/org/elasticsearch/action/support/PlainActionFutureTests.java b/server/src/test/java/org/elasticsearch/action/support/PlainActionFutureTests.java index abc482a34a070..2ca914eb23c61 100644 --- a/server/src/test/java/org/elasticsearch/action/support/PlainActionFutureTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/PlainActionFutureTests.java @@ -34,13 +34,11 @@ public void onResponse(Object value) { // test all possible methods that can be interrupted final Runnable runnable = () -> { - final int method =
randomIntBetween(0, 2); switch (method) { case 0 -> future.actionGet(); - case 1 -> future.actionGet("30s"); - case 2 -> future.actionGet(30000); - case 3 -> future.actionGet(TimeValue.timeValueSeconds(30)); - case 4 -> future.actionGet(30, TimeUnit.SECONDS); + case 1 -> future.actionGet(TimeValue.timeValueSeconds(30)); + case 2 -> future.actionGet(30, TimeUnit.SECONDS); default -> throw new AssertionError(method); } }; diff --git a/server/src/test/java/org/elasticsearch/action/support/broadcast/unpromotable/TransportBroadcastUnpromotableActionTests.java b/server/src/test/java/org/elasticsearch/action/support/broadcast/unpromotable/TransportBroadcastUnpromotableActionTests.java index b5bcb8c54668a..a0d0b1809e1f7 100644 --- a/server/src/test/java/org/elasticsearch/action/support/broadcast/unpromotable/TransportBroadcastUnpromotableActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/broadcast/unpromotable/TransportBroadcastUnpromotableActionTests.java @@ -379,6 +379,7 @@ private ActionResponse brodcastUnpromotableRequest(IndexShardRoutingTable wrongR public void testNullIndexShardRoutingTable() { IndexShardRoutingTable shardRoutingTable = null; assertThat( + expectThrows( NullPointerException.class, () -> PlainActionFuture.get( diff --git a/server/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java b/server/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java index 92155333dc507..3de66184b49fa 100644 --- a/server/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java @@ -676,7 +676,7 @@ protected ClusterBlockException checkBlock(Request request, ClusterState state) } setState(clusterService, newStateBuilder.build()); } - expectThrows(TaskCancelledException.class, listener::actionGet); + expectThrows(TaskCancelledException.class, listener); } public void testTaskCancellationOnceActionItIsDispatchedToMaster() throws Exception { @@ -703,7 +703,7 @@ public void testTaskCancellationOnceActionItIsDispatchedToMaster() throws Except releaseBlockedThreads.run(); - expectThrows(TaskCancelledException.class, listener::actionGet); + expectThrows(TaskCancelledException.class, listener); } public void testGlobalBlocksAreCheckedAfterIndexNotFoundException() throws Exception { diff --git a/server/src/test/java/org/elasticsearch/action/support/nodes/TransportNodesActionTests.java b/server/src/test/java/org/elasticsearch/action/support/nodes/TransportNodesActionTests.java index 9458d0fe962e8..4d26ae610da3c 100644 --- a/server/src/test/java/org/elasticsearch/action/support/nodes/TransportNodesActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/nodes/TransportNodesActionTests.java @@ -9,10 +9,12 @@ package org.elasticsearch.action.support.nodes; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.PlainActionFuture; -import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeActionTests; +import org.elasticsearch.action.support.RefCountingListener; +import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import 
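Two related cleanups meet here: PlainActionFutureTests now exercises only the three surviving interruptible `actionGet` variants (no-arg, `TimeValue`, and value-plus-`TimeUnit`), and TransportMasterNodeActionTests passes the future itself to `expectThrows` instead of a `listener::actionGet` method reference. The latter implies a test-framework overload along these lines; this is a hedged sketch, the real helper lives in ESTestCase and may differ:

```java
// Sketch of the overload the new call sites suggest: unwrap the future's
// failure by delegating to the standard expectThrows(Class, ThrowingRunnable).
public static <T extends Throwable> T expectThrows(Class<T> expectedType, ActionFuture<?> future) {
    return expectThrows(expectedType, future::actionGet);
}
```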
org.elasticsearch.cluster.node.DiscoveryNode; @@ -35,6 +37,7 @@ import org.elasticsearch.test.transport.CapturingTransport; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.LeakTracker; import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.transport.TransportService; import org.hamcrest.Matchers; @@ -55,6 +58,9 @@ import java.util.Set; import java.util.concurrent.Executor; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Function; +import java.util.function.ObjLongConsumer; import static java.util.Collections.emptyMap; import static org.elasticsearch.test.ClusterServiceUtils.createClusterService; @@ -118,7 +124,11 @@ public void testResponseAggregation() { final TestTransportNodesAction action = getTestTransportNodesAction(); final PlainActionFuture<TestNodesResponse> listener = new PlainActionFuture<>(); - action.execute(null, new TestNodesRequest(), listener); + action.execute(null, new TestNodesRequest(), listener.delegateFailure((l, response) -> { + assertTrue(response.getNodes().stream().allMatch(TestNodeResponse::hasReferences)); + assertTrue(response.hasReferences()); + l.onResponse(response); + })); assertFalse(listener.isDone()); final Set<String> failedNodeIds = new HashSet<>(); @@ -127,7 +137,9 @@ for (CapturingTransport.CapturedRequest capturedRequest : transport.getCapturedRequestsAndClear()) { if (randomBoolean()) { successfulNodes.add(capturedRequest.node()); - transport.handleResponse(capturedRequest.requestId(), new TestNodeResponse(capturedRequest.node())); + final var response = new TestNodeResponse(capturedRequest.node()); + transport.handleResponse(capturedRequest.requestId(), response); + assertFalse(response.hasReferences()); // response is copied (via the wire protocol) so this instance is released } else { failedNodeIds.add(capturedRequest.node().getId()); if (randomBoolean()) { @@ -138,7 +150,18 @@ } } - TestNodesResponse response = listener.actionGet(10, TimeUnit.SECONDS); + final TestNodesResponse response = listener.actionGet(10, TimeUnit.SECONDS); + + final var allResponsesReleasedListener = new SubscribableListener<Void>(); + try (var listeners = new RefCountingListener(allResponsesReleasedListener)) { + response.addCloseListener(listeners.acquire()); + for (final var nodeResponse : response.getNodes()) { + nodeResponse.addCloseListener(listeners.acquire()); + } + } + safeAwait(allResponsesReleasedListener); + assertTrue(response.getNodes().stream().noneMatch(TestNodeResponse::hasReferences)); + assertFalse(response.hasReferences()); for (TestNodeResponse nodeResponse : response.getNodes()) { assertThat(successfulNodes, Matchers.hasItem(nodeResponse.getNode())); @@ -164,7 +187,7 @@ public void testResponsesReleasedOnCancellation() { final CancellableTask cancellableTask = new CancellableTask(randomLong(), "transport", "action", "", null, emptyMap()); final PlainActionFuture<TestNodesResponse> listener = new PlainActionFuture<>(); action.execute(cancellableTask, new TestNodesRequest(), listener.delegateResponse((l, e) -> { - assert Thread.currentThread().getName().contains("[" + ThreadPool.Names.GENERIC + "]"); + assert ThreadPool.assertCurrentThreadPool(ThreadPool.Names.GENERIC); l.onFailure(e); })); @@ -173,13 +196,31 @@ ); Randomness.shuffle(capturedRequests); + final AtomicInteger liveResponseCount =
new AtomicInteger(); + final Function<DiscoveryNode, TestNodeResponse> responseCreator = node -> { + liveResponseCount.incrementAndGet(); + final var testNodeResponse = new TestNodeResponse(node); + testNodeResponse.addCloseListener(ActionListener.running(liveResponseCount::decrementAndGet)); + return testNodeResponse; + }; + + final ObjLongConsumer<TestNodeResponse> responseSender = (response, requestId) -> { + try { + // transport.handleResponse may de/serialize the response, releasing it early, so send the response straight to the handler + transport.getTransportResponseHandler(requestId).handleResponse(response); + } finally { + response.decRef(); + } + }; + final ReachabilityChecker reachabilityChecker = new ReachabilityChecker(); final Runnable nextRequestProcessor = () -> { var capturedRequest = capturedRequests.remove(0); if (randomBoolean()) { - // transport.handleResponse may de/serialize the response, releasing it early, so send the response straight to the handler - transport.getTransportResponseHandler(capturedRequest.requestId()) - .handleResponse(reachabilityChecker.register(new TestNodeResponse(capturedRequest.node()))); + responseSender.accept( + reachabilityChecker.register(responseCreator.apply(capturedRequest.node())), + capturedRequest.requestId() + ); } else { // handleRemoteError may de/serialize the exception, releasing it early, so just use handleLocalError transport.handleLocalError( @@ -200,20 +241,23 @@ // responses captured before cancellation are now unreachable reachabilityChecker.ensureUnreachable(); + assertEquals(0, liveResponseCount.get()); while (capturedRequests.size() > 0) { // a response sent after cancellation is dropped immediately assertFalse(listener.isDone()); nextRequestProcessor.run(); reachabilityChecker.ensureUnreachable(); + assertEquals(0, liveResponseCount.get()); } expectThrows(TaskCancelledException.class, () -> listener.actionGet(10, TimeUnit.SECONDS)); + assertTrue(cancellableTask.isCancelled()); // keep task alive } @BeforeClass public static void startThreadPool() { - THREAD_POOL = new TestThreadPool(TransportBroadcastByNodeActionTests.class.getSimpleName()); + THREAD_POOL = new TestThreadPool(TransportNodesActionTests.class.getSimpleName()); } @AfterClass @@ -268,11 +312,9 @@ public void tearDown() throws Exception { public TestTransportNodesAction getTestTransportNodesAction() { return new TestTransportNodesAction( - THREAD_POOL, clusterService, transportService, new ActionFilters(Collections.emptySet()), - TestNodesRequest::new, TestNodeRequest::new, THREAD_POOL.executor(ThreadPool.Names.GENERIC) ); @@ -302,11 +344,9 @@ private static class TestTransportNodesAction extends TransportNodesAction< TestNodesRequest, TestNodesResponse, TestNodeRequest, TestNodeResponse> { TestTransportNodesAction( - ThreadPool threadPool, ClusterService clusterService, TransportService transportService, ActionFilters actionFilters, - Writeable.Reader<TestNodesRequest> request, Writeable.Reader<TestNodeRequest> nodeRequest, Executor nodeExecutor ) { @@ -319,7 +359,7 @@ protected TestNodesResponse newResponse( List<TestNodeResponse> responses, List<FailedNodeException> failures ) { - return new TestNodesResponse(clusterService.getClusterName(), request, responses, failures); + return new TestNodesResponse(clusterService.getClusterName(), responses, failures); } @Override @@ -350,7 +390,7 @@ private static class DataNodesOnlyTransportNodesAction extends TestTransportNode Writeable.Reader<TestNodeRequest> nodeRequest, Executor nodeExecutor ) { - super(threadPool, clusterService, transportService, actionFilters, request, nodeRequest, nodeExecutor); + super(clusterService, transportService,
actionFilters, nodeRequest, nodeExecutor); } @Override @@ -371,16 +411,11 @@ private static class TestNodesRequest extends BaseNodesRequest<TestNodesRequest> private static class TestNodesResponse extends BaseNodesResponse<TestNodeResponse> { - private final TestNodesRequest request; + private final SubscribableListener<Void> onClose = new SubscribableListener<>(); + private final RefCounted refCounted = LeakTracker.wrap(AbstractRefCounted.of(() -> onClose.onResponse(null))); - TestNodesResponse( - ClusterName clusterName, - TestNodesRequest request, - List<TestNodeResponse> nodeResponses, - List<FailedNodeException> failures - ) { + TestNodesResponse(ClusterName clusterName, List<TestNodeResponse> nodeResponses, List<FailedNodeException> failures) { super(clusterName, nodeResponses, failures); - this.request = request; } @Override @@ -392,6 +427,30 @@ protected List<TestNodeResponse> readNodesFrom(StreamInput in) throws IOExceptio protected void writeNodesTo(StreamOutput out, List<TestNodeResponse> nodes) throws IOException { out.writeCollection(nodes); } + + @Override + public void incRef() { + refCounted.incRef(); + } + + @Override + public boolean tryIncRef() { + return refCounted.tryIncRef(); + } + + @Override + public boolean decRef() { + return refCounted.decRef(); + } + + @Override + public boolean hasReferences() { + return refCounted.hasReferences(); + } + + void addCloseListener(ActionListener<Void> listener) { + onClose.addListener(listener); + } } private static class TestNodeRequest extends TransportRequest { @@ -425,6 +484,10 @@ public boolean hasReferences() { } private static class TestNodeResponse extends BaseNodeResponse { + + private final SubscribableListener<Void> onClose = new SubscribableListener<>(); + private final RefCounted refCounted = LeakTracker.wrap(AbstractRefCounted.of(() -> onClose.onResponse(null))); + TestNodeResponse() { this(mock(DiscoveryNode.class)); } @@ -436,6 +499,30 @@ private static class TestNodeResponse extends BaseNodeResponse { protected TestNodeResponse(StreamInput in) throws IOException { super(in); } + + @Override + public void incRef() { + refCounted.incRef(); + } + + @Override + public boolean tryIncRef() { + return refCounted.tryIncRef(); + } + + @Override + public boolean decRef() { + return refCounted.decRef(); + } + + @Override + public boolean hasReferences() { + return refCounted.hasReferences(); + } + + void addCloseListener(ActionListener<Void> listener) { + onClose.addListener(listener); + } } } diff --git a/server/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java b/server/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java index a787a50798e05..8bda62b91bc7e 100644 --- a/server/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/replication/BroadcastReplicationTests.java @@ -305,7 +305,7 @@ public BaseBroadcastResponse executeAndAssertImmediateResponse( ) { PlainActionFuture<BaseBroadcastResponse> response = new PlainActionFuture<>(); ActionTestUtils.execute(broadcastAction, null, request, response); - return response.actionGet("5s"); + return response.actionGet(5, TimeUnit.SECONDS); } private void assertBroadcastResponse(int total, int successful, int failed, BaseBroadcastResponse response, Class<?> exceptionClass) { diff --git a/server/src/test/java/org/elasticsearch/action/termvectors/TermVectorsUnitTests.java b/server/src/test/java/org/elasticsearch/action/termvectors/TermVectorsUnitTests.java index 0df492b080254..be8255cd766c8 100644 --- a/server/src/test/java/org/elasticsearch/action/termvectors/TermVectorsUnitTests.java +++
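The boilerplate added to both test response classes above delegates the RefCounted contract to a LeakTracker-wrapped AbstractRefCounted whose close hook completes a SubscribableListener, which is what lets the tests await and assert release. Stripped of the test scaffolding, the underlying contract looks like this, a minimal sketch using the real AbstractRefCounted API:

```java
// AbstractRefCounted starts with one reference; the close hook runs exactly
// once, when the last reference is released.
RefCounted counted = AbstractRefCounted.of(() -> System.out.println("released"));
counted.incRef();                               // refs: 1 -> 2
assert counted.hasReferences();
counted.decRef();                               // refs: 2 -> 1
counted.decRef();                               // refs: 1 -> 0, prints "released"
assert counted.hasReferences() == false;
// In the tests above, the Runnable instead completes a SubscribableListener<Void>,
// and LeakTracker.wrap(...) reports a leak if the counter is GC'd while refs > 0.
```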
b/server/src/test/java/org/elasticsearch/action/termvectors/TermVectorsUnitTests.java @@ -294,17 +294,18 @@ public void testStreamRequestLegacyVersion() throws IOException { public void testMultiParser() throws Exception { byte[] bytes = StreamsUtils.copyToBytesFromClasspath("/org/elasticsearch/action/termvectors/multiRequest1.json"); - XContentParser data = createParser(JsonXContent.jsonXContent, bytes); - MultiTermVectorsRequest request = new MultiTermVectorsRequest(); - request.add(new TermVectorsRequest(), data); - checkParsedParameters(request); + try (XContentParser parser = createParser(JsonXContent.jsonXContent, bytes)) { + MultiTermVectorsRequest request = new MultiTermVectorsRequest(); + request.add(new TermVectorsRequest(), parser); + checkParsedParameters(request); + } bytes = StreamsUtils.copyToBytesFromClasspath("/org/elasticsearch/action/termvectors/multiRequest2.json"); - data = createParser(JsonXContent.jsonXContent, new BytesArray(bytes)); - request = new MultiTermVectorsRequest(); - request.add(new TermVectorsRequest(), data); - - checkParsedParameters(request); + try (var parser = createParser(JsonXContent.jsonXContent, new BytesArray(bytes))) { + MultiTermVectorsRequest request = new MultiTermVectorsRequest(); + request.add(new TermVectorsRequest(), parser); + checkParsedParameters(request); + } } void checkParsedParameters(MultiTermVectorsRequest request) { diff --git a/server/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java b/server/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java index e436ba5c89acd..16a57767a9b07 100644 --- a/server/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/update/UpdateRequestTests.java @@ -120,7 +120,9 @@ public void setUp() throws Exception { public void testFromXContent() throws Exception { UpdateRequest request = new UpdateRequest("test", "1"); // simple script - request.fromXContent(createParser(XContentFactory.jsonBuilder().startObject().field("script", "script1").endObject())); + try (var parser = createParser(XContentFactory.jsonBuilder().startObject().field("script", "script1").endObject())) { + request.fromXContent(parser); + } Script script = request.script(); assertThat(script, notNullValue()); assertThat(script.getIdOrCode(), equalTo("script1")); @@ -130,11 +132,13 @@ public void testFromXContent() throws Exception { assertThat(params, equalTo(emptyMap())); // simple verbose script - request.fromXContent( - createParser( + try ( + var parser = createParser( XContentFactory.jsonBuilder().startObject().startObject("script").field("source", "script1").endObject().endObject() ) - ); + ) { + request.fromXContent(parser); + } script = request.script(); assertThat(script, notNullValue()); assertThat(script.getIdOrCode(), equalTo("script1")); @@ -146,8 +150,8 @@ public void testFromXContent() throws Exception { // script with params request = new UpdateRequest("test", "1"); - request.fromXContent( - createParser( + try ( + var parser = createParser( XContentFactory.jsonBuilder() .startObject() .startObject("script") @@ -158,7 +162,9 @@ public void testFromXContent() throws Exception { .endObject() .endObject() ) - ); + ) { + request.fromXContent(parser); + } script = request.script(); assertThat(script, notNullValue()); assertThat(script.getIdOrCode(), equalTo("script1")); @@ -171,8 +177,8 @@ public void testFromXContent() throws Exception { request.decRef(); request = new UpdateRequest("test", "1"); - 
request.fromXContent( - createParser( + try ( + var parser = createParser( XContentFactory.jsonBuilder() .startObject() .startObject("script") @@ -183,7 +189,9 @@ .endObject() .endObject() ) - ); + ) { + request.fromXContent(parser); + } script = request.script(); assertThat(script, notNullValue()); assertThat(script.getIdOrCode(), equalTo("script1")); @@ -197,8 +205,8 @@ // script with params and upsert request = new UpdateRequest("test", "1"); - request.fromXContent( - createParser( + try ( + var parser = createParser( XContentFactory.jsonBuilder() .startObject() .startObject("script") @@ -215,7 +223,9 @@ .endObject() .endObject() ) - ); + ) { + request.fromXContent(parser); + } script = request.script(); assertThat(script, notNullValue()); assertThat(script.getIdOrCode(), equalTo("script1")); @@ -235,8 +245,8 @@ request.decRef(); request = new UpdateRequest("test", "1"); - request.fromXContent( - createParser( + try ( + var parser = createParser( XContentFactory.jsonBuilder() .startObject() .startObject("upsert") @@ -253,7 +263,9 @@ .endObject() .endObject() ) - ); + ) { + request.fromXContent(parser); + } script = request.script(); assertThat(script, notNullValue()); assertThat(script.getIdOrCode(), equalTo("script1")); @@ -270,8 +282,8 @@ // script with doc request = new UpdateRequest("test", "1"); - request.fromXContent( - createParser( + try ( + var parser = createParser( XContentFactory.jsonBuilder() .startObject() .startObject("doc") @@ -282,7 +294,9 @@ .endObject() .endObject() ) - ); + ) { + request.fromXContent(parser); + } Map<String, Object> doc = request.doc().sourceAsMap(); assertThat(doc.get("field1").toString(), equalTo("value1")); assertThat(((Map) doc.get("compound")).get("field2").toString(), equalTo("value2")); @@ -291,24 +305,31 @@ public void testUnknownFieldParsing() throws Exception { UpdateRequest request = new UpdateRequest("test", "1"); - XContentParser contentParser = createParser(XContentFactory.jsonBuilder().startObject().field("unknown_field", "test").endObject()); - - XContentParseException ex = expectThrows(XContentParseException.class, () -> request.fromXContent(contentParser)); - assertEquals("[1:2] [UpdateRequest] unknown field [unknown_field]", ex.getMessage()); + try ( + XContentParser contentParser = createParser( + XContentFactory.jsonBuilder().startObject().field("unknown_field", "test").endObject() + ) + ) { + XContentParseException ex = expectThrows(XContentParseException.class, () -> request.fromXContent(contentParser)); + assertEquals("[1:2] [UpdateRequest] unknown field [unknown_field]", ex.getMessage()); + } request.decRef(); UpdateRequest request2 = new UpdateRequest("test", "1"); - XContentParser unknownObject = createParser( - XContentFactory.jsonBuilder() .startObject() .field("script", "ctx.op = ctx._source.views == params.count ?
'delete' : 'none'") - .startObject("params") - .field("count", 1) - .endObject() - .endObject() - ); - ex = expectThrows(XContentParseException.class, () -> request2.fromXContent(unknownObject)); - assertEquals("[1:76] [UpdateRequest] unknown field [params]", ex.getMessage()); + try ( + XContentParser unknownObject = createParser( + XContentFactory.jsonBuilder() + .startObject() + .field("script", "ctx.op = ctx._source.views == params.count ? 'delete' : 'none'") + .startObject("params") + .field("count", 1) + .endObject() + .endObject() + ) + ) { + XContentParseException ex = expectThrows(XContentParseException.class, () -> request2.fromXContent(unknownObject)); + assertEquals("[1:76] [UpdateRequest] unknown field [params]", ex.getMessage()); + } request2.decRef(); } @@ -577,9 +598,10 @@ public void testNoopDetection() throws Exception { ShardId shardId = new ShardId("test", "", 0); GetResult getResult = new GetResult("test", "1", 0, 1, 0, true, new BytesArray("{\"body\": \"foo\"}"), null, null); - UpdateRequest request = new UpdateRequest("test", "1").fromXContent( - createParser(JsonXContent.jsonXContent, new BytesArray("{\"doc\": {\"body\": \"foo\"}}")) - ); + UpdateRequest request; + try (var parser = createParser(JsonXContent.jsonXContent, new BytesArray("{\"doc\": {\"body\": \"foo\"}}"))) { + request = new UpdateRequest("test", "1").fromXContent(parser); + } UpdateHelper.Result result = UpdateHelper.prepareUpdateIndexRequest(shardId, request, getResult, true); @@ -594,17 +616,18 @@ public void testNoopDetection() throws Exception { request.decRef(); ((IndexRequest) result.action()).decRef(); - // Change the request to be a different doc - request = new UpdateRequest("test", "1").fromXContent( - createParser(JsonXContent.jsonXContent, new BytesArray("{\"doc\": {\"body\": \"bar\"}}")) - ); - result = UpdateHelper.prepareUpdateIndexRequest(shardId, request, getResult, true); + try (var parser = createParser(JsonXContent.jsonXContent, new BytesArray("{\"doc\": {\"body\": \"bar\"}}"))) { + // Change the request to be a different doc + request = new UpdateRequest("test", "1").fromXContent(parser); + result = UpdateHelper.prepareUpdateIndexRequest(shardId, request, getResult, true); + + assertThat(result.action(), instanceOf(IndexRequest.class)); + assertThat(result.getResponseResult(), equalTo(DocWriteResponse.Result.UPDATED)); + assertThat(result.updatedSourceAsMap().get("body").toString(), equalTo("bar")); + request.decRef(); + ((IndexRequest) result.action()).decRef(); + } - assertThat(result.action(), instanceOf(IndexRequest.class)); - assertThat(result.getResponseResult(), equalTo(DocWriteResponse.Result.UPDATED)); - assertThat(result.updatedSourceAsMap().get("body").toString(), equalTo("bar")); - request.decRef(); - ((IndexRequest) result.action()).decRef(); } public void testUpdateScript() throws Exception { @@ -656,12 +679,12 @@ public void testToString() throws IOException { update {[test][1], doc_as_upsert[false], script[Script{type=inline, lang='mock', idOrCode='ctx._source.body = "foo"', \ options={}, params={}}], scripted_upsert[false], detect_noop[true]}""")); request.decRef(); - request = new UpdateRequest("test", "1").fromXContent( - createParser(JsonXContent.jsonXContent, new BytesArray("{\"doc\": {\"body\": \"bar\"}}")) - ); - assertThat(request.toString(), equalTo(""" - update {[test][1], doc_as_upsert[false], doc[index {[null][null], source[{"body":"bar"}]}], \ - scripted_upsert[false], detect_noop[true]}""")); - request.decRef(); + try (var parser = 
createParser(JsonXContent.jsonXContent, new BytesArray("{\"doc\": {\"body\": \"bar\"}}"))) { + request = new UpdateRequest("test", "1").fromXContent(parser); + assertThat(request.toString(), equalTo(""" + update {[test][1], doc_as_upsert[false], doc[index {[null][null], source[{"body":"bar"}]}], \ + scripted_upsert[false], detect_noop[true]}""")); + request.decRef(); + } } } diff --git a/server/src/test/java/org/elasticsearch/cluster/ClusterSnapshotStatsTests.java b/server/src/test/java/org/elasticsearch/cluster/ClusterSnapshotStatsTests.java index 06a772d50c393..55cd6e5790f84 100644 --- a/server/src/test/java/org/elasticsearch/cluster/ClusterSnapshotStatsTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/ClusterSnapshotStatsTests.java @@ -49,7 +49,8 @@ protected Writeable.Reader<ClusterSnapshotStats> instanceReader() { "ABORTED", "MISSING", "WAITING", - "QUEUED" }; + "QUEUED", + "PAUSED_FOR_NODE_REMOVAL" }; @Override protected ClusterSnapshotStats createTestInstance() { @@ -370,7 +371,9 @@ public void testComputation() { SnapshotsInProgress.ShardState.WAITING, 0, SnapshotsInProgress.ShardState.QUEUED, - 1 + 1, + SnapshotsInProgress.ShardState.PAUSED_FOR_NODE_REMOVAL, + 0 ) ) ) @@ -392,7 +395,7 @@ new Snapshot("test-repo", new SnapshotId("snapshot", "uuid")), randomBoolean(), randomBoolean(), - SnapshotsInProgress.State.INIT, + SnapshotsInProgress.State.STARTED, Map.of("index", new IndexId("index", "uuid")), List.of(), List.of(), diff --git a/server/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java b/server/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java index 7b2795abfd62d..75439578448a4 100644 --- a/server/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java @@ -13,7 +13,6 @@ import org.elasticsearch.action.admin.indices.stats.ShardStats; import org.elasticsearch.cluster.node.DiscoveryNodeUtils; import org.elasticsearch.cluster.routing.RecoverySource.PeerRecoverySource; -import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingHelper; import org.elasticsearch.cluster.routing.UnassignedInfo; @@ -137,14 +136,7 @@ public void testFillShardLevelInfo() { Map<String, Long> shardSizes = new HashMap<>(); Map<ShardId, Long> shardDataSetSizes = new HashMap<>(); Map<ClusterInfo.NodeAndShard, String> routingToPath = new HashMap<>(); - InternalClusterInfoService.buildShardLevelInfo( - RoutingTable.EMPTY_ROUTING_TABLE, - stats, - shardSizes, - shardDataSetSizes, - routingToPath, - new HashMap<>() - ); + InternalClusterInfoService.buildShardLevelInfo(stats, shardSizes, shardDataSetSizes, routingToPath, new HashMap<>()); assertThat( shardSizes, diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/JoinValidationServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/JoinValidationServiceTests.java index 9287e279fe2f5..9b1ce4611169b 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/JoinValidationServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/JoinValidationServiceTests.java @@ -397,7 +397,7 @@ public void testJoinValidationRejectsMismatchedClusterUUID() { deterministicTaskQueue.runAllTasks(); assertThat( - expectThrows(CoordinationStateRejectedException.class, future::actionGet).getMessage(), + expectThrows(CoordinationStateRejectedException.class, future).getMessage(), allOf( containsString("This node previously joined a cluster with
UUID"), containsString("and is now trying to join a different cluster"), @@ -447,10 +447,7 @@ public void testJoinValidationRunsJoinValidators() { ); deterministicTaskQueue.runAllTasks(); - assertThat( - expectThrows(IllegalStateException.class, future::actionGet).getMessage(), - allOf(containsString("simulated validation failure")) - ); + assertThat(expectThrows(IllegalStateException.class, future).getMessage(), allOf(containsString("simulated validation failure"))); } public void testJoinValidationFallsBackToPingIfNotMaster() { diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/StableMasterHealthIndicatorServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/StableMasterHealthIndicatorServiceTests.java index b9d1cb50444e3..18385b1d7ad44 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/StableMasterHealthIndicatorServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/StableMasterHealthIndicatorServiceTests.java @@ -312,8 +312,11 @@ private static StableMasterHealthIndicatorService createStableMasterHealthIndica private Map xContentToMap(ToXContent xcontent) throws IOException { XContentBuilder builder = XContentFactory.jsonBuilder(); xcontent.toXContent(builder, ToXContent.EMPTY_PARAMS); - XContentParser parser = XContentType.JSON.xContent() - .createParser(xContentRegistry(), LoggingDeprecationHandler.INSTANCE, BytesReference.bytes(builder).streamInput()); - return parser.map(); + try ( + XContentParser parser = XContentType.JSON.xContent() + .createParser(xContentRegistry(), LoggingDeprecationHandler.INSTANCE, BytesReference.bytes(builder).streamInput()) + ) { + return parser.map(); + } } } diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexGraveyardTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexGraveyardTests.java index e539087de7b8e..e8892278879b9 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexGraveyardTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexGraveyardTests.java @@ -72,9 +72,10 @@ public void testXContent() throws IOException { ) ); } - XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder)); - parser.nextToken(); // the beginning of the parser - assertThat(IndexGraveyard.fromXContent(parser), equalTo(graveyard)); + try (XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder))) { + parser.nextToken(); // the beginning of the parser + assertThat(IndexGraveyard.fromXContent(parser), equalTo(graveyard)); + } } public void testChunking() { diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataTests.java index 0680392ffb3f0..b4c9f670f66b6 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataTests.java @@ -112,8 +112,10 @@ public void testIndexMetadataSerialization() throws IOException { builder.startObject(); IndexMetadata.FORMAT.toXContent(builder, metadata); builder.endObject(); - XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder)); - final IndexMetadata fromXContentMeta = IndexMetadata.fromXContent(parser); + final IndexMetadata fromXContentMeta; + try (XContentParser parser = createParser(JsonXContent.jsonXContent, 
BytesReference.bytes(builder))) { + fromXContentMeta = IndexMetadata.fromXContent(parser); + } assertEquals( "expected: " + Strings.toString(metadata) + "\nactual : " + Strings.toString(fromXContentMeta), metadata, diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexStateServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexStateServiceTests.java index 57f1265debf00..6e24735eba454 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexStateServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexStateServiceTests.java @@ -367,7 +367,7 @@ private static ClusterState addSnapshotIndex(final String index, final int numSh snapshot, randomBoolean(), false, - SnapshotsInProgress.State.INIT, + SnapshotsInProgress.State.STARTED, Collections.singletonMap(index, new IndexId(index, index)), Collections.emptyList(), Collections.emptyList(), diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/ReservedStateMetadataTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/ReservedStateMetadataTests.java index 2383c0b513ead..46be49ad7111f 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/ReservedStateMetadataTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/ReservedStateMetadataTests.java @@ -63,9 +63,10 @@ private void xContentTest(boolean addHandlers, boolean addErrors) throws IOExcep builder.startObject(); meta.toXContent(builder, ToXContent.EMPTY_PARAMS); builder.endObject(); - XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder)); - parser.nextToken(); // the beginning of the object - assertThat(ReservedStateMetadata.fromXContent(parser), equalTo(meta)); + try (XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder))) { + parser.nextToken(); // the beginning of the object + assertThat(ReservedStateMetadata.fromXContent(parser), equalTo(meta)); + } } public void testXContent() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/ToAndFromJsonMetadataTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/ToAndFromJsonMetadataTests.java index cb681b57b58dd..e7f49bc773404 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/ToAndFromJsonMetadataTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/ToAndFromJsonMetadataTests.java @@ -138,7 +138,10 @@ public void testSimpleJsonFromAndTo() throws IOException { Metadata.FORMAT.toXContent(builder, metadata); builder.endObject(); - Metadata parsedMetadata = Metadata.Builder.fromXContent(createParser(builder)); + Metadata parsedMetadata; + try (var parser = createParser(builder)) { + parsedMetadata = Metadata.Builder.fromXContent(parser); + } // templates assertThat(parsedMetadata.templates().get("foo").name(), is("foo")); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/AllocationIdTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/AllocationIdTests.java index e425b0e305050..aa4b4ec6dbbeb 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/AllocationIdTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/AllocationIdTests.java @@ -142,8 +142,10 @@ public void testSerialization() throws IOException { allocationId = AllocationId.newRelocation(allocationId); } BytesReference bytes = 
BytesReference.bytes(allocationId.toXContent(XContentFactory.jsonBuilder(), ToXContent.EMPTY_PARAMS)); - AllocationId parsedAllocationId = AllocationId.fromXContent(createParser(JsonXContent.jsonXContent, bytes)); - assertEquals(allocationId, parsedAllocationId); + try (var parser = createParser(JsonXContent.jsonXContent, bytes)) { + AllocationId parsedAllocationId = AllocationId.fromXContent(parser); + assertEquals(allocationId, parsedAllocationId); + } } public void testEquals() { diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java index be8807292350b..4640392d7b164 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java @@ -811,23 +811,24 @@ public void testXContent() throws Exception { ] } """; - XContentParser parser = createParser(JsonXContent.jsonXContent, commands); - // move two tokens, parser expected to be "on" `commands` field - parser.nextToken(); - parser.nextToken(); - - assertThat( - AllocationCommands.fromXContent(parser), - equalTo( - new AllocationCommands( - new AllocateEmptyPrimaryAllocationCommand("test", 1, "node1", true), - new AllocateStalePrimaryAllocationCommand("test", 2, "node1", true), - new AllocateReplicaAllocationCommand("test", 2, "node1"), - new MoveAllocationCommand("test", 3, "node2", "node3"), - new CancelAllocationCommand("test", 4, "node5", true) + try (XContentParser parser = createParser(JsonXContent.jsonXContent, commands)) { + // move two tokens, parser expected to be "on" `commands` field + parser.nextToken(); + parser.nextToken(); + + assertThat( + AllocationCommands.fromXContent(parser), + equalTo( + new AllocationCommands( + new AllocateEmptyPrimaryAllocationCommand("test", 1, "node1", true), + new AllocateStalePrimaryAllocationCommand("test", 2, "node1", true), + new AllocateReplicaAllocationCommand("test", 2, "node1"), + new MoveAllocationCommand("test", 3, "node2", "node3"), + new CancelAllocationCommand("test", 4, "node5", true) + ) ) - ) - ); + ); + } } @Override diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocatorTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocatorTests.java index 3cafaf216eb39..fa1a542fff7dd 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocatorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocatorTests.java @@ -59,14 +59,19 @@ import static java.util.stream.Collectors.toSet; import static org.elasticsearch.cluster.routing.ShardRoutingState.RELOCATING; import static org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator.Balancer.getIndexDiskUsageInBytes; +import static org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator.DISK_USAGE_BALANCE_FACTOR_SETTING; +import static org.elasticsearch.cluster.routing.allocation.decider.DiskThresholdDecider.SETTING_IGNORE_DISK_WATERMARKS; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.everyItem; +import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; import static 
org.hamcrest.Matchers.lessThanOrEqualTo; public class BalancedShardsAllocatorTests extends ESAllocationTestCase { + private static final Settings WITH_DISK_BALANCING = Settings.builder().put(DISK_USAGE_BALANCE_FACTOR_SETTING.getKey(), "1e-9").build(); + public void testDecideShardAllocation() { BalancedShardsAllocator allocator = new BalancedShardsAllocator(Settings.EMPTY); ClusterState clusterState = ClusterStateCreationUtils.state("idx", false, ShardRoutingState.STARTED); @@ -105,22 +110,19 @@ public void testBalanceByForecastWriteLoad() { var allocationService = new MockAllocationService( yesAllocationDeciders(), new TestGatewayAllocator(), - new BalancedShardsAllocator( - new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), - TEST_WRITE_LOAD_FORECASTER - ), + new BalancedShardsAllocator(ClusterSettings.createBuiltInClusterSettings(), TEST_WRITE_LOAD_FORECASTER), EmptyClusterInfoService.INSTANCE, SNAPSHOT_INFO_SERVICE_WITH_NO_SHARD_SIZES ); var clusterState = applyStartedShardsUntilNoChange( stateWithStartedIndices( - IndexMetadata.builder("heavy-index").indexWriteLoadForecast(8.0), - IndexMetadata.builder("light-index-1").indexWriteLoadForecast(1.0), - IndexMetadata.builder("light-index-2").indexWriteLoadForecast(2.0), - IndexMetadata.builder("light-index-3").indexWriteLoadForecast(3.0), - IndexMetadata.builder("zero-write-load-index").indexWriteLoadForecast(0.0), - IndexMetadata.builder("no-write-load-index") + anIndex("heavy-index").indexWriteLoadForecast(8.0), + anIndex("light-index-1").indexWriteLoadForecast(1.0), + anIndex("light-index-2").indexWriteLoadForecast(2.0), + anIndex("light-index-3").indexWriteLoadForecast(3.0), + anIndex("zero-write-load-index").indexWriteLoadForecast(0.0), + anIndex("no-write-load-index") ), allocationService ); @@ -146,21 +148,16 @@ public void testBalanceByForecastWriteLoad() { public void testBalanceByForecastDiskUsage() { - var allocationService = createAllocationService( - Settings.builder() - // enable disk based balancing - .put(BalancedShardsAllocator.DISK_USAGE_BALANCE_FACTOR_SETTING.getKey(), "1e-9") - .build() - ); + var allocationService = createAllocationService(WITH_DISK_BALANCING); var clusterState = applyStartedShardsUntilNoChange( stateWithStartedIndices( - IndexMetadata.builder("heavy-index").shardSizeInBytesForecast(ByteSizeValue.ofGb(8).getBytes()), - IndexMetadata.builder("light-index-1").shardSizeInBytesForecast(ByteSizeValue.ofGb(1).getBytes()), - IndexMetadata.builder("light-index-2").shardSizeInBytesForecast(ByteSizeValue.ofGb(2).getBytes()), - IndexMetadata.builder("light-index-3").shardSizeInBytesForecast(ByteSizeValue.ofGb(3).getBytes()), - IndexMetadata.builder("zero-disk-usage-index").shardSizeInBytesForecast(0L), - IndexMetadata.builder("no-disk-usage-index") + anIndex("heavy-index").shardSizeInBytesForecast(ByteSizeValue.ofGb(8).getBytes()), + anIndex("light-index-1").shardSizeInBytesForecast(ByteSizeValue.ofGb(1).getBytes()), + anIndex("light-index-2").shardSizeInBytesForecast(ByteSizeValue.ofGb(2).getBytes()), + anIndex("light-index-3").shardSizeInBytesForecast(ByteSizeValue.ofGb(3).getBytes()), + anIndex("zero-disk-usage-index").shardSizeInBytesForecast(0L), + anIndex("no-disk-usage-index") ), allocationService ); @@ -185,10 +182,7 @@ public void testBalanceByForecastDiskUsage() { public void testBalanceByActualDiskUsage() { var allocationService = createAllocationService( - Settings.builder() - // enable disk based balancing - 
.put(BalancedShardsAllocator.DISK_USAGE_BALANCE_FACTOR_SETTING.getKey(), "1e-9") - .build(), + WITH_DISK_BALANCING, () -> createClusterInfo( Map.ofEntries( Map.entry("[heavy-index][0][p]", ByteSizeValue.ofGb(8).getBytes()), @@ -203,12 +197,12 @@ var clusterState = applyStartedShardsUntilNoChange( stateWithStartedIndices( - IndexMetadata.builder("heavy-index"), - IndexMetadata.builder("light-index-1"), - IndexMetadata.builder("light-index-2"), - IndexMetadata.builder("light-index-3"), - IndexMetadata.builder("zero-disk-usage-index"), - IndexMetadata.builder("no-disk-usage-index") + anIndex("heavy-index"), + anIndex("light-index-1"), + anIndex("light-index-2"), + anIndex("light-index-3"), + anIndex("zero-disk-usage-index"), + anIndex("no-disk-usage-index") ), allocationService ); @@ -233,21 +227,18 @@ public void testBalanceByActualAndForecastDiskUsage() { var allocationService = createAllocationService( - Settings.builder() - // enable disk based balancing - .put(BalancedShardsAllocator.DISK_USAGE_BALANCE_FACTOR_SETTING.getKey(), "1e-9") - .build(), + WITH_DISK_BALANCING, () -> createClusterInfo(Map.of("[heavy-index][0][p]", ByteSizeValue.ofGb(8).getBytes())) ); var clusterState = applyStartedShardsUntilNoChange( stateWithStartedIndices( - IndexMetadata.builder("heavy-index"),// size is set in cluster info - IndexMetadata.builder("light-index-1").shardSizeInBytesForecast(ByteSizeValue.ofGb(1).getBytes()), - IndexMetadata.builder("light-index-2").shardSizeInBytesForecast(ByteSizeValue.ofGb(2).getBytes()), - IndexMetadata.builder("light-index-3").shardSizeInBytesForecast(ByteSizeValue.ofGb(3).getBytes()), - IndexMetadata.builder("zero-disk-usage-index").shardSizeInBytesForecast(0L), - IndexMetadata.builder("no-disk-usage-index") + anIndex("heavy-index"),// size is set in cluster info + anIndex("light-index-1").shardSizeInBytesForecast(ByteSizeValue.ofGb(1).getBytes()), + anIndex("light-index-2").shardSizeInBytesForecast(ByteSizeValue.ofGb(2).getBytes()), + anIndex("light-index-3").shardSizeInBytesForecast(ByteSizeValue.ofGb(3).getBytes()), + anIndex("zero-disk-usage-index").shardSizeInBytesForecast(0L), + anIndex("no-disk-usage-index") ), allocationService ); @@ -269,6 +260,26 @@ ); } + public void testDoNotBalancePartialIndicesByDiskUsage() { + + var allocationService = createAllocationService(WITH_DISK_BALANCING, () -> createClusterInfo(Map.of())); + + var partialSearchableSnapshotSettings = indexSettings(IndexVersion.current(), 1, 0) // + .put(SETTING_IGNORE_DISK_WATERMARKS.getKey(), true); + + var clusterState = applyStartedShardsUntilNoChange( + stateWithStartedIndices( + anIndex("frozen-index-1", partialSearchableSnapshotSettings).shardSizeInBytesForecast(ByteSizeValue.ofGb(1).getBytes()), + anIndex("frozen-index-2", partialSearchableSnapshotSettings).shardSizeInBytesForecast(ByteSizeValue.ofGb(1).getBytes()), + anIndex("frozen-index-3", partialSearchableSnapshotSettings).shardSizeInBytesForecast(ByteSizeValue.ofGb(1).getBytes()), + anIndex("frozen-index-4", partialSearchableSnapshotSettings).shardSizeInBytesForecast(ByteSizeValue.ofGb(10).getBytes()) + ), + allocationService + ); + + assertThat(getShardsPerNode(clusterState).values(), everyItem(hasSize(2))); + } + private static Map<String, Set<String>> getShardsPerNode(ClusterState clusterState) { return getPerNode(clusterState, mapping(ShardRouting::getIndexName, toSet())); } @@ -411,6 +422,20 @@ public void
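The new testDoNotBalancePartialIndicesByDiskUsage above and the matching getIndexDiskUsageInBytes case just below encode one rule: indices that ignore disk watermarks, i.e. partially cached searchable snapshots, are excluded from disk-usage balancing. A hedged sketch of the guard those assertions imply; the real logic lives in BalancedShardsAllocator, and the helper named here is hypothetical:

```java
// Sketch only: the assertions imply a guard of roughly this shape.
static long indexDiskUsageInBytes(ClusterInfo clusterInfo, IndexMetadata indexMetadata) {
    if (DiskThresholdDecider.SETTING_IGNORE_DISK_WATERMARKS.get(indexMetadata.getSettings())) {
        return 0; // partially cached indices contribute nothing, so they never skew the balancer
    }
    return maxOfForecastAndObservedShardSizes(clusterInfo, indexMetadata); // hypothetical helper
}
```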
testGetIndexDiskUsageInBytes() { // should pick the max shard size among forecast and cluster info assertThat(indexDiskUsageInBytes, equalTo(Math.max(forecastedShardSize, observedShardSize))); } + + { + final var indexMetadata = IndexMetadata.builder("index") + .settings(indexSettings(IndexVersion.current(), 1, 0).put(SETTING_IGNORE_DISK_WATERMARKS.getKey(), true)) + .build(); + + final var indexDiskUsageInBytes = getIndexDiskUsageInBytes( + createClusterInfo(Map.of("[index][0][p]", randomLongBetween(1024, 10240))), + indexMetadata + ); + + // partially cached indices should not be balanced by disk usage + assertThat(indexDiskUsageInBytes, equalTo(0L)); + } } public void testThresholdLimit() { @@ -503,11 +528,19 @@ private static ClusterInfo createClusterInfo(Map<String, Long> indexSizes) { return new ClusterInfo(Map.of(), Map.of(), indexSizes, Map.of(), Map.of(), Map.of()); } + private static IndexMetadata.Builder anIndex(String name) { + return anIndex(name, indexSettings(IndexVersion.current(), 1, 0)); + } + + private static IndexMetadata.Builder anIndex(String name, Settings.Builder settings) { + return IndexMetadata.builder(name).settings(settings); + } + private static ClusterState stateWithStartedIndices(IndexMetadata.Builder... indices) { var metadataBuilder = Metadata.builder(); var routingTableBuilder = RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY); for (var index : indices) { - var build = index.settings(indexSettings(IndexVersion.current(), 1, 0)).build(); + var build = index.build(); metadataBuilder.put(build, false); routingTableBuilder.addAsNew(build); } diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDeciderTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDeciderTests.java index 519184800d443..b9bf565ee58cd 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDeciderTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDeciderTests.java @@ -12,6 +12,8 @@ import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.SnapshotsInProgress; +import org.elasticsearch.cluster.metadata.NodesShutdownMetadata; +import org.elasticsearch.cluster.metadata.SingleNodeShutdownMetadata; import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.TestShardRouting; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; @@ -25,7 +27,9 @@ import org.elasticsearch.snapshots.Snapshot; import org.elasticsearch.snapshots.SnapshotId; import org.elasticsearch.snapshots.SnapshotShardSizeInfo; +import org.elasticsearch.snapshots.SnapshotsInProgressSerializationTests; import org.elasticsearch.test.ESTestCase; +import org.hamcrest.Matchers; import java.util.Arrays; import java.util.List; @@ -197,6 +201,115 @@ public void testThrottleWhenSnapshotInProgress() { ); } + public void testYesWhenSnapshotInProgressButShardIsPausedDueToShutdown() { + + // need to have a shard in INIT state to avoid the fast-path + final var otherIndex = randomIdentifier(); + + final var clusterStateWithShutdownMetadata = SnapshotsInProgressSerializationTests.CLUSTER_STATE_FOR_NODE_SHUTDOWNS + .copyAndUpdateMetadata( + mdb -> mdb.putCustom( + NodesShutdownMetadata.TYPE, + new NodesShutdownMetadata( + Map.of( + nodeId, +
SingleNodeShutdownMetadata.builder() + .setNodeId(nodeId) + .setType(SingleNodeShutdownMetadata.Type.REMOVE) + .setStartedAtMillis(randomNonNegativeLong()) + .setReason("test") + .build() + ) + ) + ) + ); + final var snapshotsInProgress = SnapshotsInProgress.EMPTY + // mark nodeID as shutting down for removal + .withUpdatedNodeIdsForRemoval(clusterStateWithShutdownMetadata) + // create a running snapshot with shardId paused + .withUpdatedEntriesForRepo( + repositoryName, + List.of( + SnapshotsInProgress.Entry.snapshot( + snapshot, + randomBoolean(), + randomBoolean(), + SnapshotsInProgress.State.STARTED, + Map.of( + shardId.getIndexName(), + new IndexId(shardId.getIndexName(), randomUUID()), + otherIndex, + new IndexId(otherIndex, randomUUID()) + ), + List.of(), + List.of(), + randomNonNegativeLong(), + randomNonNegativeLong(), + Map.of( + shardId, + new SnapshotsInProgress.ShardSnapshotStatus( + nodeId, + SnapshotsInProgress.ShardState.PAUSED_FOR_NODE_REMOVAL, + ShardGeneration.newGeneration(random()) + ), + new ShardId(otherIndex, randomUUID(), 0), + new SnapshotsInProgress.ShardSnapshotStatus( + nodeId, + SnapshotsInProgress.ShardState.INIT, + ShardGeneration.newGeneration(random()) + ) + ), + null, + Map.of(), + IndexVersion.current() + ) + ) + ); + + // if the node is marked for shutdown then the shard can move + + final var routingAllocationWithShutdownMetadata = new RoutingAllocation( + new AllocationDeciders(List.of(decider)), + ClusterState.builder(clusterStateWithShutdownMetadata).putCustom(SnapshotsInProgress.TYPE, snapshotsInProgress).build(), + ClusterInfo.EMPTY, + SnapshotShardSizeInfo.EMPTY, + randomNonNegativeLong() + ); + routingAllocationWithShutdownMetadata.setDebugMode(RoutingAllocation.DebugMode.ON); + + final var decisionWithShutdownMetadata = decider.canAllocate( + TestShardRouting.newShardRouting(shardId, nodeId, true, ShardRoutingState.STARTED), + null, + routingAllocationWithShutdownMetadata + ); + + assertEquals(Decision.Type.YES, decisionWithShutdownMetadata.type()); + assertEquals("the shard is not being snapshotted", decisionWithShutdownMetadata.getExplanation()); + + // if the node is not marked for shutdown then the shard is fixed in place + + final var routingAllocationWithoutShutdownMetadata = new RoutingAllocation( + new AllocationDeciders(List.of(decider)), + ClusterState.builder(ClusterName.DEFAULT).putCustom(SnapshotsInProgress.TYPE, snapshotsInProgress).build(), + ClusterInfo.EMPTY, + SnapshotShardSizeInfo.EMPTY, + randomNonNegativeLong() + ); + routingAllocationWithoutShutdownMetadata.setDebugMode(RoutingAllocation.DebugMode.ON); + + final var decisionWithoutShutdownMetadata = decider.canAllocate( + TestShardRouting.newShardRouting(shardId, nodeId, true, ShardRoutingState.STARTED), + null, + routingAllocationWithoutShutdownMetadata + ); + + assertEquals(Decision.Type.THROTTLE, decisionWithoutShutdownMetadata.type()); + assertThat( + decisionWithoutShutdownMetadata.getExplanation(), + Matchers.matchesRegex("waiting for snapshot .* of shard .* to complete on node .*") + ); + } + private ClusterState makeClusterState(ShardId shardId, SnapshotsInProgress.ShardState shardState) { return ClusterState.builder(ClusterName.DEFAULT) .putCustom(SnapshotsInProgress.TYPE, makeSnapshotsInProgress(shardId, shardState)) diff --git a/server/src/test/java/org/elasticsearch/common/geo/GeoBoundingBoxTests.java b/server/src/test/java/org/elasticsearch/common/geo/GeoBoundingBoxTests.java index 5e20f954d079d..e7b1404306920 100644 --- 
a/server/src/test/java/org/elasticsearch/common/geo/GeoBoundingBoxTests.java +++ b/server/src/test/java/org/elasticsearch/common/geo/GeoBoundingBoxTests.java @@ -29,10 +29,11 @@ public class GeoBoundingBoxTests extends ESTestCase { public void testInvalidParseInvalidWKT() throws IOException { XContentBuilder bboxBuilder = XContentFactory.jsonBuilder().startObject().field("wkt", "invalid").endObject(); - XContentParser parser = createParser(bboxBuilder); - parser.nextToken(); - ElasticsearchParseException e = expectThrows(ElasticsearchParseException.class, () -> GeoBoundingBox.parseBoundingBox(parser)); - assertThat(e.getMessage(), equalTo("failed to parse WKT bounding box")); + try (XContentParser parser = createParser(bboxBuilder)) { + parser.nextToken(); + ElasticsearchParseException e = expectThrows(ElasticsearchParseException.class, () -> GeoBoundingBox.parseBoundingBox(parser)); + assertThat(e.getMessage(), equalTo("failed to parse WKT bounding box")); + } } public void testInvalidParsePoint() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/common/logging/HeaderWarningTests.java b/server/src/test/java/org/elasticsearch/common/logging/HeaderWarningTests.java index 491b3676911c8..997b076b328d9 100644 --- a/server/src/test/java/org/elasticsearch/common/logging/HeaderWarningTests.java +++ b/server/src/test/java/org/elasticsearch/common/logging/HeaderWarningTests.java @@ -318,8 +318,8 @@ public void testAddComplexWarning() { + ".monitoring-beats-mb => [.monitoring-beats-8-*],.monitoring-ent-search-mb => [.monitoring-ent-search-8-*]," + ".monitoring-es-mb => [.monitoring-es-8-*],.monitoring-kibana-mb => [.monitoring-kibana-8-*]," + ".monitoring-logstash-mb => [.monitoring-logstash-8-*],.profiling-ilm-lock => [.profiling-ilm-lock*]," - + ".slm-history => [.slm-history-6*],.watch-history-16 => [.watcher-history-16*]," - + "behavioral_analytics-events-default => [behavioral_analytics-events-*],ilm-history => [ilm-history-6*]," + + ".slm-history => [.slm-history-7*],.watch-history-16 => [.watcher-history-16*]," + + "behavioral_analytics-events-default => [behavioral_analytics-events-*],ilm-history => [ilm-history-7*]," + "logs => [logs-*-*],metrics => [metrics-*-*],profiling-events => [profiling-events*],profiling-executables => " + "[profiling-executables*],profiling-metrics => [profiling-metrics*],profiling-returnpads-private => " + "[.profiling-returnpads-private*],profiling-sq-executables => [.profiling-sq-executables*]," diff --git a/server/src/test/java/org/elasticsearch/common/settings/SettingsTests.java b/server/src/test/java/org/elasticsearch/common/settings/SettingsTests.java index 965f305c3c23f..c94ab22352741 100644 --- a/server/src/test/java/org/elasticsearch/common/settings/SettingsTests.java +++ b/server/src/test/java/org/elasticsearch/common/settings/SettingsTests.java @@ -507,8 +507,10 @@ public void testToAndFromXContent() throws IOException { builder.startObject(); settings.toXContent(builder, new ToXContent.MapParams(Collections.singletonMap(Settings.FLAT_SETTINGS_PARAM, "" + flatSettings))); builder.endObject(); - XContentParser parser = createParser(builder); - Settings build = Settings.fromXContent(parser); + Settings build; + try (XContentParser parser = createParser(builder)) { + build = Settings.fromXContent(parser); + } assertEquals(5, build.size()); assertEquals(Arrays.asList("1", "2", "3"), build.getAsList("foo.bar.baz")); assertEquals(2, build.getAsInt("foo.foobar", 0).intValue()); diff --git 
a/server/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java b/server/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java index 0cac71b6751ff..f6b310abac770 100644 --- a/server/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java +++ b/server/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java @@ -9,6 +9,7 @@ package org.elasticsearch.gateway; import org.apache.lucene.index.CorruptIndexException; +import org.elasticsearch.cluster.ClusterInfo; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ESAllocationTestCase; @@ -519,7 +520,14 @@ private RoutingAllocation routingAllocationWithOnePrimaryNoReplicas( .routingTable(routingTableBuilder.build()) .nodes(DiscoveryNodes.builder().add(node1).add(node2).add(node3)) .build(); - return new RoutingAllocation(deciders, state.mutableRoutingNodes(), state, null, null, System.nanoTime()); + return new RoutingAllocation( + deciders, + state.mutableRoutingNodes(), + state, + ClusterInfo.EMPTY, + SnapshotShardSizeInfo.EMPTY, + System.nanoTime() + ); } private void assertClusterHealthStatus(RoutingAllocation allocation, ClusterHealthStatus expectedStatus) { diff --git a/server/src/test/java/org/elasticsearch/http/AbstractHttpServerTransportTests.java b/server/src/test/java/org/elasticsearch/http/AbstractHttpServerTransportTests.java index 559d3fce9cebf..4f25e00f8c083 100644 --- a/server/src/test/java/org/elasticsearch/http/AbstractHttpServerTransportTests.java +++ b/server/src/test/java/org/elasticsearch/http/AbstractHttpServerTransportTests.java @@ -1013,6 +1013,7 @@ public void testStopForceClosesConnectionDuringRequest() throws Exception { } } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/103782") public void testStopClosesChannelAfterRequest() throws Exception { var grace = LONG_GRACE_PERIOD_MS; try (var noTimeout = LogExpectation.unexpectedTimeout(grace); var transport = new TestHttpServerTransport(gracePeriod(grace))) { diff --git a/server/src/test/java/org/elasticsearch/http/DefaultRestChannelTests.java b/server/src/test/java/org/elasticsearch/http/DefaultRestChannelTests.java index 409023afc4576..2a2986d974b0d 100644 --- a/server/src/test/java/org/elasticsearch/http/DefaultRestChannelTests.java +++ b/server/src/test/java/org/elasticsearch/http/DefaultRestChannelTests.java @@ -495,7 +495,7 @@ public void testTraceStopped() { executeRequest(Settings.EMPTY, "request-host"); - verify(tracer).setAttribute(argThat(id -> id.getRawId().startsWith("rest-")), eq("http.status_code"), eq(200L)); + verify(tracer).setAttribute(argThat(id -> id.getSpanId().startsWith("rest-")), eq("http.status_code"), eq(200L)); verify(tracer).stopTrace(any(RestRequest.class)); } diff --git a/server/src/test/java/org/elasticsearch/index/codec/PerFieldMapperCodecTests.java b/server/src/test/java/org/elasticsearch/index/codec/PerFieldMapperCodecTests.java index e2a2c72d3eae3..d3ad4dd8586d5 100644 --- a/server/src/test/java/org/elasticsearch/index/codec/PerFieldMapperCodecTests.java +++ b/server/src/test/java/org/elasticsearch/index/codec/PerFieldMapperCodecTests.java @@ -15,11 +15,14 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.MapperTestUtils; +import org.elasticsearch.index.codec.bloomfilter.ES87BloomFilterPostingsFormat; +import org.elasticsearch.index.codec.postings.ES812PostingsFormat; import 
org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.test.ESTestCase; import java.io.IOException; +import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; public class PerFieldMapperCodecTests extends ESTestCase { @@ -27,23 +30,29 @@ public class PerFieldMapperCodecTests extends ESTestCase { public void testUseBloomFilter() throws IOException { PerFieldMapperCodec perFieldMapperCodec = createCodec(false, randomBoolean(), false); assertThat(perFieldMapperCodec.useBloomFilter("_id"), is(true)); + assertThat(perFieldMapperCodec.getPostingsFormatForField("_id"), instanceOf(ES87BloomFilterPostingsFormat.class)); assertThat(perFieldMapperCodec.useBloomFilter("another_field"), is(false)); + assertThat(perFieldMapperCodec.getPostingsFormatForField("another_field"), instanceOf(ES812PostingsFormat.class)); } public void testUseBloomFilterWithTimestampFieldEnabled() throws IOException { PerFieldMapperCodec perFieldMapperCodec = createCodec(true, true, false); assertThat(perFieldMapperCodec.useBloomFilter("_id"), is(true)); + assertThat(perFieldMapperCodec.getPostingsFormatForField("_id"), instanceOf(ES87BloomFilterPostingsFormat.class)); assertThat(perFieldMapperCodec.useBloomFilter("another_field"), is(false)); + assertThat(perFieldMapperCodec.getPostingsFormatForField("another_field"), instanceOf(ES812PostingsFormat.class)); } public void testUseBloomFilterWithTimestampFieldEnabled_noTimeSeriesMode() throws IOException { PerFieldMapperCodec perFieldMapperCodec = createCodec(true, false, false); assertThat(perFieldMapperCodec.useBloomFilter("_id"), is(false)); + assertThat(perFieldMapperCodec.getPostingsFormatForField("_id"), instanceOf(ES812PostingsFormat.class)); } public void testUseBloomFilterWithTimestampFieldEnabled_disableBloomFilter() throws IOException { PerFieldMapperCodec perFieldMapperCodec = createCodec(true, true, true); assertThat(perFieldMapperCodec.useBloomFilter("_id"), is(false)); + assertThat(perFieldMapperCodec.getPostingsFormatForField("_id"), instanceOf(ES812PostingsFormat.class)); assertWarnings( "[index.bloom_filter_for_id_field.enabled] setting was deprecated in Elasticsearch and will be removed in a future release." ); diff --git a/server/src/test/java/org/elasticsearch/index/codec/postings/ES812PostingsFormatTests.java b/server/src/test/java/org/elasticsearch/index/codec/postings/ES812PostingsFormatTests.java new file mode 100644 index 0000000000000..b11ab47102288 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/index/codec/postings/ES812PostingsFormatTests.java @@ -0,0 +1,138 @@ +/* + * @notice + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Modifications copyright (C) 2022 Elasticsearch B.V. 
+ */ +package org.elasticsearch.index.codec.postings; + +import org.apache.lucene.codecs.Codec; +import org.apache.lucene.codecs.CompetitiveImpactAccumulator; +import org.apache.lucene.codecs.lucene90.blocktree.FieldReader; +import org.apache.lucene.codecs.lucene90.blocktree.Stats; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.Impact; +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.store.ByteArrayDataInput; +import org.apache.lucene.store.Directory; +import org.apache.lucene.store.IOContext; +import org.apache.lucene.store.IndexInput; +import org.apache.lucene.store.IndexOutput; +import org.apache.lucene.tests.analysis.MockAnalyzer; +import org.apache.lucene.tests.index.BasePostingsFormatTestCase; +import org.apache.lucene.tests.util.TestUtil; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; + +public class ES812PostingsFormatTests extends BasePostingsFormatTestCase { + private final Codec codec = TestUtil.alwaysPostingsFormat(new ES812PostingsFormat()); + + @Override + protected Codec getCodec() { + return codec; + } + + /** Make sure the final sub-block(s) are not skipped. */ + public void testFinalBlock() throws Exception { + Directory d = newDirectory(); + IndexWriter w = new IndexWriter(d, new IndexWriterConfig(new MockAnalyzer(random()))); + for (int i = 0; i < 25; i++) { + Document doc = new Document(); + doc.add(newStringField("field", Character.toString((char) (97 + i)), Field.Store.NO)); + doc.add(newStringField("field", "z" + Character.toString((char) (97 + i)), Field.Store.NO)); + w.addDocument(doc); + } + w.forceMerge(1); + + DirectoryReader r = DirectoryReader.open(w); + assertEquals(1, r.leaves().size()); + FieldReader field = (FieldReader) r.leaves().get(0).reader().terms("field"); + // We should see exactly two blocks: one root block (prefix empty string) and one block for z* + // terms (prefix z): + Stats stats = field.getStats(); + assertEquals(0, stats.floorBlockCount); + assertEquals(2, stats.nonFloorBlockCount); + r.close(); + w.close(); + d.close(); + } + + public void testImpactSerialization() throws IOException { + // omit norms and omit freqs + doTestImpactSerialization(Collections.singletonList(new Impact(1, 1L))); + + // omit freqs + doTestImpactSerialization(Collections.singletonList(new Impact(1, 42L))); + // omit freqs with very large norms + doTestImpactSerialization(Collections.singletonList(new Impact(1, -100L))); + + // omit norms + doTestImpactSerialization(Collections.singletonList(new Impact(30, 1L))); + // omit norms with large freq + doTestImpactSerialization(Collections.singletonList(new Impact(500, 1L))); + + // freqs and norms, basic + doTestImpactSerialization( + Arrays.asList( + new Impact(1, 7L), + new Impact(3, 9L), + new Impact(7, 10L), + new Impact(15, 11L), + new Impact(20, 13L), + new Impact(28, 14L) + ) + ); + + // freqs and norms, high values + doTestImpactSerialization( + Arrays.asList( + new Impact(2, 2L), + new Impact(10, 10L), + new Impact(12, 50L), + new Impact(50, -100L), + new Impact(1000, -80L), + new Impact(1005, -3L) + ) + ); + } + + private void doTestImpactSerialization(List<Impact> impacts) throws IOException { + CompetitiveImpactAccumulator acc = new CompetitiveImpactAccumulator(); + for (Impact impact : impacts) { + acc.add(impact.freq, impact.norm); + } + try (Directory dir =
newDirectory()) { + try (IndexOutput out = dir.createOutput("foo", IOContext.DEFAULT)) { + ES812SkipWriter.writeImpacts(acc, out); + } + try (IndexInput in = dir.openInput("foo", IOContext.DEFAULT)) { + byte[] b = new byte[Math.toIntExact(in.length())]; + in.readBytes(b, 0, b.length); + List<Impact> impacts2 = ES812ScoreSkipReader.readImpacts( + new ByteArrayDataInput(b), + new ES812ScoreSkipReader.MutableImpactList() + ); + assertEquals(impacts, impacts2); + } + } + } +} diff --git a/server/src/test/java/org/elasticsearch/index/codec/postings/ForUtilTests.java b/server/src/test/java/org/elasticsearch/index/codec/postings/ForUtilTests.java new file mode 100644 index 0000000000000..14e8d3344c3dc --- /dev/null +++ b/server/src/test/java/org/elasticsearch/index/codec/postings/ForUtilTests.java @@ -0,0 +1,99 @@ +/* + * @notice + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Modifications copyright (C) 2022 Elasticsearch B.V. + */ +package org.elasticsearch.index.codec.postings; + +import com.carrotsearch.randomizedtesting.generators.RandomNumbers; + +import org.apache.lucene.store.ByteBuffersDirectory; +import org.apache.lucene.store.Directory; +import org.apache.lucene.store.IOContext; +import org.apache.lucene.store.IndexInput; +import org.apache.lucene.store.IndexOutput; +import org.apache.lucene.tests.util.LuceneTestCase; +import org.apache.lucene.tests.util.TestUtil; +import org.apache.lucene.util.ArrayUtil; +import org.apache.lucene.util.packed.PackedInts; + +import java.io.IOException; +import java.util.Arrays; + +public class ForUtilTests extends LuceneTestCase { + + public void testEncodeDecode() throws IOException { + final int iterations = RandomNumbers.randomIntBetween(random(), 50, 1000); + final int[] values = new int[iterations * ForUtil.BLOCK_SIZE]; + + for (int i = 0; i < iterations; ++i) { + final int bpv = TestUtil.nextInt(random(), 1, 31); + for (int j = 0; j < ForUtil.BLOCK_SIZE; ++j) { + values[i * ForUtil.BLOCK_SIZE + j] = RandomNumbers.randomIntBetween(random(), 0, (int) PackedInts.maxValue(bpv)); + } + } + + final Directory d = new ByteBuffersDirectory(); + final long endPointer; + + { + // encode + IndexOutput out = d.createOutput("test.bin", IOContext.DEFAULT); + final ForUtil forUtil = new ForUtil(); + + for (int i = 0; i < iterations; ++i) { + long[] source = new long[ForUtil.BLOCK_SIZE]; + long or = 0; + for (int j = 0; j < ForUtil.BLOCK_SIZE; ++j) { + source[j] = values[i * ForUtil.BLOCK_SIZE + j]; + or |= source[j]; + } + final int bpv = PackedInts.bitsRequired(or); + out.writeByte((byte) bpv); + forUtil.encode(source, bpv, out); + } + endPointer = out.getFilePointer(); + out.close(); + } + + { + // decode + IndexInput in = d.openInput("test.bin", IOContext.READONCE); + final ForUtil forUtil = new ForUtil(); + for (int i
= 0; i < iterations; ++i) { + final int bitsPerValue = in.readByte(); + final long currentFilePointer = in.getFilePointer(); + final long[] restored = new long[ForUtil.BLOCK_SIZE]; + forUtil.decode(bitsPerValue, in, restored); + int[] ints = new int[ForUtil.BLOCK_SIZE]; + for (int j = 0; j < ForUtil.BLOCK_SIZE; ++j) { + ints[j] = Math.toIntExact(restored[j]); + } + assertArrayEquals( + Arrays.toString(ints), + ArrayUtil.copyOfSubArray(values, i * ForUtil.BLOCK_SIZE, (i + 1) * ForUtil.BLOCK_SIZE), + ints + ); + assertEquals(forUtil.numBytes(bitsPerValue), in.getFilePointer() - currentFilePointer); + } + assertEquals(endPointer, in.getFilePointer()); + in.close(); + } + + d.close(); + } +} diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index 25d3298f82bd7..cbb713f01ec03 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -691,8 +691,8 @@ protected void commitIndexWriter(IndexWriter writer, Translog translog) throws I } @Override - public void flush(boolean force, boolean waitIfOngoing, ActionListener<FlushResult> listener) throws EngineException { - super.flush(force, waitIfOngoing, listener); + protected void flushHoldingLock(boolean force, boolean waitIfOngoing, ActionListener<FlushResult> listener) { + super.flushHoldingLock(force, waitIfOngoing, listener); postFlushSegmentInfoGen.set(getLastCommittedSegmentInfos().getGeneration()); assertThat(getPreCommitSegmentGeneration(), equalTo(preCommitGen.get())); } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/BooleanScriptFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/BooleanScriptFieldTypeTests.java index d8f063ece35c0..053e4226b3d79 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/BooleanScriptFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/BooleanScriptFieldTypeTests.java @@ -347,65 +347,66 @@ public void testDualingQueries() throws IOException { try (Directory directory = newDirectory(); RandomIndexWriter iw = new RandomIndexWriter(random(), directory)) { List<Boolean> values = randomList(0, 2, ESTestCase::randomBoolean); String source = "{\"foo\": " + values + "}"; - XContentParser parser = createParser(JsonXContent.jsonXContent, source); - SourceToParse sourceToParse = new SourceToParse("test", new BytesArray(source), XContentType.JSON); - DocumentParserContext ctx = new TestDocumentParserContext(MappingLookup.EMPTY, sourceToParse) { - @Override - public XContentParser parser() { - return parser; + try (XContentParser parser = createParser(JsonXContent.jsonXContent, source)) { + SourceToParse sourceToParse = new SourceToParse("test", new BytesArray(source), XContentType.JSON); + DocumentParserContext ctx = new TestDocumentParserContext(MappingLookup.EMPTY, sourceToParse) { + @Override + public XContentParser parser() { + return parser; + } + }; + ctx.doc().add(new StoredField("_source", new BytesRef(source))); + + ctx.parser().nextToken(); + ctx.parser().nextToken(); + ctx.parser().nextToken(); + while (ctx.parser().nextToken() != Token.END_ARRAY) { + ootb.parse(ctx); } - }; - ctx.doc().add(new StoredField("_source", new BytesRef(source))); - - ctx.parser().nextToken(); - ctx.parser().nextToken(); - ctx.parser().nextToken(); - while (ctx.parser().nextToken() != Token.END_ARRAY) { - ootb.parse(ctx); - } - iw.addDocument(ctx.doc()); - try
(DirectoryReader reader = iw.getReader()) { - IndexSearcher searcher = newSearcher(reader); - assertSameCount( - searcher, - source, - "*", - simpleMappedFieldType().existsQuery(mockContext()), - ootb.fieldType().existsQuery(mockContext()) - ); - boolean term = randomBoolean(); - assertSameCount( - searcher, - source, - term, - simpleMappedFieldType().termQuery(term, mockContext()), - ootb.fieldType().termQuery(term, mockContext()) - ); - List<Boolean> terms = randomList(0, 3, ESTestCase::randomBoolean); - assertSameCount( - searcher, - source, - terms, - simpleMappedFieldType().termsQuery(terms, mockContext()), - ootb.fieldType().termsQuery(terms, mockContext()) - ); - boolean low; - boolean high; - if (randomBoolean()) { - low = high = randomBoolean(); - } else { - low = false; - high = true; + iw.addDocument(ctx.doc()); + try (DirectoryReader reader = iw.getReader()) { + IndexSearcher searcher = newSearcher(reader); + assertSameCount( + searcher, + source, + "*", + simpleMappedFieldType().existsQuery(mockContext()), + ootb.fieldType().existsQuery(mockContext()) + ); + boolean term = randomBoolean(); + assertSameCount( + searcher, + source, + term, + simpleMappedFieldType().termQuery(term, mockContext()), + ootb.fieldType().termQuery(term, mockContext()) + ); + List<Boolean> terms = randomList(0, 3, ESTestCase::randomBoolean); + assertSameCount( + searcher, + source, + terms, + simpleMappedFieldType().termsQuery(terms, mockContext()), + ootb.fieldType().termsQuery(terms, mockContext()) + ); + boolean low; + boolean high; + if (randomBoolean()) { + low = high = randomBoolean(); + } else { + low = false; + high = true; + } + boolean includeLow = randomBoolean(); + boolean includeHigh = randomBoolean(); + assertSameCount( + searcher, + source, + (includeLow ? "[" : "(") + low + "," + high + (includeHigh ? "]" : ")"), + simpleMappedFieldType().rangeQuery(low, high, includeLow, includeHigh, null, null, null, mockContext()), + ootb.fieldType().rangeQuery(low, high, includeLow, includeHigh, null, null, null, mockContext()) + ); } - boolean includeLow = randomBoolean(); - boolean includeHigh = randomBoolean(); - assertSameCount( - searcher, - source, - (includeLow ? "[" : "(") + low + "," + high + (includeHigh ?
"]" : ")"), - simpleMappedFieldType().rangeQuery(low, high, includeLow, includeHigh, null, null, null, mockContext()), - ootb.fieldType().rangeQuery(low, high, includeLow, includeHigh, null, null, null, mockContext()) - ); } } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/CompletionFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/CompletionFieldMapperTests.java index 1f473d0ade35b..229d16ab85aef 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/CompletionFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/CompletionFieldMapperTests.java @@ -223,7 +223,10 @@ public void testTypeParsing() throws Exception { XContentBuilder builder = jsonBuilder().startObject(); fieldMapper.toXContent(builder, new ToXContent.MapParams(Map.of("include_defaults", "true"))).endObject(); builder.close(); - Map serializedMap = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder)).map(); + Map serializedMap; + try (var parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder))) { + serializedMap = parser.map(); + } Map configMap = (Map) serializedMap.get("field"); assertThat(configMap.get("analyzer").toString(), is("simple")); assertThat(configMap.get("search_analyzer").toString(), is("standard")); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DocCountFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DocCountFieldMapperTests.java index 079a79938c310..c1fd872e89f45 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DocCountFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DocCountFieldMapperTests.java @@ -104,8 +104,10 @@ public void testSyntheticSourceMany() throws IOException { LeafStoredFieldLoader storedFieldLoader = StoredFieldLoader.empty().getLoader(leaf, docIds); for (int docId : docIds) { String source = sourceLoaderLeaf.source(storedFieldLoader, docId).internalSourceRef().utf8ToString(); - int doc = (int) JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, source).map().get("doc"); - assertThat("doc " + docId, source, equalTo("{\"_doc_count\":" + counts.get(doc) + ",\"doc\":" + doc + "}")); + try (var parser = JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, source)) { + int doc = (int) parser.map().get("doc"); + assertThat("doc " + docId, source, equalTo("{\"_doc_count\":" + counts.get(doc) + ",\"doc\":" + doc + "}")); + } } } }); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DotExpandingXContentParserTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DotExpandingXContentParserTests.java index c55ffaaa70a16..f089648bce27f 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DotExpandingXContentParserTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DotExpandingXContentParserTests.java @@ -22,24 +22,32 @@ public class DotExpandingXContentParserTests extends ESTestCase { private void assertXContentMatches(String dotsExpanded, String withDots) throws IOException { - XContentParser inputParser = createParser(JsonXContent.jsonXContent, withDots); final ContentPath contentPath = new ContentPath(); - XContentParser expandedParser = DotExpandingXContentParser.expandDots(inputParser, contentPath); - expandedParser.allowDuplicateKeys(true); - - XContentBuilder actualOutput = XContentBuilder.builder(JsonXContent.jsonXContent).copyCurrentStructure(expandedParser); - assertEquals(dotsExpanded, 
Strings.toString(actualOutput)); - - XContentParser expectedParser = createParser(JsonXContent.jsonXContent, dotsExpanded); - expectedParser.allowDuplicateKeys(true); - XContentParser actualParser = DotExpandingXContentParser.expandDots(createParser(JsonXContent.jsonXContent, withDots), contentPath); - XContentParser.Token currentToken; - while ((currentToken = actualParser.nextToken()) != null) { - assertEquals(currentToken, expectedParser.nextToken()); - assertEquals(expectedParser.currentToken(), actualParser.currentToken()); - assertEquals(actualParser.currentToken().name(), expectedParser.currentName(), actualParser.currentName()); + try ( + XContentParser inputParser = createParser(JsonXContent.jsonXContent, withDots); + XContentParser expandedParser = DotExpandingXContentParser.expandDots(inputParser, contentPath) + ) { + expandedParser.allowDuplicateKeys(true); + + XContentBuilder actualOutput = XContentBuilder.builder(JsonXContent.jsonXContent).copyCurrentStructure(expandedParser); + assertEquals(dotsExpanded, Strings.toString(actualOutput)); + + try (XContentParser expectedParser = createParser(JsonXContent.jsonXContent, dotsExpanded)) { + expectedParser.allowDuplicateKeys(true); + try ( + var p = createParser(JsonXContent.jsonXContent, withDots); + XContentParser actualParser = DotExpandingXContentParser.expandDots(p, contentPath) + ) { + XContentParser.Token currentToken; + while ((currentToken = actualParser.nextToken()) != null) { + assertEquals(currentToken, expectedParser.nextToken()); + assertEquals(expectedParser.currentToken(), actualParser.currentToken()); + assertEquals(actualParser.currentToken().name(), expectedParser.currentName(), actualParser.currentName()); + } + assertNull(expectedParser.nextToken()); + } + } } - assertNull(expectedParser.nextToken()); } public void testEmbeddedObject() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldTypeTests.java index aa4dec379f085..8627a236d6401 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldTypeTests.java @@ -11,15 +11,19 @@ import org.apache.lucene.tests.geo.GeoTestUtil; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.geo.SimpleFeatureFactory; +import org.elasticsearch.geometry.Point; +import org.elasticsearch.geometry.utils.WellKnownBinary; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.script.ScriptCompiler; -import org.hamcrest.Matchers; import java.io.IOException; +import java.nio.ByteOrder; import java.util.ArrayList; import java.util.List; import java.util.Map; + +import static org.hamcrest.Matchers.equalTo; + public class GeoPointFieldTypeTests extends FieldTypeTestCase { public void testFetchSourceValue() throws IOException { @@ -36,31 +40,50 @@ public void testFetchSourceValue() throws IOException { Map<String, Object> otherJsonPoint = Map.of("type", "Point", "coordinates", List.of(30.0, 50.0)); String wktPoint = "POINT (42.0 27.1)"; String otherWktPoint = "POINT (30.0 50.0)"; + byte[] wkbPoint = WellKnownBinary.toWKB(new Point(42.0, 27.1), ByteOrder.LITTLE_ENDIAN); + byte[] otherWkbPoint = WellKnownBinary.toWKB(new Point(30.0, 50.0), ByteOrder.LITTLE_ENDIAN); // Test a single point in [lon, lat] array format.
Object sourceValue = List.of(42.0, 27.1); assertEquals(List.of(jsonPoint), fetchSourceValue(mapper, sourceValue, null)); assertEquals(List.of(wktPoint), fetchSourceValue(mapper, sourceValue, "wkt")); + List<?> wkb = fetchSourceValue(mapper, sourceValue, "wkb"); + assertThat(wkb.size(), equalTo(1)); + assertThat(wkb.get(0), equalTo(wkbPoint)); // Test a single point in "lat, lon" string format. sourceValue = "27.1,42.0"; assertEquals(List.of(jsonPoint), fetchSourceValue(mapper, sourceValue, null)); assertEquals(List.of(wktPoint), fetchSourceValue(mapper, sourceValue, "wkt")); + wkb = fetchSourceValue(mapper, sourceValue, "wkb"); + assertThat(wkb.size(), equalTo(1)); + assertThat(wkb.get(0), equalTo(wkbPoint)); // Test a list of points in [lon, lat] array format. sourceValue = List.of(List.of(42.0, 27.1), List.of(30.0, 50.0)); assertEquals(List.of(jsonPoint, otherJsonPoint), fetchSourceValue(mapper, sourceValue, null)); assertEquals(List.of(wktPoint, otherWktPoint), fetchSourceValue(mapper, sourceValue, "wkt")); + wkb = fetchSourceValue(mapper, sourceValue, "wkb"); + assertThat(wkb.size(), equalTo(2)); + assertThat(wkb.get(0), equalTo(wkbPoint)); + assertThat(wkb.get(1), equalTo(otherWkbPoint)); // Test a list of points in [lat,lon] array format with one malformed sourceValue = List.of(List.of(42.0, 27.1), List.of("a", "b"), List.of(30.0, 50.0)); assertEquals(List.of(jsonPoint, otherJsonPoint), fetchSourceValue(mapper, sourceValue, null)); assertEquals(List.of(wktPoint, otherWktPoint), fetchSourceValue(mapper, sourceValue, "wkt")); + wkb = fetchSourceValue(mapper, sourceValue, "wkb"); + assertThat(wkb.size(), equalTo(2)); + assertThat(wkb.get(0), equalTo(wkbPoint)); + assertThat(wkb.get(1), equalTo(otherWkbPoint)); // Test a single point in well-known text format.
sourceValue = "POINT (42.0 27.1)"; assertEquals(List.of(jsonPoint), fetchSourceValue(mapper, sourceValue, null)); assertEquals(List.of(wktPoint), fetchSourceValue(mapper, sourceValue, "wkt")); + wkb = fetchSourceValue(mapper, sourceValue, "wkb"); + assertThat(wkb.size(), equalTo(1)); + assertThat(wkb.get(0), equalTo(wkbPoint)); // Test a malformed value sourceValue = "malformed"; @@ -71,9 +94,13 @@ public void testFetchSourceValue() throws IOException { if (ignoreMalformed) { assertEquals(List.of(jsonPoint), fetchSourceValue(mapper, sourceValue, null)); assertEquals(List.of(wktPoint), fetchSourceValue(mapper, sourceValue, "wkt")); + wkb = fetchSourceValue(mapper, sourceValue, "wkb"); + assertThat(wkb.size(), equalTo(1)); + assertThat(wkb.get(0), equalTo(wkbPoint)); } else { assertEquals(List.of(), fetchSourceValue(mapper, sourceValue, null)); assertEquals(List.of(), fetchSourceValue(mapper, sourceValue, "wkt")); + assertEquals(List.of(), fetchSourceValue(mapper, sourceValue, "wkb")); } // test single point in GeoJSON format @@ -110,13 +137,13 @@ public void testFetchVectorTile() throws IOException { final double lat = GeoTestUtil.nextLatitude(); final double lon = GeoTestUtil.nextLongitude(); List<?> sourceValue = fetchSourceValue(mapper, List.of(lon, lat), mvtString); - assertThat(sourceValue.size(), Matchers.equalTo(1)); - assertThat(sourceValue.get(0), Matchers.equalTo(featureFactory.point(lon, lat))); + assertThat(sourceValue.size(), equalTo(1)); + assertThat(sourceValue.get(0), equalTo(featureFactory.point(lon, lat))); geoPoints.add(new GeoPoint(lat, lon)); values.add(List.of(lon, lat)); } List<?> sourceValue = fetchSourceValue(mapper, values, mvtString); - assertThat(sourceValue.size(), Matchers.equalTo(1)); - assertThat(sourceValue.get(0), Matchers.equalTo(featureFactory.points(geoPoints))); + assertThat(sourceValue.size(), equalTo(1)); + assertThat(sourceValue.get(0), equalTo(featureFactory.points(geoPoints))); } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/IgnoreMalformedStoredValuesTests.java b/server/src/test/java/org/elasticsearch/index/mapper/IgnoreMalformedStoredValuesTests.java index 60fe4c4a6d99f..b3684d420d728 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/IgnoreMalformedStoredValuesTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/IgnoreMalformedStoredValuesTests.java @@ -148,13 +148,13 @@ private static XContentParser ignoreMalformed(XContentType type, Object value) t private static StoredField ignoreMalformedStoredField(XContentType type, Object value) throws IOException { XContentBuilder b = XContentBuilder.builder(type.xContent()); b.startObject().field("name", value).endObject(); - XContentParser p = type.xContent().createParser(XContentParserConfiguration.EMPTY, BytesReference.bytes(b).streamInput()); - assertThat(p.nextToken(), equalTo(XContentParser.Token.START_OBJECT)); - assertThat(p.nextToken(), equalTo(XContentParser.Token.FIELD_NAME)); - assertThat(p.currentName(), equalTo("name")); - p.nextToken(); - - return IgnoreMalformedStoredValues.storedField("foo.name", p); + try (XContentParser p = type.xContent().createParser(XContentParserConfiguration.EMPTY, BytesReference.bytes(b).streamInput())) { + assertThat(p.nextToken(), equalTo(XContentParser.Token.START_OBJECT)); + assertThat(p.nextToken(), equalTo(XContentParser.Token.FIELD_NAME)); + assertThat(p.currentName(), equalTo("name")); + p.nextToken(); + return IgnoreMalformedStoredValues.storedField("foo.name", p); + } } private static XContentParser
parserFrom(IgnoreMalformedStoredValues values, String fieldName) throws IOException { diff --git a/server/src/test/java/org/elasticsearch/index/mapper/SourceFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/SourceFieldMapperTests.java index f683cb60c87c3..5601290fed5c7 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/SourceFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/SourceFieldMapperTests.java @@ -73,7 +73,7 @@ public void testNoFormat() throws Exception { ) ); - assertThat(XContentFactory.xContentType(doc.source().toBytesRef().bytes), equalTo(XContentType.JSON)); + assertThat(XContentHelper.xContentType(doc.source()), equalTo(XContentType.JSON)); doc = documentMapper.parse( new SourceToParse( diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index 4d6e316b4b7d9..869143066bd7a 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -78,7 +78,6 @@ import org.elasticsearch.index.engine.DocIdSeqNoAndSource; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.EngineConfig; -import org.elasticsearch.index.engine.EngineException; import org.elasticsearch.index.engine.EngineTestCase; import org.elasticsearch.index.engine.InternalEngine; import org.elasticsearch.index.engine.InternalEngineFactory; @@ -4165,13 +4164,13 @@ public void testMultiplePeriodicFlushesCanBeTriggeredBeforeTheyAreDurable() thro var flushExecutedBarrier = new CyclicBarrier(2); var shard = newStartedShard(true, indexSettings, config -> new InternalEngine(config) { @Override - public void flush(boolean force, boolean waitIfOngoing, ActionListener<FlushResult> listener) throws EngineException { + protected void flushHoldingLock(boolean force, boolean waitIfOngoing, ActionListener<FlushResult> listener) { if (shardStarted.get()) { - super.flush(force, waitIfOngoing, ActionListener.noop()); + super.flushHoldingLock(force, waitIfOngoing, ActionListener.noop()); pendingListeners.add(listener); safeAwait(flushExecutedBarrier); } else { - super.flush(force, waitIfOngoing, listener); + super.flushHoldingLock(force, waitIfOngoing, listener); } } }); diff --git a/server/src/test/java/org/elasticsearch/index/store/StoreTests.java b/server/src/test/java/org/elasticsearch/index/store/StoreTests.java index dfbfb737c9ab2..3eb4675d37e97 100644 --- a/server/src/test/java/org/elasticsearch/index/store/StoreTests.java +++ b/server/src/test/java/org/elasticsearch/index/store/StoreTests.java @@ -775,22 +775,22 @@ public void testStoreStats() throws IOException { final long localStoreSizeDelta = randomLongBetween(-initialStoreSize, initialStoreSize); final long reservedBytes = randomBoolean() ?
StoreStats.UNKNOWN_RESERVED_BYTES : randomLongBetween(0L, Integer.MAX_VALUE); StoreStats stats = store.stats(reservedBytes, size -> size + localStoreSizeDelta); - assertEquals(initialStoreSize, stats.totalDataSetSize().getBytes()); - assertEquals(initialStoreSize + localStoreSizeDelta, stats.getSize().getBytes()); - assertEquals(reservedBytes, stats.getReservedSize().getBytes()); + assertEquals(initialStoreSize, stats.totalDataSetSizeInBytes()); + assertEquals(initialStoreSize + localStoreSizeDelta, stats.sizeInBytes()); + assertEquals(reservedBytes, stats.reservedSizeInBytes()); stats.add(null); - assertEquals(initialStoreSize, stats.totalDataSetSize().getBytes()); - assertEquals(initialStoreSize + localStoreSizeDelta, stats.getSize().getBytes()); - assertEquals(reservedBytes, stats.getReservedSize().getBytes()); + assertEquals(initialStoreSize, stats.totalDataSetSizeInBytes()); + assertEquals(initialStoreSize + localStoreSizeDelta, stats.sizeInBytes()); + assertEquals(reservedBytes, stats.reservedSizeInBytes()); final long otherStatsDataSetBytes = randomLongBetween(0L, Integer.MAX_VALUE); final long otherStatsLocalBytes = randomLongBetween(0L, Integer.MAX_VALUE); final long otherStatsReservedBytes = randomBoolean() ? StoreStats.UNKNOWN_RESERVED_BYTES : randomLongBetween(0L, Integer.MAX_VALUE); stats.add(new StoreStats(otherStatsLocalBytes, otherStatsDataSetBytes, otherStatsReservedBytes)); - assertEquals(initialStoreSize + otherStatsDataSetBytes, stats.totalDataSetSize().getBytes()); - assertEquals(initialStoreSize + otherStatsLocalBytes + localStoreSizeDelta, stats.getSize().getBytes()); - assertEquals(Math.max(reservedBytes, 0L) + Math.max(otherStatsReservedBytes, 0L), stats.getReservedSize().getBytes()); + assertEquals(initialStoreSize + otherStatsDataSetBytes, stats.totalDataSetSizeInBytes()); + assertEquals(initialStoreSize + otherStatsLocalBytes + localStoreSizeDelta, stats.sizeInBytes()); + assertEquals(Math.max(reservedBytes, 0L) + Math.max(otherStatsReservedBytes, 0L), stats.reservedSizeInBytes()); Directory dir = store.directory(); final long length; @@ -805,8 +805,8 @@ public void testStoreStats() throws IOException { assertTrue(numNonExtraFiles(store) > 0); stats = store.stats(0L, size -> size + localStoreSizeDelta); - assertEquals(initialStoreSize + length, stats.totalDataSetSize().getBytes()); - assertEquals(initialStoreSize + localStoreSizeDelta + length, stats.getSizeInBytes()); + assertEquals(initialStoreSize + length, stats.totalDataSetSizeInBytes()); + assertEquals(initialStoreSize + localStoreSizeDelta + length, stats.sizeInBytes()); deleteContent(store.directory()); IOUtils.close(store); diff --git a/server/src/test/java/org/elasticsearch/indices/TermsLookupTests.java b/server/src/test/java/org/elasticsearch/indices/TermsLookupTests.java index dc2b7614fb52c..8f4bb9d9e3c5d 100644 --- a/server/src/test/java/org/elasticsearch/indices/TermsLookupTests.java +++ b/server/src/test/java/org/elasticsearch/indices/TermsLookupTests.java @@ -63,14 +63,15 @@ public void testSerialization() throws IOException { } public void testXContentParsing() throws IOException { - XContentParser parser = createParser(JsonXContent.jsonXContent, """ - { "index" : "index", "id" : "id", "path" : "path", "routing" : "routing" }"""); + try (XContentParser parser = createParser(JsonXContent.jsonXContent, """ + { "index" : "index", "id" : "id", "path" : "path", "routing" : "routing" }""")) { - TermsLookup tl = TermsLookup.parseTermsLookup(parser); - assertEquals("index", tl.index()); - 
assertEquals("id", tl.id()); - assertEquals("path", tl.path()); - assertEquals("routing", tl.routing()); + TermsLookup tl = TermsLookup.parseTermsLookup(parser); + assertEquals("index", tl.index()); + assertEquals("id", tl.id()); + assertEquals("path", tl.path()); + assertEquals("routing", tl.routing()); + } } public static TermsLookup randomTermsLookup() { diff --git a/server/src/test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerServiceTests.java b/server/src/test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerServiceTests.java index c20c9615573d6..a517d09b2aefe 100644 --- a/server/src/test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerServiceTests.java +++ b/server/src/test/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerServiceTests.java @@ -346,6 +346,7 @@ public void testParentTriggersG1GCBeforeBreaking() throws InterruptedException, AtomicReference<Consumer<Boolean>> onOverLimit = new AtomicReference<>(leader -> {}); AtomicLong time = new AtomicLong(randomLongBetween(Long.MIN_VALUE / 2, Long.MAX_VALUE / 2)); long interval = randomLongBetween(1, 1000); + long fullGCInterval = randomLongBetween(500, 2000); final HierarchyCircuitBreakerService service = new HierarchyCircuitBreakerService( CircuitBreakerMetrics.NOOP, clusterSettings, @@ -357,6 +358,8 @@ public void testParentTriggersG1GCBeforeBreaking() throws InterruptedException, HierarchyCircuitBreakerService.createYoungGcCountSupplier(), time::get, interval, + fullGCInterval, + TimeValue.timeValueSeconds(30), TimeValue.timeValueSeconds(30) ) { @@ -481,6 +484,7 @@ public void testG1OverLimitStrategyBreakOnMemory() { AtomicInteger leaderTriggerCount = new AtomicInteger(); AtomicInteger nonLeaderTriggerCount = new AtomicInteger(); long interval = randomLongBetween(1, 1000); + long fullGCInterval = randomLongBetween(500, 2000); AtomicLong memoryUsage = new AtomicLong(); HierarchyCircuitBreakerService.G1OverLimitStrategy strategy = new HierarchyCircuitBreakerService.G1OverLimitStrategy( @@ -489,6 +493,8 @@ public void testG1OverLimitStrategyBreakOnMemory() { () -> 0, time::get, interval, + fullGCInterval, + TimeValue.timeValueSeconds(30), TimeValue.timeValueSeconds(30) ) { @Override @@ -535,6 +541,7 @@ public void testG1OverLimitStrategyBreakOnGcCount() { AtomicInteger leaderTriggerCount = new AtomicInteger(); AtomicInteger nonLeaderTriggerCount = new AtomicInteger(); long interval = randomLongBetween(1, 1000); + long fullGCInterval = randomLongBetween(500, 2000); AtomicLong memoryUsageCounter = new AtomicLong(); AtomicLong gcCounter = new AtomicLong(); LongSupplier memoryUsageSupplier = () -> { @@ -547,6 +554,8 @@ public void testG1OverLimitStrategyBreakOnGcCount() { gcCounter::incrementAndGet, time::get, interval, + fullGCInterval, + TimeValue.timeValueSeconds(30), TimeValue.timeValueSeconds(30) ) { @@ -569,13 +578,15 @@ void overLimitTriggered(boolean leader) { assertThat(strategy.overLimit(input), sameInstance(input)); assertThat(leaderTriggerCount.get(), equalTo(1)); assertThat(gcCounter.get(), equalTo(2L)); - assertThat(memoryUsageCounter.get(), equalTo(2L)); // 1 before gc count break and 1 to get resulting memory usage. + // 1 before gc count break, 1 for full GC check and 1 to get resulting memory usage.
+ assertThat(memoryUsageCounter.get(), equalTo(3L)); } public void testG1OverLimitStrategyThrottling() throws InterruptedException, BrokenBarrierException, TimeoutException { AtomicLong time = new AtomicLong(randomLongBetween(Long.MIN_VALUE / 2, Long.MAX_VALUE / 2)); AtomicInteger leaderTriggerCount = new AtomicInteger(); long interval = randomLongBetween(1, 1000); + long fullGCInterval = randomLongBetween(500, 2000); AtomicLong memoryUsage = new AtomicLong(); HierarchyCircuitBreakerService.G1OverLimitStrategy strategy = new HierarchyCircuitBreakerService.G1OverLimitStrategy( JvmInfo.jvmInfo(), @@ -583,6 +594,8 @@ public void testG1OverLimitStrategyThrottling() throws InterruptedException, Bro () -> 0, time::get, interval, + fullGCInterval, + TimeValue.timeValueSeconds(30), TimeValue.timeValueSeconds(30) ) { @@ -661,6 +674,8 @@ public void testG1LockTimeout() throws Exception { gcCounter::incrementAndGet, () -> 0, 1, + 1, + TimeValue.timeValueMillis(randomFrom(0, 5, 10)), TimeValue.timeValueMillis(randomFrom(0, 5, 10)) ) { diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java index fd285ba8b239f..82fb694db6c66 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java @@ -784,10 +784,8 @@ void phase2( }; PlainActionFuture<RecoveryResponse> future = new PlainActionFuture<>(); - expectThrows(IndexShardRelocatedException.class, () -> { - handler.recoverToTarget(future); - future.actionGet(); - }); + handler.recoverToTarget(future); + expectThrows(IndexShardRelocatedException.class, future); assertFalse(phase1Called.get()); assertFalse(prepareTargetForTranslogCalled.get()); assertFalse(phase2Called.get()); diff --git a/server/src/test/java/org/elasticsearch/ingest/IngestDocumentTests.java b/server/src/test/java/org/elasticsearch/ingest/IngestDocumentTests.java index 166b830d14301..71e90e8f4cc06 100644 --- a/server/src/test/java/org/elasticsearch/ingest/IngestDocumentTests.java +++ b/server/src/test/java/org/elasticsearch/ingest/IngestDocumentTests.java @@ -339,7 +339,7 @@ public void testSimpleSetFieldValue() { } public void testSetFieldValueNullValue() { - ingestDocument.setFieldValue("new_field", null); + ingestDocument.setFieldValue("new_field", (Object) null); assertThat(ingestDocument.getSourceAndMetadata().containsKey("new_field"), equalTo(true)); assertThat(ingestDocument.getSourceAndMetadata().get("new_field"), nullValue()); } diff --git a/server/src/test/java/org/elasticsearch/ingest/ValueSourceTests.java b/server/src/test/java/org/elasticsearch/ingest/ValueSourceTests.java index 84b7db7301597..0f18d04d8ac9c 100644 --- a/server/src/test/java/org/elasticsearch/ingest/ValueSourceTests.java +++ b/server/src/test/java/org/elasticsearch/ingest/ValueSourceTests.java @@ -45,7 +45,7 @@ public void testCopyDoesNotChangeProvidedMap() { IngestDocument ingestDocument = TestIngestDocument.emptyIngestDocument(); ingestDocument.setFieldValue( - new TestTemplateService.MockTemplateScript.Factory("field1"), + ingestDocument.renderTemplate(new TestTemplateService.MockTemplateScript.Factory("field1")), ValueSource.wrap(myPreciousMap, TestTemplateService.instance()) ); ingestDocument.removeField("field1.field2"); @@ -60,7 +60,7 @@ public void testCopyDoesNotChangeProvidedList() { IngestDocument ingestDocument = TestIngestDocument.emptyIngestDocument();
ingestDocument.setFieldValue( - new TestTemplateService.MockTemplateScript.Factory("field1"), + ingestDocument.renderTemplate(new TestTemplateService.MockTemplateScript.Factory("field1")), ValueSource.wrap(myPreciousList, TestTemplateService.instance()) ); ingestDocument.removeField("field1.0"); diff --git a/server/src/test/java/org/elasticsearch/plugins/ExtensionLoaderTests.java b/server/src/test/java/org/elasticsearch/plugins/ExtensionLoaderTests.java index 35f65ebedf9b9..f9647c27e0acb 100644 --- a/server/src/test/java/org/elasticsearch/plugins/ExtensionLoaderTests.java +++ b/server/src/test/java/org/elasticsearch/plugins/ExtensionLoaderTests.java @@ -20,10 +20,12 @@ import java.util.HashMap; import java.util.Locale; import java.util.Map; +import java.util.Optional; import java.util.ServiceLoader; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.nullValue; @@ -68,21 +70,16 @@ public int getValue() { """, name, value); } - public void testNoProviderNullFallback() { - TestService service = ExtensionLoader.loadSingleton(ServiceLoader.load(TestService.class), () -> null); - assertThat(service, nullValue()); - } - public void testNoProvider() { - TestService service = ExtensionLoader.loadSingleton(ServiceLoader.load(TestService.class), () -> () -> 2); - assertThat(service, not(nullValue())); - assertThat(service.getValue(), equalTo(2)); + Optional<TestService> service = ExtensionLoader.loadSingleton(ServiceLoader.load(TestService.class)); + assertThat(service.isEmpty(), is(true)); } public void testOneProvider() throws Exception { Map sources = Map.of("p.FooService", defineProvider("FooService", 1)); try (var loader = buildProviderJar(sources)) { - TestService service = ExtensionLoader.loadSingleton(ServiceLoader.load(TestService.class, loader.classloader()), () -> null); + TestService service = ExtensionLoader.loadSingleton(ServiceLoader.load(TestService.class, loader.classloader())) + .orElseThrow(AssertionError::new); assertThat(service, not(nullValue())); assertThat(service.getValue(), equalTo(1)); } @@ -98,7 +95,7 @@ public void testManyProviders() throws Exception { try (var loader = buildProviderJar(sources)) { var e = expectThrows( IllegalStateException.class, - () -> ExtensionLoader.loadSingleton(ServiceLoader.load(TestService.class, loader.classloader()), () -> null) + () -> ExtensionLoader.loadSingleton(ServiceLoader.load(TestService.class, loader.classloader())) ); assertThat(e.getMessage(), containsString("More than one extension found")); assertThat(e.getMessage(), containsString("TestService")); diff --git a/server/src/test/java/org/elasticsearch/repositories/IndexIdTests.java b/server/src/test/java/org/elasticsearch/repositories/IndexIdTests.java index 580fdaf6f7f7d..37ff5521c201f 100644 --- a/server/src/test/java/org/elasticsearch/repositories/IndexIdTests.java +++ b/server/src/test/java/org/elasticsearch/repositories/IndexIdTests.java @@ -48,17 +48,20 @@ public void testXContent() throws IOException { IndexId indexId = new IndexId(randomAlphaOfLength(8), UUIDs.randomBase64UUID()); XContentBuilder builder = JsonXContent.contentBuilder(); indexId.toXContent(builder, ToXContent.EMPTY_PARAMS); - XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder)); - assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken()); - String name = null; - String id = null; - while (parser.nextToken()
!= XContentParser.Token.END_OBJECT) { - final String currentFieldName = parser.currentName(); - parser.nextToken(); - if (currentFieldName.equals(IndexId.NAME)) { - name = parser.text(); - } else if (currentFieldName.equals(IndexId.ID)) { - id = parser.text(); + String name; + String id; + try (XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder))) { + assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken()); + name = null; + id = null; + while (parser.nextToken() != XContentParser.Token.END_OBJECT) { + final String currentFieldName = parser.currentName(); + parser.nextToken(); + if (currentFieldName.equals(IndexId.NAME)) { + name = parser.text(); + } else if (currentFieldName.equals(IndexId.ID)) { + id = parser.text(); + } } } assertNotNull(name); diff --git a/server/src/test/java/org/elasticsearch/rest/BaseRestHandlerTests.java b/server/src/test/java/org/elasticsearch/rest/BaseRestHandlerTests.java index 0211397fdeee8..b5c6b28693b3a 100644 --- a/server/src/test/java/org/elasticsearch/rest/BaseRestHandlerTests.java +++ b/server/src/test/java/org/elasticsearch/rest/BaseRestHandlerTests.java @@ -27,7 +27,6 @@ import java.util.HashMap; import java.util.List; import java.util.Set; -import java.util.concurrent.atomic.AtomicBoolean; import static org.hamcrest.core.StringContains.containsString; import static org.hamcrest.object.HasToString.hasToString; @@ -49,13 +48,13 @@ public void tearDown() throws Exception { threadPool.shutdown(); } - public void testOneUnconsumedParameters() throws Exception { - final AtomicBoolean executed = new AtomicBoolean(); + public void testOneUnconsumedParameters() { + final var restChannelConsumer = new TestRestChannelConsumer(); BaseRestHandler handler = new BaseRestHandler() { @Override - protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { + protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) { request.param("consumed"); - return channel -> executed.set(true); + return restChannelConsumer; } @Override @@ -79,16 +78,17 @@ public List routes() { () -> handler.handleRequest(request, channel, mockClient) ); assertThat(e, hasToString(containsString("request [/] contains unrecognized parameter: [unconsumed]"))); - assertFalse(executed.get()); + assertFalse(restChannelConsumer.executed); + assertTrue(restChannelConsumer.closed); } - public void testMultipleUnconsumedParameters() throws Exception { - final AtomicBoolean executed = new AtomicBoolean(); + public void testMultipleUnconsumedParameters() { + final var restChannelConsumer = new TestRestChannelConsumer(); BaseRestHandler handler = new BaseRestHandler() { @Override - protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { + protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) { request.param("consumed"); - return channel -> executed.set(true); + return restChannelConsumer; } @Override @@ -113,20 +113,21 @@ public List routes() { () -> handler.handleRequest(request, channel, mockClient) ); assertThat(e, hasToString(containsString("request [/] contains unrecognized parameters: [unconsumed-first], [unconsumed-second]"))); - assertFalse(executed.get()); + assertFalse(restChannelConsumer.executed); + assertTrue(restChannelConsumer.closed); } public void testUnconsumedParametersDidYouMean() throws Exception { - final AtomicBoolean executed = new AtomicBoolean(); + final var restChannelConsumer = new 
TestRestChannelConsumer(); BaseRestHandler handler = new BaseRestHandler() { @Override - protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { + protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) { request.param("consumed"); request.param("field"); request.param("tokenizer"); request.param("very_close_to_parameter_1"); request.param("very_close_to_parameter_2"); - return channel -> executed.set(true); + return restChannelConsumer; } @Override @@ -171,16 +172,17 @@ public List routes() { ) ) ); - assertFalse(executed.get()); + assertFalse(restChannelConsumer.executed); + assertTrue(restChannelConsumer.closed); } public void testUnconsumedResponseParameters() throws Exception { - final AtomicBoolean executed = new AtomicBoolean(); + final var restChannelConsumer = new TestRestChannelConsumer(); BaseRestHandler handler = new BaseRestHandler() { @Override - protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { + protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) { request.param("consumed"); - return channel -> executed.set(true); + return restChannelConsumer; } @Override @@ -205,15 +207,16 @@ public List routes() { RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withParams(params).build(); RestChannel channel = new FakeRestChannel(request, randomBoolean(), 1); handler.handleRequest(request, channel, mockClient); - assertTrue(executed.get()); + assertTrue(restChannelConsumer.executed); + assertTrue(restChannelConsumer.closed); } public void testDefaultResponseParameters() throws Exception { - final AtomicBoolean executed = new AtomicBoolean(); + final var restChannelConsumer = new TestRestChannelConsumer(); BaseRestHandler handler = new BaseRestHandler() { @Override - protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { - return channel -> executed.set(true); + protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) { + return restChannelConsumer; } @Override @@ -235,15 +238,16 @@ public List routes() { RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withParams(params).build(); RestChannel channel = new FakeRestChannel(request, randomBoolean(), 1); handler.handleRequest(request, channel, mockClient); - assertTrue(executed.get()); + assertTrue(restChannelConsumer.executed); + assertTrue(restChannelConsumer.closed); } public void testCatResponseParameters() throws Exception { - final AtomicBoolean executed = new AtomicBoolean(); + final var restChannelConsumer = new TestRestChannelConsumer(); AbstractCatAction handler = new AbstractCatAction() { @Override protected RestChannelConsumer doCatRequest(RestRequest request, NodeClient client) { - return channel -> executed.set(true); + return restChannelConsumer; } @Override @@ -279,16 +283,17 @@ public List routes() { RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withParams(params).build(); RestChannel channel = new FakeRestChannel(request, randomBoolean(), 1); handler.handleRequest(request, channel, mockClient); - assertTrue(executed.get()); + assertTrue(restChannelConsumer.executed); + assertTrue(restChannelConsumer.closed); } public void testConsumedBody() throws Exception { - final AtomicBoolean executed = new AtomicBoolean(); + final var restChannelConsumer = new TestRestChannelConsumer(); final BaseRestHandler handler = new 
BaseRestHandler() { @Override protected RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { request.content(); - return channel -> executed.set(true); + return restChannelConsumer; } @Override @@ -309,16 +314,17 @@ public List routes() { ).build(); final RestChannel channel = new FakeRestChannel(request, randomBoolean(), 1); handler.handleRequest(request, channel, mockClient); - assertTrue(executed.get()); + assertTrue(restChannelConsumer.executed); + assertTrue(restChannelConsumer.closed); } } public void testUnconsumedNoBody() throws Exception { - final AtomicBoolean executed = new AtomicBoolean(); + final var restChannelConsumer = new TestRestChannelConsumer(); final BaseRestHandler handler = new BaseRestHandler() { @Override protected RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { - return channel -> executed.set(true); + return restChannelConsumer; } @Override @@ -335,15 +341,16 @@ public List routes() { final RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).build(); final RestChannel channel = new FakeRestChannel(request, randomBoolean(), 1); handler.handleRequest(request, channel, mockClient); - assertTrue(executed.get()); + assertTrue(restChannelConsumer.executed); + assertTrue(restChannelConsumer.closed); } public void testUnconsumedBody() throws IOException { - final AtomicBoolean executed = new AtomicBoolean(); + final var restChannelConsumer = new TestRestChannelConsumer(); final BaseRestHandler handler = new BaseRestHandler() { @Override protected RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { - return channel -> executed.set(true); + return restChannelConsumer; } @Override @@ -368,7 +375,25 @@ public List routes() { () -> handler.handleRequest(request, channel, mockClient) ); assertThat(e, hasToString(containsString("request [GET /] does not support having a body"))); - assertFalse(executed.get()); + assertFalse(restChannelConsumer.executed); + assertTrue(restChannelConsumer.closed); + } + } + + private static class TestRestChannelConsumer implements BaseRestHandler.RestChannelConsumer { + boolean executed; + boolean closed; + + @Override + public void accept(RestChannel restChannel) { + assertFalse(executed); + executed = true; + } + + @Override + public void close() { + assertFalse(closed); + closed = true; } } diff --git a/server/src/test/java/org/elasticsearch/search/DefaultSearchContextTests.java b/server/src/test/java/org/elasticsearch/search/DefaultSearchContextTests.java index a531a74d956ee..9aa358123d282 100644 --- a/server/src/test/java/org/elasticsearch/search/DefaultSearchContextTests.java +++ b/server/src/test/java/org/elasticsearch/search/DefaultSearchContextTests.java @@ -395,8 +395,6 @@ public void testClearQueryCancellationsOnClose() throws IOException { when(indexShard.getThreadPool()).thenReturn(threadPool); IndexService indexService = mock(IndexService.class); - MapperService mapperService = mock(MapperService.class); - when(indexService.mapperService()).thenReturn(mapperService); try (Directory dir = newDirectory(); RandomIndexWriter w = new RandomIndexWriter(random(), dir)) { diff --git a/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java b/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java index 99d2a81f72358..0c07a16b975c3 100644 --- a/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java +++ 
b/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java @@ -337,7 +337,8 @@ public void testSearchWhileIndexDeleted() throws InterruptedException { IndexShard indexShard = indexService.getShard(0); AtomicBoolean running = new AtomicBoolean(true); CountDownLatch startGun = new CountDownLatch(1); - Semaphore semaphore = new Semaphore(Integer.MAX_VALUE); + final int permitCount = 100; + Semaphore semaphore = new Semaphore(permitCount); ShardRouting routing = TestShardRouting.newShardRouting( indexShard.shardId(), randomAlphaOfLength(5), @@ -365,21 +366,20 @@ public void run() { IndexRequestBuilder indexRequestBuilder = prepareIndex("index").setSource("field", "value") .setRefreshPolicy(randomFrom(WriteRequest.RefreshPolicy.values())); try { - indexRequestBuilder.execute(new ActionListener() { - @Override - public void onResponse(DocWriteResponse indexResponse) { - semaphore.release(); - } - - @Override - public void onFailure(Exception e) { - semaphore.release(); - } - }); - } finally { + indexRequestBuilder.execute( + ActionListener.runBefore( + ActionListener.running(semaphore::release), + () -> indexRequestBuilder.request().decRef() + ) + ); + } catch (Exception e) { indexRequestBuilder.request().decRef(); + throw e; } } + prepareIndex("index").setSource("field", "value") + .setRefreshPolicy(randomFrom(WriteRequest.RefreshPolicy.values())) + .execute(ActionListener.running(semaphore::release)); } } }; @@ -447,7 +447,7 @@ public void onFailure(Exception e) { } finally { running.set(false); thread.join(); - semaphore.acquire(Integer.MAX_VALUE); + semaphore.acquire(permitCount); } assertEquals(0, service.getActiveContexts()); diff --git a/server/src/test/java/org/elasticsearch/search/TelemetryMetrics/SearchTransportTelemetryTests.java b/server/src/test/java/org/elasticsearch/search/TelemetryMetrics/SearchTransportTelemetryTests.java new file mode 100644 index 0000000000000..f435a9da382fb --- /dev/null +++ b/server/src/test/java/org/elasticsearch/search/TelemetryMetrics/SearchTransportTelemetryTests.java @@ -0,0 +1,137 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.search.TelemetryMetrics; + +import org.elasticsearch.action.search.SearchType; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.telemetry.Measurement; +import org.elasticsearch.telemetry.TestTelemetryPlugin; +import org.elasticsearch.test.ESIntegTestCase; + +import java.util.Collection; +import java.util.List; + +import static org.elasticsearch.action.search.SearchTransportAPMMetrics.DFS_ACTION_METRIC; +import static org.elasticsearch.action.search.SearchTransportAPMMetrics.FETCH_ID_ACTION_METRIC; +import static org.elasticsearch.action.search.SearchTransportAPMMetrics.FETCH_ID_SCROLL_ACTION_METRIC; +import static org.elasticsearch.action.search.SearchTransportAPMMetrics.FREE_CONTEXT_SCROLL_ACTION_METRIC; +import static org.elasticsearch.action.search.SearchTransportAPMMetrics.QUERY_ACTION_METRIC; +import static org.elasticsearch.action.search.SearchTransportAPMMetrics.QUERY_ID_ACTION_METRIC; +import static org.elasticsearch.action.search.SearchTransportAPMMetrics.QUERY_SCROLL_ACTION_METRIC; +import static org.elasticsearch.index.query.QueryBuilders.simpleQueryStringQuery; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertScrollResponsesAndHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHitsWithoutFailures; + +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 1) +public class SearchTransportTelemetryTests extends ESIntegTestCase { + @Override + protected Collection<Class<? extends Plugin>> nodePlugins() { + return List.of(TestTelemetryPlugin.class); + } + + @Override + protected int minimumNumberOfShards() { + return 2; + } + + @Override + protected int maximumNumberOfShards() { + return 7; + } + + @Override + protected int maximumNumberOfReplicas() { + return 0; + } + + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/103781") + public void testSearchTransportMetricsDfsQueryThenFetch() throws InterruptedException { + var indexName = "test1"; + createIndex(indexName); + indexRandom(true, false, prepareIndex(indexName).setId("1").setSource("body", "foo")); + + assertSearchHitsWithoutFailures( + prepareSearch(indexName).setSearchType(SearchType.DFS_QUERY_THEN_FETCH).setQuery(simpleQueryStringQuery("foo")), + "1" + ); + assertEquals(getNumShards(indexName).numPrimaries, getNumberOfMeasurements(DFS_ACTION_METRIC)); + assertEquals(getNumShards(indexName).numPrimaries, getNumberOfMeasurements(QUERY_ID_ACTION_METRIC)); + assertNotEquals(0, getNumberOfMeasurements(FETCH_ID_ACTION_METRIC)); + resetMeter(); + } + + public void testSearchTransportMetricsQueryThenFetch() throws InterruptedException { + var indexName = "test2"; + createIndex(indexName); + indexRandom(true, false, prepareIndex(indexName).setId("1").setSource("body", "foo")); + + assertSearchHitsWithoutFailures( + prepareSearch(indexName).setSearchType(SearchType.QUERY_THEN_FETCH).setQuery(simpleQueryStringQuery("foo")), + "1" + ); + assertEquals(getNumShards(indexName).numPrimaries, getNumberOfMeasurements(QUERY_ACTION_METRIC)); + assertNotEquals(0, getNumberOfMeasurements(FETCH_ID_ACTION_METRIC)); + resetMeter(); + } + + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/103810") + public void testSearchTransportMetricsScroll() throws InterruptedException { + var indexName = "test3"; + createIndex(indexName); + indexRandom( + true, + false,
prepareIndex(indexName).setId("1").setSource("body", "foo"), + prepareIndex(indexName).setId("2").setSource("body", "foo") + ); // getNumShards(indexName).numPrimaries + + assertScrollResponsesAndHitCount( + TimeValue.timeValueSeconds(60), + prepareSearch(indexName).setSearchType(SearchType.DFS_QUERY_THEN_FETCH).setSize(1).setQuery(simpleQueryStringQuery("foo")), + 2, + (respNum, response) -> { + if (respNum == 1) { + assertEquals(getNumShards(indexName).numPrimaries, getNumberOfMeasurements(DFS_ACTION_METRIC)); + assertEquals(getNumShards(indexName).numPrimaries, getNumberOfMeasurements(QUERY_ID_ACTION_METRIC)); + assertNotEquals(0, getNumberOfMeasurements(FETCH_ID_ACTION_METRIC)); + resetMeter(); + } else if (respNum == 2) { + assertEquals(getNumShards(indexName).numPrimaries, getNumberOfMeasurements(QUERY_SCROLL_ACTION_METRIC)); + assertNotEquals(0, getNumberOfMeasurements(FETCH_ID_SCROLL_ACTION_METRIC)); + } else { + resetMeter(); + } + } + ); + + assertEquals(getNumShards(indexName).numPrimaries, getNumberOfMeasurements(FREE_CONTEXT_SCROLL_ACTION_METRIC)); + resetMeter(); + } + + private void resetMeter() { + getTestTelemetryPlugin().resetMeter(); + } + + private TestTelemetryPlugin getTestTelemetryPlugin() { + return internalCluster().getDataNodeInstance(PluginsService.class).filterPlugins(TestTelemetryPlugin.class).toList().get(0); + } + + private long getNumberOfMeasurements(String attributeValue) { + final List<Measurement> measurements = getTestTelemetryPlugin().getLongHistogramMeasurement( + org.elasticsearch.action.search.SearchTransportAPMMetrics.SEARCH_ACTION_LATENCY_BASE_METRIC + ); + return measurements.stream() + .filter( + m -> m.attributes().get(org.elasticsearch.action.search.SearchTransportAPMMetrics.ACTION_ATTRIBUTE_NAME) == attributeValue + ) + .count(); + } +} diff --git a/server/src/test/java/org/elasticsearch/search/internal/ContextIndexSearcherTests.java b/server/src/test/java/org/elasticsearch/search/internal/ContextIndexSearcherTests.java index a4e52af5f43c2..d1a7e93efb075 100644 --- a/server/src/test/java/org/elasticsearch/search/internal/ContextIndexSearcherTests.java +++ b/server/src/test/java/org/elasticsearch/search/internal/ContextIndexSearcherTests.java @@ -40,6 +40,7 @@ import org.apache.lucene.search.KnnFloatVectorQuery; import org.apache.lucene.search.LeafCollector; import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.QueryVisitor; import org.apache.lucene.search.Scorable; @@ -515,6 +516,81 @@ public boolean isCacheable(LeafReaderContext ctx) { } } + public void testTimeoutOnRewriteStandalone() throws IOException { + try (Directory dir = newDirectory()) { + indexDocs(dir); + ThreadPoolExecutor executor = null; + try (DirectoryReader directoryReader = DirectoryReader.open(dir)) { + if (randomBoolean()) { + executor = (ThreadPoolExecutor) Executors.newFixedThreadPool(randomIntBetween(2, 5)); + } + ContextIndexSearcher contextIndexSearcher = new ContextIndexSearcher( + directoryReader, + IndexSearcher.getDefaultSimilarity(), + IndexSearcher.getDefaultQueryCache(), + IndexSearcher.getDefaultQueryCachingPolicy(), + true, + executor, + executor == null ?
-1 : executor.getMaximumPoolSize(), + 1 + ); + TestQuery testQuery = new TestQuery() { + @Override + public Query rewrite(IndexSearcher indexSearcher) { + contextIndexSearcher.throwTimeExceededException(); + assert false; + return null; + } + }; + Query rewrite = contextIndexSearcher.rewrite(testQuery); + assertThat(rewrite, instanceOf(MatchNoDocsQuery.class)); + assertEquals("MatchNoDocsQuery(\"rewrite timed out\")", rewrite.toString()); + assertTrue(contextIndexSearcher.timeExceeded()); + } finally { + if (executor != null) { + terminate(executor); + } + } + } + } + + public void testTimeoutOnRewriteDuringSearch() throws IOException { + try (Directory dir = newDirectory()) { + indexDocs(dir); + ThreadPoolExecutor executor = null; + try (DirectoryReader directoryReader = DirectoryReader.open(dir)) { + if (randomBoolean()) { + executor = (ThreadPoolExecutor) Executors.newFixedThreadPool(randomIntBetween(2, 5)); + } + ContextIndexSearcher contextIndexSearcher = new ContextIndexSearcher( + directoryReader, + IndexSearcher.getDefaultSimilarity(), + IndexSearcher.getDefaultQueryCache(), + IndexSearcher.getDefaultQueryCachingPolicy(), + true, + executor, + executor == null ? -1 : executor.getMaximumPoolSize(), + 1 + ); + TestQuery testQuery = new TestQuery() { + @Override + public Query rewrite(IndexSearcher indexSearcher) { + contextIndexSearcher.throwTimeExceededException(); + assert false; + return null; + } + }; + Integer hitCount = contextIndexSearcher.search(testQuery, new TotalHitCountCollectorManager()); + assertEquals(0, hitCount.intValue()); + assertTrue(contextIndexSearcher.timeExceeded()); + } finally { + if (executor != null) { + terminate(executor); + } + } + } + } + private static class TestQuery extends Query { @Override public String toString(String field) { diff --git a/server/src/test/java/org/elasticsearch/search/query/ThrowingQueryBuilder.java b/server/src/test/java/org/elasticsearch/search/query/ThrowingQueryBuilder.java index cd3d195030c55..f42ca49dc14b9 100644 --- a/server/src/test/java/org/elasticsearch/search/query/ThrowingQueryBuilder.java +++ b/server/src/test/java/org/elasticsearch/search/query/ThrowingQueryBuilder.java @@ -67,7 +67,7 @@ public ThrowingQueryBuilder(StreamInput in) throws IOException { this.randomUID = in.readLong(); this.failure = in.readException(); this.shardId = in.readVInt(); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_040)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { this.index = in.readOptionalString(); } else { this.index = null; @@ -79,7 +79,7 @@ protected void doWriteTo(StreamOutput out) throws IOException { out.writeLong(randomUID); out.writeException(failure); out.writeVInt(shardId); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_040)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { out.writeOptionalString(index); } } diff --git a/server/src/test/java/org/elasticsearch/search/suggest/completion/QueryContextTestCase.java b/server/src/test/java/org/elasticsearch/search/suggest/completion/QueryContextTestCase.java index 48711a665c39e..8395fcce918d9 100644 --- a/server/src/test/java/org/elasticsearch/search/suggest/completion/QueryContextTestCase.java +++ b/server/src/test/java/org/elasticsearch/search/suggest/completion/QueryContextTestCase.java @@ -34,11 +34,12 @@ public void testToXContext() throws IOException { QC toXContent = createTestModel(); XContentBuilder builder = XContentFactory.jsonBuilder(); toXContent.toXContent(builder, 
ToXContent.EMPTY_PARAMS); - XContentParser parser = createParser(builder); - parser.nextToken(); - QC fromXContext = fromXContent(parser); - assertEquals(toXContent, fromXContext); - assertEquals(toXContent.hashCode(), fromXContext.hashCode()); + try (XContentParser parser = createParser(builder)) { + parser.nextToken(); + QC fromXContext = fromXContent(parser); + assertEquals(toXContent, fromXContext); + assertEquals(toXContent.hashCode(), fromXContext.hashCode()); + } } } } diff --git a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java index 5583f84086679..f181cfe9f0ca4 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java @@ -46,7 +46,6 @@ import org.elasticsearch.action.admin.indices.mapping.put.PutMappingAction; import org.elasticsearch.action.admin.indices.mapping.put.TransportAutoPutMappingAction; import org.elasticsearch.action.admin.indices.mapping.put.TransportPutMappingAction; -import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresAction; import org.elasticsearch.action.admin.indices.shards.TransportIndicesShardStoresAction; import org.elasticsearch.action.bulk.BulkAction; import org.elasticsearch.action.bulk.BulkRequest; @@ -59,6 +58,7 @@ import org.elasticsearch.action.search.SearchPhaseController; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.SearchTransportAPMMetrics; import org.elasticsearch.action.search.SearchTransportService; import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.action.support.ActionFilters; @@ -175,6 +175,7 @@ import org.elasticsearch.search.SearchService; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.fetch.FetchPhase; +import org.elasticsearch.telemetry.TelemetryProvider; import org.elasticsearch.telemetry.tracing.Tracer; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; @@ -2026,7 +2027,8 @@ protected void assertSnapshotOrGenericThread() { actionFilters, indexNameExpressionResolver, namedWriteableRegistry, - EmptySystemIndices.INSTANCE.getExecutorSelector() + EmptySystemIndices.INSTANCE.getExecutorSelector(), + new SearchTransportAPMMetrics(TelemetryProvider.NOOP.getMeterRegistry()) ) ); actions.put( @@ -2107,7 +2109,7 @@ protected void assertSnapshotOrGenericThread() { ) ); actions.put( - IndicesShardStoresAction.INSTANCE, + TransportIndicesShardStoresAction.TYPE, new TransportIndicesShardStoresAction( transportService, clusterService, diff --git a/server/src/test/java/org/elasticsearch/snapshots/SnapshotsInProgressSerializationTests.java b/server/src/test/java/org/elasticsearch/snapshots/SnapshotsInProgressSerializationTests.java index 893242ccaa308..873a35aa49107 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SnapshotsInProgressSerializationTests.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SnapshotsInProgressSerializationTests.java @@ -8,13 +8,19 @@ package org.elasticsearch.snapshots; +import org.elasticsearch.TransportVersion; import org.elasticsearch.cluster.ClusterModule; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterState.Custom; import 
org.elasticsearch.cluster.Diff; import org.elasticsearch.cluster.SnapshotsInProgress; import org.elasticsearch.cluster.SnapshotsInProgress.Entry; import org.elasticsearch.cluster.SnapshotsInProgress.ShardState; import org.elasticsearch.cluster.SnapshotsInProgress.State; +import org.elasticsearch.cluster.metadata.NodesShutdownMetadata; +import org.elasticsearch.cluster.metadata.SingleNodeShutdownMetadata; +import org.elasticsearch.cluster.version.CompatibilityVersions; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.Writeable; @@ -38,6 +44,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.function.Function; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -46,6 +53,10 @@ public class SnapshotsInProgressSerializationTests extends SimpleDiffableWireSerializationTestCase<Custom> { + public static final ClusterState CLUSTER_STATE_FOR_NODE_SHUTDOWNS = ClusterState.builder(ClusterName.DEFAULT) + .putCompatibilityVersions("local", new CompatibilityVersions(TransportVersion.current(), Map.of())) + .build(); + @Override protected Custom createTestInstance() { int numberOfSnapshots = randomInt(10); @@ -53,9 +64,39 @@ protected Custom createTestInstance() { for (int i = 0; i < numberOfSnapshots; i++) { snapshotsInProgress = snapshotsInProgress.withAddedEntry(randomSnapshot()); } + + final var nodeIdsForRemoval = randomList(3, ESTestCase::randomUUID); + if (nodeIdsForRemoval.isEmpty() == false) { + snapshotsInProgress = snapshotsInProgress.withUpdatedNodeIdsForRemoval( + getClusterStateWithNodeShutdownMetadata(nodeIdsForRemoval) + ); + } + return snapshotsInProgress; } + private ClusterState getClusterStateWithNodeShutdownMetadata(List<String> nodeIdsForRemoval) { + return CLUSTER_STATE_FOR_NODE_SHUTDOWNS.copyAndUpdateMetadata( + mdb -> mdb.putCustom( + NodesShutdownMetadata.TYPE, + new NodesShutdownMetadata( + nodeIdsForRemoval.stream() + .collect( + Collectors.toMap( + Function.identity(), + nodeId -> SingleNodeShutdownMetadata.builder() + .setType(SingleNodeShutdownMetadata.Type.REMOVE) + .setNodeId(nodeId) + .setStartedAtMillis(randomNonNegativeLong()) + .setReason(getTestName()) + .build() + ) + ) + ) + ) + ); + } + private Entry randomSnapshot() { Snapshot snapshot = new Snapshot("repo-" + randomInt(5), new SnapshotId(randomAlphaOfLength(10), randomAlphaOfLength(10))); boolean includeGlobalState = randomBoolean(); @@ -170,20 +211,30 @@ protected NamedWriteableRegistry getNamedWriteableRegistry() { @Override protected Custom mutateInstance(Custom instance) { final SnapshotsInProgress snapshotsInProgress = (SnapshotsInProgress) instance; - if (snapshotsInProgress.isEmpty()) { - // add or remove an entry - return snapshotsInProgress.withAddedEntry(randomSnapshot()); + if (randomBoolean()) { + if (snapshotsInProgress.isEmpty()) { + // add an entry + return snapshotsInProgress.withAddedEntry(randomSnapshot()); + } else { + // mutate or remove an entry + final String repo = randomFrom( + snapshotsInProgress.asStream().map(SnapshotsInProgress.Entry::repository).collect(Collectors.toSet()) + ); + final List<Entry> forRepo = snapshotsInProgress.forRepo(repo); + int index = randomIntBetween(0, forRepo.size() - 1); + Entry entry = forRepo.get(index); + final List<Entry> updatedEntries = new ArrayList<>(forRepo); + if (randomBoolean()) { + updatedEntries.set(index, mutateEntry(entry)); + } else { + updatedEntries.remove(index); + } + return
snapshotsInProgress.withUpdatedEntriesForRepo(repo, updatedEntries); + } } else { - // mutate an entry - final String repo = randomFrom( - snapshotsInProgress.asStream().map(SnapshotsInProgress.Entry::repository).collect(Collectors.toSet()) + return snapshotsInProgress.withUpdatedNodeIdsForRemoval( + getClusterStateWithNodeShutdownMetadata(randomList(1, 3, ESTestCase::randomUUID)) ); - final List forRepo = snapshotsInProgress.forRepo(repo); - int index = randomIntBetween(0, forRepo.size() - 1); - Entry entry = forRepo.get(index); - final List updatedEntries = new ArrayList<>(forRepo); - updatedEntries.set(index, mutateEntry(entry)); - return snapshotsInProgress.withUpdatedEntriesForRepo(repo, updatedEntries); } } @@ -414,9 +465,27 @@ public void testXContent() throws IOException { null, IndexVersion.current() ) - ); + ) + .withUpdatedNodeIdsForRemoval( + CLUSTER_STATE_FOR_NODE_SHUTDOWNS.copyAndUpdateMetadata( + b -> b.putCustom( + NodesShutdownMetadata.TYPE, + new NodesShutdownMetadata( + Map.of( + "node-id", + SingleNodeShutdownMetadata.builder() + .setNodeId("node-id") + .setType(SingleNodeShutdownMetadata.Type.REMOVE) + .setStartedAtMillis(randomNonNegativeLong()) + .setReason("test") + .build() + ) + ) + ) + ) + ); - AbstractChunkedSerializingTestCase.assertChunkCount(sip, instance -> Math.toIntExact(instance.asStream().count() + 2)); + AbstractChunkedSerializingTestCase.assertChunkCount(sip, instance -> Math.toIntExact(instance.asStream().count() + 5)); final var json = Strings.toString(sip, false, true); assertThat( json, @@ -467,7 +536,8 @@ public void testXContent() throws IOException { "feature_states": [], "data_streams": [] } - ] + ], + "node_ids_for_removal":["node-id"] }""")), // or the shards might be in the other order: equalTo(XContentHelper.stripWhitespace(""" @@ -516,7 +586,8 @@ public void testXContent() throws IOException { "feature_states": [], "data_streams": [] } - ] + ], + "node_ids_for_removal":["node-id"] }""")) ) ); @@ -532,6 +603,6 @@ public static State randomState(Map st.completed() || st == ShardState.ABORTED)) { return State.ABORTED; } - return randomFrom(State.STARTED, State.INIT); + return State.STARTED; } } diff --git a/server/src/test/java/org/elasticsearch/transport/ProxyConnectionStrategyTests.java b/server/src/test/java/org/elasticsearch/transport/ProxyConnectionStrategyTests.java index ead43d0bac05e..b3c7c5adac95d 100644 --- a/server/src/test/java/org/elasticsearch/transport/ProxyConnectionStrategyTests.java +++ b/server/src/test/java/org/elasticsearch/transport/ProxyConnectionStrategyTests.java @@ -130,7 +130,11 @@ public void testProxyStrategyWillOpenExpectedNumberOfConnectionsToAddress() { ); int numOfConnections = randomIntBetween(4, 8); try ( - RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); + RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager( + clusterAlias, + RemoteClusterCredentialsManager.EMPTY, + connectionManager + ); ProxyConnectionStrategy strategy = new ProxyConnectionStrategy( clusterAlias, localService, @@ -188,7 +192,11 @@ public void testProxyStrategyWillOpenNewConnectionsOnDisconnect() throws Excepti AtomicBoolean useAddress1 = new AtomicBoolean(true); try ( - RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); + RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager( + clusterAlias, + RemoteClusterCredentialsManager.EMPTY, + connectionManager + ); 
ProxyConnectionStrategy strategy = new ProxyConnectionStrategy( clusterAlias, localService, @@ -263,7 +271,11 @@ public void testConnectFailsWithIncompatibleNodes() { ); int numOfConnections = randomIntBetween(4, 8); try ( - RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); + RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager( + clusterAlias, + RemoteClusterCredentialsManager.EMPTY, + connectionManager + ); ProxyConnectionStrategy strategy = new ProxyConnectionStrategy( clusterAlias, localService, @@ -328,7 +340,11 @@ public void testConnectFailsWithNonRetryableException() { ); int numOfConnections = randomIntBetween(4, 8); try ( - RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); + RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager( + clusterAlias, + RemoteClusterCredentialsManager.EMPTY, + connectionManager + ); ProxyConnectionStrategy strategy = new ProxyConnectionStrategy( clusterAlias, localService, @@ -388,7 +404,11 @@ public void testClusterNameValidationPreventConnectingToDifferentClusters() thro AtomicBoolean useAddress1 = new AtomicBoolean(true); try ( - RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); + RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager( + clusterAlias, + RemoteClusterCredentialsManager.EMPTY, + connectionManager + ); ProxyConnectionStrategy strategy = new ProxyConnectionStrategy( clusterAlias, localService, @@ -459,7 +479,11 @@ public void testProxyStrategyWillResolveAddressesEachConnect() throws Exception ); int numOfConnections = randomIntBetween(4, 8); try ( - RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); + RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager( + clusterAlias, + RemoteClusterCredentialsManager.EMPTY, + connectionManager + ); ProxyConnectionStrategy strategy = new ProxyConnectionStrategy( clusterAlias, localService, @@ -511,7 +535,11 @@ public void onNodeConnected(DiscoveryNode node, Transport.Connection connection) }); try ( - var remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); + var remoteConnectionManager = new RemoteConnectionManager( + clusterAlias, + RemoteClusterCredentialsManager.EMPTY, + connectionManager + ); var strategy = new ProxyConnectionStrategy( clusterAlias, localService, @@ -554,7 +582,11 @@ public void testProxyStrategyWillNeedToBeRebuiltIfNumOfSocketsOrAddressesOrServe ); int numOfConnections = randomIntBetween(4, 8); try ( - RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); + RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager( + clusterAlias, + RemoteClusterCredentialsManager.EMPTY, + connectionManager + ); ProxyConnectionStrategy strategy = new ProxyConnectionStrategy( clusterAlias, localService, @@ -672,7 +704,11 @@ public void testServerNameAttributes() { ); int numOfConnections = randomIntBetween(4, 8); try ( - RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); + RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager( + clusterAlias, + RemoteClusterCredentialsManager.EMPTY, + connectionManager + ); ProxyConnectionStrategy strategy = new 
ProxyConnectionStrategy( clusterAlias, localService, diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java index d4f03f1027838..dee28d6dea630 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java @@ -45,7 +45,6 @@ import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.aggregations.InternalAggregations; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.threadpool.TestThreadPool; @@ -62,6 +61,7 @@ import java.util.Arrays; import java.util.Collections; import java.util.List; +import java.util.Objects; import java.util.concurrent.BrokenBarrierException; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CountDownLatch; @@ -155,17 +155,14 @@ public static MockTransportService startTransport( } else { searchHits = new SearchHits(new SearchHit[0], new TotalHits(0, TotalHits.Relation.EQUAL_TO), Float.NaN); } - InternalSearchResponse response = new InternalSearchResponse( + SearchResponse searchResponse = new SearchResponse( searchHits, InternalAggregations.EMPTY, null, - null, false, null, - 1 - ); - SearchResponse searchResponse = new SearchResponse( - response, + null, + 1, null, 1, 1, @@ -252,7 +249,14 @@ public void run() { AtomicReference exceptionReference = new AtomicReference<>(); String clusterAlias = "test-cluster"; Settings settings = buildRandomSettings(clusterAlias, addresses(seedNode)); - try (RemoteClusterConnection connection = new RemoteClusterConnection(settings, clusterAlias, service, randomBoolean())) { + try ( + RemoteClusterConnection connection = new RemoteClusterConnection( + settings, + clusterAlias, + service, + randomFrom(RemoteClusterCredentialsManager.EMPTY, buildCredentialsManager(clusterAlias)) + ) + ) { ActionListener listener = ActionListener.wrap(x -> { listenerCalled.countDown(); fail("expected exception"); @@ -322,7 +326,14 @@ public void testCloseWhileConcurrentlyConnecting() throws IOException, Interrupt service.acceptIncomingRequests(); String clusterAlias = "test-cluster"; Settings settings = buildRandomSettings(clusterAlias, seedNodes); - try (RemoteClusterConnection connection = new RemoteClusterConnection(settings, clusterAlias, service, false)) { + try ( + RemoteClusterConnection connection = new RemoteClusterConnection( + settings, + clusterAlias, + service, + RemoteClusterCredentialsManager.EMPTY + ) + ) { int numThreads = randomIntBetween(4, 10); Thread[] threads = new Thread[numThreads]; CyclicBarrier barrier = new CyclicBarrier(numThreads + 1); @@ -470,7 +481,12 @@ private void doTestGetConnectionInfo(boolean hasClusterCredentials) throws Excep settings = Settings.builder().put(settings).setSecureSettings(secureSettings).build(); } try ( - RemoteClusterConnection connection = new RemoteClusterConnection(settings, clusterAlias, service, hasClusterCredentials) + RemoteClusterConnection connection = new RemoteClusterConnection( + settings, + clusterAlias, + service, + hasClusterCredentials ? 
buildCredentialsManager(clusterAlias) : RemoteClusterCredentialsManager.EMPTY + ) ) { // test no nodes connected RemoteConnectionInfo remoteConnectionInfo = assertSerialization(connection.getConnectionInfo()); @@ -662,7 +678,12 @@ private void doTestCollectNodes(boolean hasClusterCredentials) throws Exception } try ( - RemoteClusterConnection connection = new RemoteClusterConnection(settings, clusterAlias, service, hasClusterCredentials) + RemoteClusterConnection connection = new RemoteClusterConnection( + settings, + clusterAlias, + service, + hasClusterCredentials ? buildCredentialsManager(clusterAlias) : RemoteClusterCredentialsManager.EMPTY + ) ) { CountDownLatch responseLatch = new CountDownLatch(1); AtomicReference> reference = new AtomicReference<>(); @@ -713,7 +734,14 @@ public void testNoChannelsExceptREG() throws Exception { String clusterAlias = "test-cluster"; Settings settings = buildRandomSettings(clusterAlias, addresses(seedNode)); - try (RemoteClusterConnection connection = new RemoteClusterConnection(settings, clusterAlias, service, false)) { + try ( + RemoteClusterConnection connection = new RemoteClusterConnection( + settings, + clusterAlias, + service, + RemoteClusterCredentialsManager.EMPTY + ) + ) { PlainActionFuture plainActionFuture = new PlainActionFuture<>(); connection.ensureConnected(plainActionFuture); plainActionFuture.get(10, TimeUnit.SECONDS); @@ -779,7 +807,14 @@ public void testConnectedNodesConcurrentAccess() throws IOException, Interrupted String clusterAlias = "test-cluster"; Settings settings = buildRandomSettings(clusterAlias, seedNodes); - try (RemoteClusterConnection connection = new RemoteClusterConnection(settings, clusterAlias, service, randomBoolean())) { + try ( + RemoteClusterConnection connection = new RemoteClusterConnection( + settings, + clusterAlias, + service, + randomFrom(RemoteClusterCredentialsManager.EMPTY, buildCredentialsManager(clusterAlias)) + ) + ) { final int numGetThreads = randomIntBetween(4, 10); final Thread[] getThreads = new Thread[numGetThreads]; final int numModifyingThreads = randomIntBetween(4, 10); @@ -873,7 +908,14 @@ public void testGetConnection() throws Exception { service.acceptIncomingRequests(); String clusterAlias = "test-cluster"; Settings settings = buildRandomSettings(clusterAlias, addresses(seedNode)); - try (RemoteClusterConnection connection = new RemoteClusterConnection(settings, clusterAlias, service, false)) { + try ( + RemoteClusterConnection connection = new RemoteClusterConnection( + settings, + clusterAlias, + service, + RemoteClusterCredentialsManager.EMPTY + ) + ) { PlainActionFuture.get(fut -> connection.ensureConnected(fut.map(x -> null))); for (int i = 0; i < 10; i++) { // always a direct connection as the remote node is already connected @@ -921,4 +963,13 @@ private static Settings buildSniffSettings(String clusterAlias, List see ); return builder.build(); } + + private static RemoteClusterCredentialsManager buildCredentialsManager(String clusterAlias) { + Objects.requireNonNull(clusterAlias); + final Settings.Builder builder = Settings.builder(); + final MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString("cluster.remote." 
+ clusterAlias + ".credentials", randomAlphaOfLength(20)); + builder.setSecureSettings(secureSettings); + return new RemoteClusterCredentialsManager(builder.build()); + } } diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterCredentialsManagerTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterCredentialsManagerTests.java new file mode 100644 index 0000000000000..f02148a40e47e --- /dev/null +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterCredentialsManagerTests.java @@ -0,0 +1,43 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.transport; + +import org.elasticsearch.common.settings.MockSecureSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ESTestCase; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; + +public class RemoteClusterCredentialsManagerTests extends ESTestCase { + public void testResolveRemoteClusterCredentials() { + final String clusterAlias = randomAlphaOfLength(9); + final String otherClusterAlias = randomAlphaOfLength(10); + + final String secret = randomAlphaOfLength(20); + final Settings settings = buildSettingsWithCredentials(clusterAlias, secret); + RemoteClusterCredentialsManager credentialsManager = new RemoteClusterCredentialsManager(settings); + assertThat(credentialsManager.resolveCredentials(clusterAlias).toString(), equalTo(secret)); + assertThat(credentialsManager.hasCredentials(otherClusterAlias), is(false)); + + final String updatedSecret = randomAlphaOfLength(21); + credentialsManager.updateClusterCredentials(buildSettingsWithCredentials(clusterAlias, updatedSecret)); + assertThat(credentialsManager.resolveCredentials(clusterAlias).toString(), equalTo(updatedSecret)); + + credentialsManager.updateClusterCredentials(Settings.EMPTY); + assertThat(credentialsManager.hasCredentials(clusterAlias), is(false)); + } + + private Settings buildSettingsWithCredentials(String clusterAlias, String secret) { + final Settings.Builder builder = Settings.builder(); + final MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString("cluster.remote." 
+ clusterAlias + ".credentials", secret); + return builder.setSecureSettings(secureSettings).build(); + } +} diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteConnectionManagerTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteConnectionManagerTests.java index 839138d3c7c34..b1ffda669e6a1 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteConnectionManagerTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteConnectionManagerTests.java @@ -13,6 +13,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeUtils; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.util.concurrent.ThreadContext; @@ -23,17 +24,20 @@ import java.io.IOException; import java.net.InetAddress; import java.util.HashSet; +import java.util.Optional; import java.util.Set; import java.util.concurrent.ExecutionException; import static org.elasticsearch.transport.RemoteClusterService.REMOTE_CLUSTER_HANDSHAKE_ACTION_NAME; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; import static org.hamcrest.core.IsInstanceOf.instanceOf; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; public class RemoteConnectionManagerTests extends ESTestCase { @@ -49,6 +53,7 @@ public void setUp() throws Exception { transport = mock(Transport.class); remoteConnectionManager = new RemoteConnectionManager( "remote-cluster", + RemoteClusterCredentialsManager.EMPTY, new ClusterConnectionManager(Settings.EMPTY, transport, new ThreadContext(Settings.EMPTY)) ); @@ -120,10 +125,13 @@ public void testResolveRemoteClusterAlias() throws ExecutionException, Interrupt public void testRewriteHandshakeAction() throws IOException { final Transport.Connection connection = mock(Transport.Connection.class); + final String clusterAlias = randomAlphaOfLengthBetween(3, 8); + final RemoteClusterCredentialsManager credentialsResolver = mock(RemoteClusterCredentialsManager.class); + when(credentialsResolver.resolveCredentials(clusterAlias)).thenReturn(new SecureString(randomAlphaOfLength(42))); final Transport.Connection wrappedConnection = RemoteConnectionManager.wrapConnectionWithRemoteClusterInfo( connection, - randomAlphaOfLengthBetween(3, 8), - RemoteClusterPortSettings.REMOTE_CLUSTER_PROFILE + clusterAlias, + credentialsResolver ); final long requestId = randomLong(); final TransportRequest request = mock(TransportRequest.class); @@ -142,6 +150,26 @@ public void testRewriteHandshakeAction() throws IOException { verify(connection).sendRequest(requestId, anotherAction, request, options); } + public void testWrapAndResolveConnectionRoundTrip() { + final Transport.Connection connection = mock(Transport.Connection.class); + final String clusterAlias = randomAlphaOfLengthBetween(3, 8); + final RemoteClusterCredentialsManager credentialsResolver = mock(RemoteClusterCredentialsManager.class); + final SecureString credentials = new SecureString(randomAlphaOfLength(42)); + // second credential will never be resolved + when(credentialsResolver.resolveCredentials(clusterAlias)).thenReturn(credentials, (SecureString) 
null); + final Transport.Connection wrappedConnection = RemoteConnectionManager.wrapConnectionWithRemoteClusterInfo( + connection, + clusterAlias, + credentialsResolver + ); + + final Optional<RemoteConnectionManager.RemoteClusterAliasWithCredentials> actual = RemoteConnectionManager + .resolveRemoteClusterAliasWithCredentials(wrappedConnection); + + assertThat(actual.isPresent(), is(true)); + assertThat(actual.get(), equalTo(new RemoteConnectionManager.RemoteClusterAliasWithCredentials(clusterAlias, credentials))); + } + private static class TestRemoteConnection extends CloseableConnection { private final DiscoveryNode node; diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteConnectionStrategyTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteConnectionStrategyTests.java index 5d461e906a266..ca9986ba5eb1f 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteConnectionStrategyTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteConnectionStrategyTests.java @@ -26,7 +26,11 @@ public void testStrategyChangeMeansThatStrategyMustBeRebuilt() { mock(Transport.class), threadContext ); - RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager("cluster-alias", connectionManager); + RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager( + "cluster-alias", + RemoteClusterCredentialsManager.EMPTY, + connectionManager + ); FakeConnectionStrategy first = new FakeConnectionStrategy( "cluster-alias", mock(TransportService.class), @@ -46,7 +50,11 @@ public void testSameStrategyChangeMeansThatStrategyDoesNotNeedToBeRebuilt() { mock(Transport.class), threadContext ); - RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager("cluster-alias", connectionManager); + RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager( + "cluster-alias", + RemoteClusterCredentialsManager.EMPTY, + connectionManager + ); FakeConnectionStrategy first = new FakeConnectionStrategy( "cluster-alias", mock(TransportService.class), @@ -69,7 +77,11 @@ public void testChangeInConnectionProfileMeansTheStrategyMustBeRebuilt() { assertEquals(TimeValue.MINUS_ONE, connectionManager.getConnectionProfile().getPingInterval()); assertEquals(Compression.Enabled.INDEXING_DATA, connectionManager.getConnectionProfile().getCompressionEnabled()); assertEquals(Compression.Scheme.LZ4, connectionManager.getConnectionProfile().getCompressionScheme()); - RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager("cluster-alias", connectionManager); + RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager( + "cluster-alias", + RemoteClusterCredentialsManager.EMPTY, + connectionManager + ); FakeConnectionStrategy first = new FakeConnectionStrategy( "cluster-alias", mock(TransportService.class), diff --git a/server/src/test/java/org/elasticsearch/transport/SniffConnectionStrategyTests.java b/server/src/test/java/org/elasticsearch/transport/SniffConnectionStrategyTests.java index 3c955258d45c8..ddee1ff4d690a 100644 --- a/server/src/test/java/org/elasticsearch/transport/SniffConnectionStrategyTests.java +++ b/server/src/test/java/org/elasticsearch/transport/SniffConnectionStrategyTests.java @@ -192,7 +192,11 @@ public void testSniffStrategyWillConnectToAndDiscoverNodes() { threadPool.getThreadContext() ); try ( - RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); + RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager( + clusterAlias,
hasClusterCredentials ? new RemoteClusterCredentialsManager(clientSettings) : RemoteClusterCredentialsManager.EMPTY, + connectionManager + ); SniffConnectionStrategy strategy = new SniffConnectionStrategy( clusterAlias, localService, @@ -262,7 +266,11 @@ public void testSniffStrategyWillResolveDiscoveryNodesEachConnect() throws Excep threadPool.getThreadContext() ); try ( - RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); + RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager( + clusterAlias, + RemoteClusterCredentialsManager.EMPTY, + connectionManager + ); SniffConnectionStrategy strategy = new SniffConnectionStrategy( clusterAlias, localService, @@ -336,7 +344,11 @@ public void testSniffStrategyWillConnectToMaxAllowedNodesAndOpenNewConnectionsOn threadPool.getThreadContext() ); try ( - RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); + RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager( + clusterAlias, + RemoteClusterCredentialsManager.EMPTY, + connectionManager + ); SniffConnectionStrategy strategy = new SniffConnectionStrategy( clusterAlias, localService, @@ -424,7 +436,11 @@ public void testDiscoverWithSingleIncompatibleSeedNode() { threadPool.getThreadContext() ); try ( - RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); + RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager( + clusterAlias, + RemoteClusterCredentialsManager.EMPTY, + connectionManager + ); SniffConnectionStrategy strategy = new SniffConnectionStrategy( clusterAlias, localService, @@ -486,7 +502,11 @@ public void testConnectFailsWithIncompatibleNodes() { threadPool.getThreadContext() ); try ( - RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); + RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager( + clusterAlias, + RemoteClusterCredentialsManager.EMPTY, + connectionManager + ); SniffConnectionStrategy strategy = new SniffConnectionStrategy( clusterAlias, localService, @@ -549,7 +569,11 @@ public void testFilterNodesWithNodePredicate() { threadPool.getThreadContext() ); try ( - RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); + RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager( + clusterAlias, + RemoteClusterCredentialsManager.EMPTY, + connectionManager + ); SniffConnectionStrategy strategy = new SniffConnectionStrategy( clusterAlias, localService, @@ -617,7 +641,11 @@ public void testConnectFailsIfNoConnectionsOpened() { threadPool.getThreadContext() ); try ( - RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); + RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager( + clusterAlias, + RemoteClusterCredentialsManager.EMPTY, + connectionManager + ); SniffConnectionStrategy strategy = new SniffConnectionStrategy( clusterAlias, localService, @@ -694,7 +722,11 @@ public void testClusterNameValidationPreventConnectingToDifferentClusters() thro threadPool.getThreadContext() ); try ( - RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); + RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager( + clusterAlias, + 
RemoteClusterCredentialsManager.EMPTY, + connectionManager + ); SniffConnectionStrategy strategy = new SniffConnectionStrategy( clusterAlias, localService, @@ -783,7 +815,11 @@ public void testMultipleCallsToConnectEnsuresConnection() { threadPool.getThreadContext() ); try ( - RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); + RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager( + clusterAlias, + RemoteClusterCredentialsManager.EMPTY, + connectionManager + ); SniffConnectionStrategy strategy = new SniffConnectionStrategy( clusterAlias, localService, @@ -895,7 +931,11 @@ public void testConfiguredProxyAddressModeWillReplaceNodeAddress() { threadPool.getThreadContext() ); try ( - RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); + RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager( + clusterAlias, + RemoteClusterCredentialsManager.EMPTY, + connectionManager + ); SniffConnectionStrategy strategy = new SniffConnectionStrategy( clusterAlias, localService, @@ -964,7 +1004,11 @@ public void testSniffStrategyWillNeedToBeRebuiltIfNumOfConnectionsOrSeedsOrProxy threadPool.getThreadContext() ); try ( - RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); + RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager( + clusterAlias, + RemoteClusterCredentialsManager.EMPTY, + connectionManager + ); SniffConnectionStrategy strategy = new SniffConnectionStrategy( clusterAlias, localService, diff --git a/server/src/test/java/org/elasticsearch/transport/TransportInfoTests.java b/server/src/test/java/org/elasticsearch/transport/TransportInfoTests.java index 8e23f0e3984b9..261a4ba339c18 100644 --- a/server/src/test/java/org/elasticsearch/transport/TransportInfoTests.java +++ b/server/src/test/java/org/elasticsearch/transport/TransportInfoTests.java @@ -70,7 +70,10 @@ private void assertPublishAddress(TransportInfo httpInfo, String expected) throw httpInfo.toXContent(builder, ToXContent.EMPTY_PARAMS); builder.endObject(); - Map transportMap = (Map) createParser(builder).map().get(TransportInfo.Fields.TRANSPORT); + Map transportMap; + try (var parser = createParser(builder)) { + transportMap = (Map) parser.map().get(TransportInfo.Fields.TRANSPORT); + } Map profilesMap = (Map) transportMap.get("profiles"); assertEquals(expected, transportMap.get(TransportInfo.Fields.PUBLISH_ADDRESS)); assertEquals(expected, ((Map) profilesMap.get("test_profile")).get(TransportInfo.Fields.PUBLISH_ADDRESS)); diff --git a/settings.gradle b/settings.gradle index 90422913ef441..ce35c873f176e 100644 --- a/settings.gradle +++ b/settings.gradle @@ -100,7 +100,6 @@ List projects = [ 'test:fixtures:testcontainer-utils', 'test:fixtures:geoip-fixture', 'test:fixtures:url-fixture', - 'test:fixtures:nginx-fixture', 'test:logger-usage', 'test:test-clusters', 'test:x-content', diff --git a/test/external-modules/error-query/src/main/java/org/elasticsearch/test/errorquery/IndexError.java b/test/external-modules/error-query/src/main/java/org/elasticsearch/test/errorquery/IndexError.java index 19284152efab6..92b05ec9bf649 100644 --- a/test/external-modules/error-query/src/main/java/org/elasticsearch/test/errorquery/IndexError.java +++ b/test/external-modules/error-query/src/main/java/org/elasticsearch/test/errorquery/IndexError.java @@ -53,7 +53,7 @@ public IndexError(StreamInput in) throws 
IOException { this.shardIds = in.readBoolean() ? in.readIntArray() : null; this.errorType = in.readEnum(ERROR_TYPE.class); this.message = in.readString(); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_051)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { this.stallTimeSeconds = in.readVInt(); } else { this.stallTimeSeconds = 0; @@ -69,7 +69,7 @@ public void writeTo(StreamOutput out) throws IOException { } out.writeEnum(errorType); out.writeString(message); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_051)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { out.writeVInt(stallTimeSeconds); } } diff --git a/test/fixtures/minio-fixture/build.gradle b/test/fixtures/minio-fixture/build.gradle index 9a71387d7c6b7..3c97315dc07ce 100644 --- a/test/fixtures/minio-fixture/build.gradle +++ b/test/fixtures/minio-fixture/build.gradle @@ -11,26 +11,20 @@ apply plugin: 'elasticsearch.cache-test-fixtures' description = 'Fixture for MinIO Storage service' configurations.all { - transitive = false + exclude group: 'org.hamcrest', module: 'hamcrest-core' } - dependencies { - testImplementation project(':test:framework') - + testImplementation (project(':test:framework')) api "junit:junit:${versions.junit}" api project(':test:fixtures:testcontainer-utils') - api "org.testcontainers:testcontainers:${versions.testcontainer}" - implementation "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" - implementation "org.slf4j:slf4j-api:${versions.slf4j}" - implementation "com.github.docker-java:docker-java-api:${versions.dockerJava}" - - runtimeOnly "com.github.docker-java:docker-java-transport-zerodep:${versions.dockerJava}" - runtimeOnly "com.github.docker-java:docker-java-transport:${versions.dockerJava}" - runtimeOnly "com.github.docker-java:docker-java-core:${versions.dockerJava}" - runtimeOnly "org.apache.commons:commons-compress:${versions.commonsCompress}" - runtimeOnly "org.rnorth.duct-tape:duct-tape:${versions.ductTape}" + api("org.testcontainers:testcontainers:${versions.testcontainer}") { + transitive = false + } + api("com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}"){ + transitive = false + } // ensure we have proper logging when used in tests runtimeOnly "org.slf4j:slf4j-simple:${versions.slf4j}" - runtimeOnly "org.hamcrest:hamcrest:${versions.hamcrest}" + runtimeOnly("org.hamcrest:hamcrest:${versions.hamcrest}") } diff --git a/test/fixtures/nginx-fixture/Dockerfile b/test/fixtures/nginx-fixture/Dockerfile deleted file mode 100644 index 01bad77c488c8..0000000000000 --- a/test/fixtures/nginx-fixture/Dockerfile +++ /dev/null @@ -1,2 +0,0 @@ -FROM nginx -COPY nginx.conf /etc/nginx/nginx.conf diff --git a/test/fixtures/nginx-fixture/build.gradle b/test/fixtures/nginx-fixture/build.gradle deleted file mode 100644 index 438473f70a6f2..0000000000000 --- a/test/fixtures/nginx-fixture/build.gradle +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0 and the Server Side Public License, v 1; you may not use this file except - * in compliance with, at your election, the Elastic License 2.0 or the Server - * Side Public License, v 1.
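The IndexError change above is a transport-version bump rather than a wire-format change: the stallTimeSeconds field was gated on V_8_500_051 and is now gated on V_8_500_061, presumably because the change ultimately shipped under a later transport version than first assigned. The read and write sides must stay symmetric on the same constant, as in this compact sketch of the pattern:

    // Writer: only emit the field to peers that know how to read it.
    if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) {
        out.writeVInt(stallTimeSeconds);
    }

    // Reader: mirror the same check, falling back to a default for older peers.
    this.stallTimeSeconds = in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061) ? in.readVInt() : 0;
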
- */ -apply plugin: 'elasticsearch.test.fixtures' - -description = 'Fixture for an external http service' - -// These directories are shared between the URL repository and the FS repository in integration tests -project.ext { - fsRepositoryDir = file("${testFixturesDir}/fs-repository") -} - -tasks.named("preProcessFixture").configure { - doLast { - // tests expect to have an empty repo - project.ext.fsRepositoryDir.mkdirs() - } -} diff --git a/test/fixtures/nginx-fixture/docker-compose.yml b/test/fixtures/nginx-fixture/docker-compose.yml deleted file mode 100644 index bf6ab56bb0c9a..0000000000000 --- a/test/fixtures/nginx-fixture/docker-compose.yml +++ /dev/null @@ -1,9 +0,0 @@ -version: '3' -services: - nginx-fixture: - build: - context: . - volumes: - - ./testfixtures_shared/fs-repository:/data - ports: - - "80" diff --git a/test/fixtures/nginx-fixture/nginx.conf b/test/fixtures/nginx-fixture/nginx.conf deleted file mode 100644 index 9b199b2dc48b7..0000000000000 --- a/test/fixtures/nginx-fixture/nginx.conf +++ /dev/null @@ -1,10 +0,0 @@ -events {} - -http { - server { - listen 80 default_server; - listen [::]:80 default_server; - - root /data; - } -} diff --git a/test/fixtures/testcontainer-utils/build.gradle b/test/fixtures/testcontainer-utils/build.gradle index 80886d99087c9..3766722abcd65 100644 --- a/test/fixtures/testcontainer-utils/build.gradle +++ b/test/fixtures/testcontainer-utils/build.gradle @@ -1,6 +1,5 @@ apply plugin: 'elasticsearch.java' - configurations.all { transitive = false } @@ -10,6 +9,14 @@ dependencies { api "junit:junit:${versions.junit}" api "org.testcontainers:testcontainers:${versions.testcontainer}" implementation "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" + api "com.github.docker-java:docker-java-api:${versions.dockerJava}" implementation "org.slf4j:slf4j-api:${versions.slf4j}" - implementation "com.github.docker-java:docker-java-api:${versions.dockerJava}" + runtimeOnly "com.github.docker-java:docker-java-transport-zerodep:${versions.dockerJava}" + runtimeOnly "com.github.docker-java:docker-java-transport:${versions.dockerJava}" + runtimeOnly "com.github.docker-java:docker-java-core:${versions.dockerJava}" + runtimeOnly "org.apache.commons:commons-compress:${versions.commonsCompress}" + runtimeOnly "org.rnorth.duct-tape:duct-tape:${versions.ductTape}" + runtimeOnly "com.fasterxml.jackson.core:jackson-core:${versions.jackson}" + runtimeOnly "com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}" + } diff --git a/test/fixtures/testcontainer-utils/src/main/java/org/elasticsearch/test/fixtures/testcontainers/TestContainersThreadFilter.java b/test/fixtures/testcontainer-utils/src/main/java/org/elasticsearch/test/fixtures/testcontainers/TestContainersThreadFilter.java index f36a3264bffbb..d825330120eec 100644 --- a/test/fixtures/testcontainer-utils/src/main/java/org/elasticsearch/test/fixtures/testcontainers/TestContainersThreadFilter.java +++ b/test/fixtures/testcontainer-utils/src/main/java/org/elasticsearch/test/fixtures/testcontainers/TestContainersThreadFilter.java @@ -19,6 +19,6 @@ public class TestContainersThreadFilter implements ThreadFilter { public boolean reject(Thread t) { return t.getName().startsWith("testcontainers-") || t.getName().startsWith("ducttape") - || t.getName().startsWith("ForkJoinPool.commonPool-worker-1"); + || t.getName().startsWith("ForkJoinPool.commonPool-worker-"); } } diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/MockInternalClusterInfoService.java 
b/test/framework/src/main/java/org/elasticsearch/cluster/MockInternalClusterInfoService.java index bb86dab60b0eb..1004ea5b50119 100644 --- a/test/framework/src/main/java/org/elasticsearch/cluster/MockInternalClusterInfoService.java +++ b/test/framework/src/main/java/org/elasticsearch/cluster/MockInternalClusterInfoService.java @@ -107,7 +107,7 @@ ShardStats[] adjustShardStats(ShardStats[] shardsStats) { var storeStats = new StoreStats( shardSizeFunctionCopy.apply(shardRouting), shardSizeFunctionCopy.apply(shardRouting), - shardStats.getStats().store.getReservedSize().getBytes() + shardStats.getStats().store.reservedSizeInBytes() ); var commonStats = new CommonStats(new CommonStatsFlags(CommonStatsFlags.Flag.Store)); commonStats.store = storeStats; diff --git a/test/framework/src/main/java/org/elasticsearch/common/util/MockBigArrays.java b/test/framework/src/main/java/org/elasticsearch/common/util/MockBigArrays.java index 0a0592b5a01f2..5f6e50a7c83e0 100644 --- a/test/framework/src/main/java/org/elasticsearch/common/util/MockBigArrays.java +++ b/test/framework/src/main/java/org/elasticsearch/common/util/MockBigArrays.java @@ -702,5 +702,11 @@ public void addWithoutBreaking(long bytes) { public long getUsed() { return used.get(); } + + @Override + public String toString() { + long u = used.get(); + return "LimitedBreaker[" + u + "/" + max.getBytes() + "][" + ByteSizeValue.ofBytes(u) + "/" + max + "]"; + } } } diff --git a/test/framework/src/main/java/org/elasticsearch/search/RandomSearchRequestGenerator.java b/test/framework/src/main/java/org/elasticsearch/search/RandomSearchRequestGenerator.java index d1835459ab932..66f536fd378cb 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/RandomSearchRequestGenerator.java +++ b/test/framework/src/main/java/org/elasticsearch/search/RandomSearchRequestGenerator.java @@ -327,12 +327,15 @@ public static SearchSourceBuilder randomSearchSourceBuilder( } jsonBuilder.endArray(); jsonBuilder.endObject(); - XContentParser parser = XContentFactory.xContent(XContentType.JSON) - .createParser(XContentParserConfiguration.EMPTY, BytesReference.bytes(jsonBuilder).streamInput()); - parser.nextToken(); - parser.nextToken(); - parser.nextToken(); - builder.searchAfter(SearchAfterBuilder.fromXContent(parser).getSortValues()); + try ( + XContentParser parser = XContentFactory.xContent(XContentType.JSON) + .createParser(XContentParserConfiguration.EMPTY, BytesReference.bytes(jsonBuilder).streamInput()) + ) { + parser.nextToken(); + parser.nextToken(); + parser.nextToken(); + builder.searchAfter(SearchAfterBuilder.fromXContent(parser).getSortValues()); + } } catch (IOException e) { throw new RuntimeException("Error building search_from", e); } diff --git a/test/framework/src/main/java/org/elasticsearch/search/SearchResponseUtils.java b/test/framework/src/main/java/org/elasticsearch/search/SearchResponseUtils.java index 589bc76c55a3d..3950683ca0c9d 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/SearchResponseUtils.java +++ b/test/framework/src/main/java/org/elasticsearch/search/SearchResponseUtils.java @@ -9,6 +9,12 @@ import org.apache.lucene.search.TotalHits; import org.elasticsearch.action.search.SearchRequestBuilder; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.ShardSearchFailure; +import org.elasticsearch.client.Response; +import org.elasticsearch.test.rest.ESRestTestCase; + +import java.io.IOException; public enum SearchResponseUtils { ; @@ -25,4 +31,37 @@ public static TotalHits 
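Several hunks in this section share one theme: XContentParser instances were created and abandoned, and are now wrapped in try-with-resources so they are released on every code path, including when parsing throws. A sketch of the generic shape of the fix (json is an illustrative stand-in for whatever bytes are being parsed):

    try (
        XContentParser parser = XContentFactory.xContent(XContentType.JSON)
            .createParser(XContentParserConfiguration.EMPTY, json)
    ) {
        parser.nextToken();
        // ... consume the parser; it is closed even if an exception escapes ...
    }
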
getTotalHits(SearchRequestBuilder request) { public static long getTotalHitsValue(SearchRequestBuilder request) { return getTotalHits(request).value; } + + public static SearchResponse responseAsSearchResponse(Response searchResponse) throws IOException { + try (var parser = ESRestTestCase.responseAsParser(searchResponse)) { + return SearchResponse.fromXContent(parser); + } + } + + public static SearchResponse emptyWithTotalHits( + String scrollId, + int totalShards, + int successfulShards, + int skippedShards, + long tookInMillis, + ShardSearchFailure[] shardFailures, + SearchResponse.Clusters clusters + ) { + return new SearchResponse( + SearchHits.EMPTY_WITH_TOTAL_HITS, + null, + null, + false, + null, + null, + 1, + scrollId, + totalShards, + successfulShards, + skippedShards, + tookInMillis, + shardFailures, + clusters + ); + } } diff --git a/test/framework/src/main/java/org/elasticsearch/search/aggregations/BaseAggregationTestCase.java b/test/framework/src/main/java/org/elasticsearch/search/aggregations/BaseAggregationTestCase.java index c03058f22da5d..3b347c50671cc 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/aggregations/BaseAggregationTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/search/aggregations/BaseAggregationTestCase.java @@ -51,11 +51,12 @@ public void testFromXContent() throws IOException { } factoriesBuilder.toXContent(builder, ToXContent.EMPTY_PARAMS); XContentBuilder shuffled = shuffleXContent(builder); - XContentParser parser = createParser(shuffled); - AggregationBuilder newAgg = parse(parser); - assertNotSame(newAgg, testAgg); - assertEquals(testAgg, newAgg); - assertEquals(testAgg.hashCode(), newAgg.hashCode()); + try (XContentParser parser = createParser(shuffled)) { + AggregationBuilder newAgg = parse(parser); + assertNotSame(newAgg, testAgg); + assertEquals(testAgg, newAgg); + assertEquals(testAgg.hashCode(), newAgg.hashCode()); + } } public void testSupportsConcurrentExecution() { @@ -85,10 +86,12 @@ public void testFromXContentMulti() throws IOException { } factoriesBuilder.toXContent(builder, ToXContent.EMPTY_PARAMS); XContentBuilder shuffled = shuffleXContent(builder); - XContentParser parser = createParser(shuffled); - assertSame(XContentParser.Token.START_OBJECT, parser.nextToken()); - AggregatorFactories.Builder parsed = AggregatorFactories.parseAggregators(parser); + AggregatorFactories.Builder parsed; + try (XContentParser parser = createParser(shuffled)) { + assertSame(XContentParser.Token.START_OBJECT, parser.nextToken()); + parsed = AggregatorFactories.parseAggregators(parser); + } assertThat(parsed.getAggregatorFactories(), hasSize(testAggs.size())); assertThat(parsed.getPipelineAggregatorFactories(), hasSize(0)); @@ -127,8 +130,10 @@ public void testSerializationMulti() throws IOException { public void testToString() throws IOException { AB testAgg = createTestAggregatorBuilder(); String toString = randomBoolean() ? 
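SearchResponseUtils.emptyWithTotalHits packages up the long SearchResponse constructor call that tests previously spelled out inline. A hedged usage sketch; ShardSearchFailure.EMPTY_ARRAY and SearchResponse.Clusters.EMPTY are assumed from the wider codebase rather than shown in this diff:

    SearchResponse empty = SearchResponseUtils.emptyWithTotalHits(
        null,                           // scrollId
        1,                              // totalShards
        1,                              // successfulShards
        0,                              // skippedShards
        100L,                           // tookInMillis
        ShardSearchFailure.EMPTY_ARRAY, // shardFailures
        SearchResponse.Clusters.EMPTY   // clusters
    );
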
Strings.toString(testAgg) : testAgg.toString(); - XContentParser parser = createParser(XContentType.JSON.xContent(), toString); - AggregationBuilder newAgg = parse(parser); + AggregationBuilder newAgg; + try (XContentParser parser = createParser(XContentType.JSON.xContent(), toString)) { + newAgg = parse(parser); + } assertNotSame(newAgg, testAgg); assertEquals(testAgg, newAgg); assertEquals(testAgg.hashCode(), newAgg.hashCode()); diff --git a/test/framework/src/main/java/org/elasticsearch/search/aggregations/bucket/AbstractSignificanceHeuristicTestCase.java b/test/framework/src/main/java/org/elasticsearch/search/aggregations/bucket/AbstractSignificanceHeuristicTestCase.java index 3967a86ea7065..519b67aae556e 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/aggregations/bucket/AbstractSignificanceHeuristicTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/search/aggregations/bucket/AbstractSignificanceHeuristicTestCase.java @@ -197,8 +197,10 @@ public void testParseFromAggBuilder() throws IOException { stBuilder.significanceHeuristic(significanceHeuristic).field("text").minDocCount(200); XContentBuilder stXContentBuilder = XContentFactory.jsonBuilder(); stBuilder.internalXContent(stXContentBuilder, null); - XContentParser stParser = createParser(JsonXContent.jsonXContent, Strings.toString(stXContentBuilder)); - SignificanceHeuristic parsedHeuristic = parseSignificanceHeuristic(stParser); + SignificanceHeuristic parsedHeuristic; + try (XContentParser stParser = createParser(JsonXContent.jsonXContent, Strings.toString(stXContentBuilder))) { + parsedHeuristic = parseSignificanceHeuristic(stParser); + } assertThat(significanceHeuristic, equalTo(parsedHeuristic)); } diff --git a/test/framework/src/main/java/org/elasticsearch/search/geo/BasePointShapeQueryTestCase.java b/test/framework/src/main/java/org/elasticsearch/search/geo/BasePointShapeQueryTestCase.java index b79f9ca3ab903..e73f52e827a14 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/geo/BasePointShapeQueryTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/search/geo/BasePointShapeQueryTestCase.java @@ -390,7 +390,8 @@ public void testQueryWithinMultiLine() throws Exception { try { client().prepareSearch(defaultIndexName) .setQuery(queryBuilder().shapeQuery(defaultFieldName, multiline).relation(ShapeRelation.WITHIN)) - .get(); + .get() + .decRef(); } catch (SearchPhaseExecutionException e) { assertThat(e.getCause().getMessage(), containsString("Field [" + defaultFieldName + "] found an unsupported shape Line")); } diff --git a/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java index aa4eb9db723ec..55b057327763a 100644 --- a/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java @@ -405,24 +405,25 @@ protected String initWithSnapshotVersion(String repoName, Path repoPath, IndexVe oldVersionString = currentVersionString.replace(",\"index_version\":" + IndexVersion.current(), "") .replace(",\"version\":\"8.11.0\"", ",\"version\":\"" + Version.fromId(version.id()) + "\""); } - final RepositoryData downgradedRepoData = RepositoryData.snapshotsFromXContent( - JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, oldVersionString), - repositoryData.getGenId(), - randomBoolean() - ); + final 
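The lone .decRef() added after .get() in BasePointShapeQueryTestCase reflects SearchResponse becoming ref-counted: a test that fires a search purely for its side effects must still release the response, or leak tracking fails the suite. When the response is actually inspected, the usual shape is presumably closer to this sketch:

    SearchResponse response = client().prepareSearch(defaultIndexName).get();
    try {
        // ... assertions against the response ...
    } finally {
        response.decRef(); // release the pooled response so leak detection stays quiet
    }
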
RepositoryData downgradedRepoData; + try (var parser = JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, oldVersionString)) { + downgradedRepoData = RepositoryData.snapshotsFromXContent(parser, repositoryData.getGenId(), randomBoolean()); + } Files.write( repoPath.resolve(BlobStoreRepository.INDEX_FILE_PREFIX + repositoryData.getGenId()), BytesReference.toBytes(BytesReference.bytes(downgradedRepoData.snapshotsToXContent(XContentFactory.jsonBuilder(), version))), StandardOpenOption.TRUNCATE_EXISTING ); - final SnapshotInfo downgradedSnapshotInfo = SnapshotInfo.fromXContentInternal( - repoName, - JsonXContent.jsonXContent.createParser( + final SnapshotInfo downgradedSnapshotInfo; + try ( + var parser = JsonXContent.jsonXContent.createParser( XContentParserConfiguration.EMPTY, Strings.toString(snapshotInfo, ChecksumBlobStoreFormat.SNAPSHOT_ONLY_FORMAT_PARAMS) .replace(IndexVersion.current().toString(), version.toString()) ) - ); + ) { + downgradedSnapshotInfo = SnapshotInfo.fromXContentInternal(repoName, parser); + } final BlobStoreRepository blobStoreRepository = getRepositoryOnMaster(repoName); PlainActionFuture.get( f -> blobStoreRepository.threadPool() @@ -506,7 +507,7 @@ protected void indexRandomDocs(String index, int numdocs) throws InterruptedExce protected long getCountForIndex(String indexName) { return SearchResponseUtils.getTotalHitsValue( - client().prepareSearch(indexName).setSource(new SearchSourceBuilder().size(0).trackTotalHits(true)) + prepareSearch(indexName).setSource(new SearchSourceBuilder().size(0).trackTotalHits(true)) ); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java index 0a3316b87bd04..d3833fdb3a778 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java @@ -152,10 +152,14 @@ public void testFromXContent() throws IOException { randomBoolean(), shuffleProtectedFields() ); - assertParsedQuery(createParser(xContentType.xContent(), shuffledXContent), testQuery); + try (var parser = createParser(xContentType.xContent(), shuffledXContent)) { + assertParsedQuery(parser, testQuery); + } for (Map.Entry alternateVersion : getAlternateVersions().entrySet()) { String queryAsString = alternateVersion.getKey(); - assertParsedQuery(createParser(JsonXContent.jsonXContent, queryAsString), alternateVersion.getValue()); + try (var parser = createParser(JsonXContent.jsonXContent, queryAsString)) { + assertParsedQuery(parser, alternateVersion.getValue()); + } } } } @@ -424,12 +428,15 @@ private void assertParsedQuery(XContentParser parser, QueryBuilder expectedQuery protected QueryBuilder parseQuery(AbstractQueryBuilder builder) throws IOException { BytesReference bytes = XContentHelper.toXContent(builder, XContentType.JSON, false); - return parseQuery(createParser(JsonXContent.jsonXContent, bytes)); + try (var parser = createParser(JsonXContent.jsonXContent, bytes)) { + return parseQuery(parser); + } } protected QueryBuilder parseQuery(String queryAsString) throws IOException { - XContentParser parser = createParser(JsonXContent.jsonXContent, queryAsString); - return parseQuery(parser); + try (XContentParser parser = createParser(JsonXContent.jsonXContent, queryAsString)) { + return parseQuery(parser); + } } protected QueryBuilder parseQuery(XContentParser parser) throws IOException { @@ -651,9 +658,13 @@ public void 
testValidOutput() throws IOException { QB testQuery = createTestQueryBuilder(); XContentType xContentType = XContentType.JSON; String toString = Strings.toString(testQuery); - assertParsedQuery(createParser(xContentType.xContent(), toString), testQuery); + try (var parser = createParser(xContentType.xContent(), toString)) { + assertParsedQuery(parser, testQuery); + } BytesReference bytes = XContentHelper.toXContent(testQuery, xContentType, false); - assertParsedQuery(createParser(xContentType.xContent(), bytes), testQuery); + try (var parser = createParser(xContentType.xContent(), bytes)) { + assertParsedQuery(parser, testQuery); + } } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractXContentTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractXContentTestCase.java index 3f952caa222ee..7c96d88695f1c 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractXContentTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractXContentTestCase.java @@ -159,8 +159,10 @@ public void test() throws IOException { randomFieldsExcludeFilter, createParser ); - XContentParser parser = createParser.apply(XContentFactory.xContent(xContentType), shuffledContent); - T parsed = fromXContent.apply(parser); + final T parsed; + try (XContentParser parser = createParser.apply(XContentFactory.xContent(xContentType), shuffledContent)) { + parsed = fromXContent.apply(parser); + } try { assertEqualsConsumer.accept(testInstance, parsed); if (assertToXContentEquivalence) { @@ -226,6 +228,34 @@ public static void testFromXContent( BiConsumer assertEqualsConsumer, boolean assertToXContentEquivalence, ToXContent.Params toXContentParams + ) throws IOException { + testFromXContent( + numberOfTestRuns, + instanceSupplier, + supportsUnknownFields, + shuffleFieldsExceptions, + randomFieldsExcludeFilter, + createParserFunction, + fromXContent, + assertEqualsConsumer, + assertToXContentEquivalence, + toXContentParams, + t -> {} + ); + } + + public static void testFromXContent( + int numberOfTestRuns, + Supplier instanceSupplier, + boolean supportsUnknownFields, + String[] shuffleFieldsExceptions, + Predicate randomFieldsExcludeFilter, + CheckedBiFunction createParserFunction, + CheckedFunction fromXContent, + BiConsumer assertEqualsConsumer, + boolean assertToXContentEquivalence, + ToXContent.Params toXContentParams, + Consumer dispose ) throws IOException { xContentTester(createParserFunction, instanceSupplier, toXContentParams, fromXContent).numberOfTestRuns(numberOfTestRuns) .supportsUnknownFields(supportsUnknownFields) @@ -233,6 +263,7 @@ public static void testFromXContent( .randomFieldsExcludeFilter(randomFieldsExcludeFilter) .assertEqualsConsumer(assertEqualsConsumer) .assertToXContentEquivalence(assertToXContentEquivalence) + .dispose(dispose) .test(); } @@ -251,10 +282,17 @@ public final void testFromXContent() throws IOException { this::parseInstance, this::assertEqualInstances, assertToXContentEquivalence(), - getToXContentParams() + getToXContentParams(), + this::dispose ); } + /** + * Callback invoked after a test instance is no longer needed that can be overridden to release resources associated with the instance. + * @param instance test instance that is no longer used + */ + protected void dispose(T instance) {} + /** * Creates a random test instance to use in the tests. 
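The new dispose hook below threads a callback through testFromXContent so subclasses can release parsed instances once assertions finish. An illustrative override for a ref-counted type (the concrete type here is an assumption, not taken from this diff):

    @Override
    protected void dispose(SearchResponse instance) {
        instance.decRef(); // free pooled resources held by the parsed test instance
    }
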
This method will be * called multiple times during test execution and should return a different @@ -325,8 +363,9 @@ static BytesReference insertRandomFieldsAndShuffle( } else { withRandomFields = xContent; } - XContentParser parserWithRandomFields = createParserFunction.apply(XContentFactory.xContent(xContentType), withRandomFields); - return BytesReference.bytes(ESTestCase.shuffleXContent(parserWithRandomFields, false, shuffleFieldsExceptions)); + try (XContentParser parserWithRandomFields = createParserFunction.apply(XContentFactory.xContent(xContentType), withRandomFields)) { + return BytesReference.bytes(ESTestCase.shuffleXContent(parserWithRandomFields, false, shuffleFieldsExceptions)); + } } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java index f8482a65bd92b..caf9f4abc0ade 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java @@ -39,6 +39,8 @@ import org.apache.lucene.tests.util.TimeUnits; import org.apache.lucene.util.SetOnce; import org.elasticsearch.TransportVersion; +import org.elasticsearch.action.ActionFuture; +import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.bootstrap.BootstrapForTesting; @@ -79,6 +81,7 @@ import org.elasticsearch.core.CheckedRunnable; import org.elasticsearch.core.PathUtils; import org.elasticsearch.core.PathUtilsForTesting; +import org.elasticsearch.core.RefCounted; import org.elasticsearch.core.RestApiVersion; import org.elasticsearch.core.Strings; import org.elasticsearch.core.SuppressForbidden; @@ -2150,4 +2153,20 @@ public static T asInstanceOf(Class clazz, Object o) { assertThat(o, Matchers.instanceOf(clazz)); return (T) o; } + + public static T expectThrows(Class expectedType, ActionFuture future) { + return expectThrows( + expectedType, + "Expected exception " + expectedType.getSimpleName() + " but no exception was thrown", + () -> future.actionGet().decRef() // dec ref if we unexpectedly fail to not leak transport response + ); + } + + public static T expectThrows(Class expectedType, ActionRequestBuilder builder) { + return expectThrows( + expectedType, + "Expected exception " + expectedType.getSimpleName() + " but no exception was thrown", + () -> builder.get().decRef() // dec ref if we unexpectedly fail to not leak transport response + ); + } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/client/RandomizingClient.java b/test/framework/src/main/java/org/elasticsearch/test/client/RandomizingClient.java index 5392986c25507..3adf92e30e15d 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/client/RandomizingClient.java +++ b/test/framework/src/main/java/org/elasticsearch/test/client/RandomizingClient.java @@ -14,9 +14,11 @@ import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.action.search.SearchType; import org.elasticsearch.client.internal.Client; +import org.elasticsearch.client.internal.ElasticsearchClient; import org.elasticsearch.client.internal.FilterClient; import org.elasticsearch.cluster.routing.Preference; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.search.builder.PointInTimeBuilder; import java.util.Arrays; import java.util.Random; @@ -59,7 +61,7 @@ public RandomizingClient(Client client, Random random) 
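The two expectThrows overloads added to ESTestCase above exist for the same ref-counting reason: if the action unexpectedly succeeds, the returned response must be released before the test fails. Call sites therefore pass the builder or future itself instead of a builder::get lambda, as the ElasticsearchAssertions change below shows:

    // Old shape, which could leak a response on unexpected success:
    //   var e = expectThrows(ClusterBlockException.class, builder::get);
    // New shape, which decRefs the response before failing the test:
    var e = ESTestCase.expectThrows(ClusterBlockException.class, builder);
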
{ @Override public SearchRequestBuilder prepareSearch(String... indices) { - SearchRequestBuilder searchRequestBuilder = in.prepareSearch(indices) + SearchRequestBuilder searchRequestBuilder = new RandomizedSearchRequestBuilder(this).setIndices(indices) .setSearchType(defaultSearchType) .setPreference(defaultPreference) .setBatchedReduceSize(batchedReduceSize); @@ -84,4 +86,18 @@ public Client in() { return super.in(); } + private class RandomizedSearchRequestBuilder extends SearchRequestBuilder { + RandomizedSearchRequestBuilder(ElasticsearchClient client) { + super(client); + } + + @Override + public SearchRequestBuilder setPointInTime(PointInTimeBuilder pointInTimeBuilder) { + if (defaultPreference != null) { + setPreference(null); + } + return super.setPointInTime(pointInTimeBuilder); + } + } + } diff --git a/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java index 0d7ab26faecf9..0892f3eef8a81 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java +++ b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java @@ -41,6 +41,7 @@ import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.suggest.Suggest; +import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.NotEqualMessageBuilder; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; @@ -178,7 +179,7 @@ public static void assertBlocked(BaseBroadcastResponse replicatedBroadcastRespon * @param expectedBlockId the expected block id */ public static void assertBlocked(final ActionRequestBuilder builder, @Nullable final Integer expectedBlockId) { - var e = expectThrows(ClusterBlockException.class, builder::get); + var e = ESTestCase.expectThrows(ClusterBlockException.class, builder); assertThat(e.blocks(), not(empty())); RestStatus status = checkRetryableBlock(e.blocks()) ? 
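The RandomizedSearchRequestBuilder subclass above fixes an interaction between two features: RandomizingClient injects a random default preference into every search, but a request carrying a point-in-time may not also set preference (request validation rejects the combination), so the override clears it before delegating. A hedged usage sketch, with pitId illustrative:

    // Passes request validation even though the randomizing client set a default preference:
    client.prepareSearch().setPointInTime(new PointInTimeBuilder(pitId)).get().decRef();
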
RestStatus.TOO_MANY_REQUESTS : RestStatus.FORBIDDEN; assertThat(e.status(), equalTo(status)); @@ -686,13 +687,6 @@ public static T assertBooleanSubQuery(Query query, Class su return subqueryType.cast(q.clauses().get(i).getQuery()); } - /** - * Run the request from a given builder and check that it throws an exception of the right type - */ - public static void assertRequestBuilderThrows(ActionRequestBuilder builder, Class exceptionClass) { - assertFutureThrows(builder.execute(), exceptionClass); - } - /** * Run the request from a given builder and check that it throws an exception of the right type, with a given {@link RestStatus} */ diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java index 98a1bad5dda77..349c4fe640aa3 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java @@ -8,6 +8,8 @@ package org.elasticsearch.test.rest; +import io.netty.handler.codec.http.HttpMethod; + import org.apache.http.Header; import org.apache.http.HttpHost; import org.apache.http.HttpStatus; @@ -15,6 +17,8 @@ import org.apache.http.client.methods.HttpGet; import org.apache.http.client.methods.HttpPost; import org.apache.http.client.methods.HttpPut; +import org.apache.http.entity.ContentType; +import org.apache.http.entity.InputStreamEntity; import org.apache.http.message.BasicHeader; import org.apache.http.nio.conn.ssl.SSLIOSessionStrategy; import org.apache.http.ssl.SSLContextBuilder; @@ -68,7 +72,7 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xcontent.DeprecationHandler; import org.elasticsearch.xcontent.NamedXContentRegistry; -import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.XContentType; @@ -107,8 +111,8 @@ import java.util.Objects; import java.util.Optional; import java.util.Set; -import java.util.TreeSet; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Consumer; import java.util.function.Predicate; import java.util.function.Supplier; @@ -122,6 +126,7 @@ import static org.elasticsearch.client.RestClient.IGNORE_RESPONSE_CODES_PARAM; import static org.elasticsearch.cluster.ClusterState.VERSION_INTRODUCING_TRANSPORT_VERSIONS; import static org.elasticsearch.core.Strings.format; +import static org.elasticsearch.xcontent.ToXContent.EMPTY_PARAMS; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -216,9 +221,27 @@ public enum ProductFeature { } private static EnumSet availableFeatures; - private static Set nodeVersions; + private static Set nodesVersions; private static TestFeatureService testFeatureService; + protected static Set getCachedNodesVersions() { + assert nodesVersions != null; + return nodesVersions; + } + + protected static Set readVersionsFromNodesInfo(RestClient adminClient) throws IOException { + return getNodesInfo(adminClient).values().stream().map(nodeInfo -> nodeInfo.get("version").toString()).collect(Collectors.toSet()); + } + + protected static Map> getNodesInfo(RestClient adminClient) throws IOException { + Map response = entityAsMap(adminClient.performRequest(new Request("GET", 
"_nodes/plugins"))); + Map nodes = (Map) response.get("nodes"); + + return nodes.entrySet() + .stream() + .collect(Collectors.toUnmodifiableMap(entry -> entry.getKey().toString(), entry -> (Map) entry.getValue())); + } + protected static boolean clusterHasFeature(String featureId) { return testFeatureService.clusterHasFeature(featureId); } @@ -233,7 +256,7 @@ public void initClient() throws IOException { assert adminClient == null; assert clusterHosts == null; assert availableFeatures == null; - assert nodeVersions == null; + assert nodesVersions == null; assert testFeatureService == null; clusterHosts = parseClusterHosts(getTestRestCluster()); logger.info("initializing REST clients against {}", clusterHosts); @@ -241,16 +264,12 @@ public void initClient() throws IOException { adminClient = buildClient(restAdminSettings(), clusterHosts.toArray(new HttpHost[clusterHosts.size()])); availableFeatures = EnumSet.of(ProductFeature.LEGACY_TEMPLATES); - nodeVersions = new TreeSet<>(); - var semanticNodeVersions = new HashSet(); + Set versions = new HashSet<>(); boolean serverless = false; - Map response = entityAsMap(adminClient.performRequest(new Request("GET", "_nodes/plugins"))); - Map nodes = (Map) response.get("nodes"); - for (Map.Entry node : nodes.entrySet()) { - Map nodeInfo = (Map) node.getValue(); + + for (Map nodeInfo : getNodesInfo(adminClient).values()) { var nodeVersion = nodeInfo.get("version").toString(); - nodeVersions.add(nodeVersion); - parseLegacyVersion(nodeVersion).map(semanticNodeVersions::add); + versions.add(nodeVersion); for (Object module : (List) nodeInfo.get("modules")) { Map moduleInfo = (Map) module; final String moduleName = moduleInfo.get("name").toString(); @@ -289,21 +308,15 @@ public void initClient() throws IOException { ); } } + nodesVersions = Collections.unmodifiableSet(versions); + var semanticNodeVersions = nodesVersions.stream() + .map(ESRestTestCase::parseLegacyVersion) + .flatMap(Optional::stream) + .collect(Collectors.toSet()); assert semanticNodeVersions.isEmpty() == false || serverless; - // Historical features information is unavailable when using legacy test plugins - boolean hasHistoricalFeaturesInformation = System.getProperty("tests.features.metadata.path") != null; - var providers = hasHistoricalFeaturesInformation - ? List.of(new RestTestLegacyFeatures(), new ESRestTestCaseHistoricalFeatures()) - : List.of(new RestTestLegacyFeatures()); - - testFeatureService = new TestFeatureService( - hasHistoricalFeaturesInformation, - providers, - semanticNodeVersions, - ClusterFeatures.calculateAllNodeFeatures(getClusterStateFeatures().values()) - ); + testFeatureService = createTestFeatureService(getClusterStateFeatures(adminClient), semanticNodeVersions); } assert testFeatureService != null; @@ -311,7 +324,25 @@ public void initClient() throws IOException { assert adminClient != null; assert clusterHosts != null; assert availableFeatures != null; - assert nodeVersions != null; + assert nodesVersions != null; + } + + protected static TestFeatureService createTestFeatureService( + Map> clusterStateFeatures, + Set semanticNodeVersions + ) { + // Historical features information is unavailable when using legacy test plugins + boolean hasHistoricalFeaturesInformation = System.getProperty("tests.features.metadata.path") != null; + var providers = hasHistoricalFeaturesInformation + ? 
List.of(new RestTestLegacyFeatures(), new ESRestTestCaseHistoricalFeatures()) + : List.of(new RestTestLegacyFeatures()); + + return new ESRestTestFeatureService( + hasHistoricalFeaturesInformation, + providers, + semanticNodeVersions, + ClusterFeatures.calculateAllNodeFeatures(clusterStateFeatures.values()) + ); } protected static boolean has(ProductFeature feature) { @@ -415,7 +446,7 @@ private boolean isExclusivelyTargetingCurrentVersionCluster() { public static RequestOptions expectVersionSpecificWarnings(Consumer expectationsSetter) { Builder builder = RequestOptions.DEFAULT.toBuilder(); - VersionSensitiveWarningsHandler warningsHandler = new VersionSensitiveWarningsHandler(new HashSet<>(nodeVersions)); + VersionSensitiveWarningsHandler warningsHandler = new VersionSensitiveWarningsHandler(getCachedNodesVersions()); expectationsSetter.accept(warningsHandler); builder.setWarningsHandler(warningsHandler); return builder.build(); @@ -484,7 +515,7 @@ public static void closeClients() throws IOException { client = null; adminClient = null; availableFeatures = null; - nodeVersions = null; + nodesVersions = null; testFeatureService = null; } } @@ -690,8 +721,8 @@ protected Set preserveILMPolicyIds() { "logs@lifecycle", "metrics", "metrics@lifecycle", - "profiling", - "profiling@lifecycle", + "profiling-60-days", + "profiling-60-days@lifecycle", "synthetics", "synthetics@lifecycle", "7-days-default", @@ -1128,27 +1159,25 @@ protected void deleteRepository(String repoName) throws IOException { private static void wipeClusterSettings() throws IOException { Map getResponse = entityAsMap(adminClient().performRequest(new Request("GET", "/_cluster/settings"))); - boolean mustClear = false; - XContentBuilder clearCommand = JsonXContent.contentBuilder(); - clearCommand.startObject(); - for (Map.Entry entry : getResponse.entrySet()) { - String type = entry.getKey().toString(); - Map settings = (Map) entry.getValue(); - if (settings.isEmpty()) { - continue; - } - mustClear = true; - clearCommand.startObject(type); - for (Object key : settings.keySet()) { - clearCommand.field(key + ".*").nullValue(); + final var mustClear = new AtomicBoolean(); + final var request = newXContentRequest(HttpMethod.PUT, "/_cluster/settings", (clearCommand, params) -> { + for (Map.Entry entry : getResponse.entrySet()) { + String type = entry.getKey().toString(); + Map settings = (Map) entry.getValue(); + if (settings.isEmpty()) { + continue; + } + mustClear.set(true); + clearCommand.startObject(type); + for (Object key : settings.keySet()) { + clearCommand.field(key + ".*").nullValue(); + } + clearCommand.endObject(); } - clearCommand.endObject(); - } - clearCommand.endObject(); - - if (mustClear) { - Request request = new Request("PUT", "/_cluster/settings"); + return clearCommand; + }); + if (mustClear.get()) { request.setOptions(RequestOptions.DEFAULT.toBuilder().setWarningsHandler(warnings -> { if (warnings.isEmpty()) { return false; @@ -1158,8 +1187,6 @@ private static void wipeClusterSettings() throws IOException { return warnings.get(0).contains("xpack.monitoring") == false; } })); - - request.setJsonEntity(Strings.toString(clearCommand)); adminClient().performRequest(request); } } @@ -1228,7 +1255,9 @@ protected static RefreshResponse refresh(String index) throws IOException { protected static RefreshResponse refresh(RestClient client, String index) throws IOException { Request refreshRequest = new Request("POST", "/" + index + "/_refresh"); Response response = client.performRequest(refreshRequest); - return 
RefreshResponse.fromXContent(responseAsParser(response)); + try (var parser = responseAsParser(response)) { + return RefreshResponse.fromXContent(parser); + } } private static void waitForPendingRollupTasks() throws Exception { @@ -1555,11 +1584,12 @@ public static void updateClusterSettings(Settings settings) throws IOException { * Updates the cluster with the provided settings (as persistent settings) **/ public static void updateClusterSettings(RestClient client, Settings settings) throws IOException { - Request request = new Request("PUT", "/_cluster/settings"); - String entity = "{ \"persistent\":" + Strings.toString(settings) + "}"; - request.setJsonEntity(entity); - Response response = client.performRequest(request); - assertOK(response); + final var request = newXContentRequest(HttpMethod.PUT, "/_cluster/settings", (builder, params) -> { + builder.startObject("persistent"); + settings.toXContent(builder, params); + return builder.endObject(); + }); + assertOK(client.performRequest(request)); } /** @@ -1658,34 +1688,44 @@ protected static CreateIndexResponse createIndex(String name, Settings settings, public static CreateIndexResponse createIndex(RestClient client, String name, Settings settings, String mapping, String aliases) throws IOException { - Request request = new Request("PUT", "/" + name); - String entity = "{"; - if (settings != null) { - entity += "\"settings\": " + Strings.toString(settings); - if (settings.getAsBoolean(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) == false) { - expectSoftDeletesWarning(request, name); - } - } - if (mapping != null) { + + final Request request = newXContentRequest(HttpMethod.PUT, "/" + name, (builder, params) -> { if (settings != null) { - entity += ","; + builder.startObject("settings"); + settings.toXContent(builder, params); + builder.endObject(); } - if (mapping.trim().startsWith("{")) { - entity += "\"mappings\" : " + mapping + ""; - } else { - entity += "\"mappings\" : {" + mapping + "}"; + + if (mapping != null) { + try ( + var mappingParser = XContentType.JSON.xContent() + .createParser(XContentParserConfiguration.EMPTY, mapping.trim().startsWith("{") ? 
mapping : '{' + mapping + '}') + ) { + builder.field("mappings"); + builder.copyCurrentStructure(mappingParser); + } } - } - if (aliases != null) { - if (settings != null || mapping != null) { - entity += ","; + + if (aliases != null) { + try ( + var aliasesParser = XContentType.JSON.xContent().createParser(XContentParserConfiguration.EMPTY, '{' + aliases + '}') + ) { + builder.field("aliases"); + builder.copyCurrentStructure(aliasesParser); + } } - entity += "\"aliases\": {" + aliases + "}"; + + return builder; + }); + + if (settings != null && settings.getAsBoolean(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true) == false) { + expectSoftDeletesWarning(request, name); + } + + final Response response = client.performRequest(request); + try (var parser = responseAsParser(response)) { + return CreateIndexResponse.fromXContent(parser); } - entity += "}"; - request.setJsonEntity(entity); - Response response = client.performRequest(request); - return CreateIndexResponse.fromXContent(responseAsParser(response)); } protected static AcknowledgedResponse deleteIndex(String name) throws IOException { @@ -1695,7 +1735,9 @@ protected static AcknowledgedResponse deleteIndex(String name) throws IOExceptio protected static AcknowledgedResponse deleteIndex(RestClient restClient, String name) throws IOException { Request request = new Request("DELETE", "/" + name); Response response = restClient.performRequest(request); - return AcknowledgedResponse.fromXContent(responseAsParser(response)); + try (var parser = responseAsParser(response)) { + return AcknowledgedResponse.fromXContent(parser); + } } protected static void updateIndexSettings(String index, Settings.Builder settings) throws IOException { @@ -1703,9 +1745,8 @@ protected static void updateIndexSettings(String index, Settings.Builder setting } private static void updateIndexSettings(String index, Settings settings) throws IOException { - Request request = new Request("PUT", "/" + index + "/_settings"); - request.setJsonEntity(Strings.toString(settings)); - client().performRequest(request); + final var request = newXContentRequest(HttpMethod.PUT, "/" + index + "/_settings", settings); + assertOK(client.performRequest(request)); } protected static void expectSoftDeletesWarning(Request request, String indexName) throws IOException { @@ -1729,7 +1770,11 @@ protected static Map getIndexSettings(String index) throws IOExc request.addParameter("flat_settings", "true"); Response response = client().performRequest(request); try (InputStream is = response.getEntity().getContent()) { - return XContentHelper.convertToMap(XContentType.JSON.xContent(), is, true); + return XContentHelper.convertToMap( + XContentType.fromMediaType(response.getEntity().getContentType().getValue()).xContent(), + is, + true + ); } } @@ -1818,8 +1863,12 @@ protected static Map responseAsMap(Response response) throws IOE return responseEntity; } - protected static XContentParser responseAsParser(Response response) throws IOException { - return XContentHelper.createParser(XContentParserConfiguration.EMPTY, responseAsBytes(response), XContentType.JSON); + public static XContentParser responseAsParser(Response response) throws IOException { + return XContentHelper.createParser( + XContentParserConfiguration.EMPTY, + responseAsBytes(response), + XContentType.fromMediaType(response.getEntity().getContentType().getValue()) + ); } protected static BytesReference responseAsBytes(Response response) throws IOException { @@ -1832,9 +1881,13 @@ protected static void 
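The createIndex rewrite above replaces string concatenation with a proper XContentBuilder: caller-supplied JSON fragments are re-parsed and spliced in via copyCurrentStructure, which also normalizes mappings passed with or without their outer braces. The core splice, with an illustrative mapping fragment:

    String mapping = "\"properties\": { \"field\": { \"type\": \"keyword\" } }";
    try (
        var mappingParser = XContentType.JSON.xContent()
            .createParser(XContentParserConfiguration.EMPTY, '{' + mapping + '}')
    ) {
        builder.field("mappings");
        builder.copyCurrentStructure(mappingParser); // copy the parsed tree into the request body
    }
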
registerRepository(String repository, String type, boolean protected static void registerRepository(RestClient restClient, String repository, String type, boolean verify, Settings settings) throws IOException { - final Request request = new Request(HttpPut.METHOD_NAME, "_snapshot/" + repository); + + final Request request = newXContentRequest( + HttpMethod.PUT, + "/_snapshot/" + repository, + new PutRepositoryRequest(repository).type(type).settings(settings) + ); request.addParameter("verify", Boolean.toString(verify)); - request.setJsonEntity(Strings.toString(new PutRepositoryRequest(repository).type(type).settings(settings))); final Response response = restClient.performRequest(request); assertAcked("Failed to create repository [" + repository + "] of type [" + type + "]: " + response, response); @@ -1941,10 +1994,12 @@ protected static boolean isXPackTemplate(String name) { || name.startsWith("logs-apm")) { return true; } + if (name.startsWith(".slm-history") || name.startsWith("ilm-history")) { + return true; + } switch (name) { case ".watches": case "security_audit_log": - case ".slm-history": case ".async-search": case ".profiling-ilm-lock": // TODO: Remove after switch to K/V indices case "saml-service-provider": @@ -1959,7 +2014,6 @@ protected static boolean isXPackTemplate(String name) { case "synthetics-settings": case "synthetics-mappings": case ".snapshot-blob-cache": - case "ilm-history": case "logstash-index-template": case "security-index-template": case "data-streams-mappings": @@ -2064,11 +2118,11 @@ public void ensurePeerRecoveryRetentionLeasesRenewedAndSynced(String index) thro }, 60, TimeUnit.SECONDS); } - private static Map> getClusterStateFeatures() throws IOException { + protected static Map> getClusterStateFeatures(RestClient adminClient) throws IOException { final Request request = new Request("GET", "_cluster/state"); request.addParameter("filter_path", "nodes_features"); - final Response response = adminClient().performRequest(request); + final Response response = adminClient.performRequest(request); var responseData = responseAsMap(response); if (responseData.get("nodes_features") instanceof List nodesFeatures) { @@ -2157,7 +2211,7 @@ protected static TransportVersion getTransportVersionWithFallback( return fallbackSupplier.get(); } - protected static Optional parseLegacyVersion(String version) { + public static Optional parseLegacyVersion(String version) { var semanticVersionMatcher = SEMANTIC_VERSION_PATTERN.matcher(version); if (semanticVersionMatcher.matches()) { return Optional.of(Version.fromString(semanticVersionMatcher.group(1))); @@ -2260,15 +2314,11 @@ protected FieldCapabilitiesResponse fieldCaps( request.addParameter("filters", fieldFilters); } if (indexFilter != null) { - XContentBuilder body = JsonXContent.contentBuilder(); - body.startObject(); - body.field("index_filter", indexFilter); - body.endObject(); - request.setJsonEntity(Strings.toString(body)); + addXContentBody(request, (body, params) -> body.field("index_filter", indexFilter)); } Response response = restClient.performRequest(request); assertOK(response); - try (XContentParser parser = createParser(JsonXContent.jsonXContent, response.getEntity().getContent())) { + try (XContentParser parser = responseAsParser(response)) { return FieldCapabilitiesResponse.fromXContent(parser); } } @@ -2324,4 +2374,22 @@ public static void setIgnoredErrorResponseCodes(Request request, RestStatus... 
r Arrays.stream(restStatuses).map(restStatus -> Integer.toString(restStatus.getStatus())).collect(Collectors.joining(",")) ); } + + public static void addXContentBody(Request request, ToXContent body) throws IOException { + final var xContentType = randomFrom(XContentType.values()); + final var bodyBytes = XContentHelper.toXContent(body, xContentType, EMPTY_PARAMS, randomBoolean()); + request.setEntity( + new InputStreamEntity( + bodyBytes.streamInput(), + bodyBytes.length(), + ContentType.create(xContentType.mediaTypeWithoutParameters()) + ) + ); + } + + public static Request newXContentRequest(HttpMethod method, String endpoint, ToXContent body) throws IOException { + final var request = new Request(method.name(), endpoint); + addXContentBody(request, body); + return request; + } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestFeatureService.java b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestFeatureService.java new file mode 100644 index 0000000000000..25a5da93804ca --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestFeatureService.java @@ -0,0 +1,65 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.test.rest; + +import org.elasticsearch.Version; +import org.elasticsearch.core.Strings; +import org.elasticsearch.features.FeatureData; +import org.elasticsearch.features.FeatureSpecification; + +import java.util.Collection; +import java.util.List; +import java.util.NavigableMap; +import java.util.Set; +import java.util.function.Predicate; + +class ESRestTestFeatureService implements TestFeatureService { + private final Predicate historicalFeaturesPredicate; + private final Set clusterStateFeatures; + + ESRestTestFeatureService( + boolean hasHistoricalFeaturesInformation, + List specs, + Collection nodeVersions, + Set clusterStateFeatures + ) { + var minNodeVersion = nodeVersions.stream().min(Version::compareTo); + var featureData = FeatureData.createFromSpecifications(specs); + var historicalFeatures = featureData.getHistoricalFeatures(); + var allHistoricalFeatures = historicalFeatures.lastEntry() == null ? Set.of() : historicalFeatures.lastEntry().getValue(); + + var errorMessage = hasHistoricalFeaturesInformation + ? "Check the feature has been added to the correct FeatureSpecification in the relevant module or, if this is a " + + "legacy feature used only in tests, to a test-only FeatureSpecification" + : "This test is running on the legacy test framework; historical features from production code will not be available." + + " You need to port the test to the new test plugins in order to use historical features from production code." 
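addXContentBody and newXContentRequest, defined above, serialize a ToXContent body in a randomly chosen XContentType and set the matching Content-Type header; that randomization is why responseAsParser and getIndexSettings now sniff the response's media type instead of assuming JSON. A hedged usage sketch (the setting name is illustrative):

    final Request request = newXContentRequest(
        HttpMethod.PUT,
        "/_cluster/settings",
        (builder, params) -> builder.startObject("persistent").field("cluster.routing.rebalance.enable", "all").endObject()
    );
    assertOK(adminClient().performRequest(request));
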
+ + " If this is a legacy feature used only in tests, you can add it to a test-only FeatureSpecification"; + this.historicalFeaturesPredicate = minNodeVersion.>map(v -> featureId -> { + assert allHistoricalFeatures.contains(featureId) : Strings.format("Unknown historical feature %s: %s", featureId, errorMessage); + return hasHistoricalFeature(historicalFeatures, v, featureId); + }).orElse(featureId -> { + // We can safely assume that new non-semantic versions (serverless) support all historical features + assert allHistoricalFeatures.contains(featureId) : Strings.format("Unknown historical feature %s: %s", featureId, errorMessage); + return true; + }); + this.clusterStateFeatures = clusterStateFeatures; + } + + private static boolean hasHistoricalFeature(NavigableMap> historicalFeatures, Version version, String featureId) { + var features = historicalFeatures.floorEntry(version); + return features != null && features.getValue().contains(featureId); + } + + public boolean clusterHasFeature(String featureId) { + if (clusterStateFeatures.contains(featureId)) { + return true; + } + return historicalFeaturesPredicate.test(featureId); + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/TestFeatureService.java b/test/framework/src/main/java/org/elasticsearch/test/rest/TestFeatureService.java index 1f7a48add1f1c..ab6c97843355f 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/TestFeatureService.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/TestFeatureService.java @@ -8,58 +8,6 @@ package org.elasticsearch.test.rest; -import org.elasticsearch.Version; -import org.elasticsearch.core.Strings; -import org.elasticsearch.features.FeatureData; -import org.elasticsearch.features.FeatureSpecification; - -import java.util.Collection; -import java.util.List; -import java.util.NavigableMap; -import java.util.Set; -import java.util.function.Predicate; - -class TestFeatureService { - private final Predicate historicalFeaturesPredicate; - private final Set clusterStateFeatures; - - TestFeatureService( - boolean hasHistoricalFeaturesInformation, - List specs, - Collection nodeVersions, - Set clusterStateFeatures - ) { - var minNodeVersion = nodeVersions.stream().min(Version::compareTo); - var featureData = FeatureData.createFromSpecifications(specs); - var historicalFeatures = featureData.getHistoricalFeatures(); - var allHistoricalFeatures = historicalFeatures.lastEntry() == null ? Set.of() : historicalFeatures.lastEntry().getValue(); - - var errorMessage = hasHistoricalFeaturesInformation - ? "Check the feature has been added to the correct FeatureSpecification in the relevant module or, if this is a " - + "legacy feature used only in tests, to a test-only FeatureSpecification" - : "This test is running on the legacy test framework; historical features from production code will not be available." - + " You need to port the test to the new test plugins in order to use historical features from production code." 
- + " If this is a legacy feature used only in tests, you can add it to a test-only FeatureSpecification"; - this.historicalFeaturesPredicate = minNodeVersion.>map(v -> featureId -> { - assert allHistoricalFeatures.contains(featureId) : Strings.format("Unknown historical feature %s: %s", featureId, errorMessage); - return hasHistoricalFeature(historicalFeatures, v, featureId); - }).orElse(featureId -> { - // We can safely assume that new non-semantic versions (serverless) support all historical features - assert allHistoricalFeatures.contains(featureId) : Strings.format("Unknown historical feature %s: %s", featureId, errorMessage); - return true; - }); - this.clusterStateFeatures = clusterStateFeatures; - } - - private static boolean hasHistoricalFeature(NavigableMap> historicalFeatures, Version version, String featureId) { - var features = historicalFeatures.floorEntry(version); - return features != null && features.getValue().contains(featureId); - } - - boolean clusterHasFeature(String featureId) { - if (clusterStateFeatures.contains(featureId)) { - return true; - } - return historicalFeaturesPredicate.test(featureId); - } +public interface TestFeatureService { + boolean clusterHasFeature(String featureId); } diff --git a/test/framework/src/test/java/org/elasticsearch/common/util/concurrent/DeterministicTaskQueueTests.java b/test/framework/src/test/java/org/elasticsearch/common/util/concurrent/DeterministicTaskQueueTests.java index 2f920f3e58fa1..f4677dc603e64 100644 --- a/test/framework/src/test/java/org/elasticsearch/common/util/concurrent/DeterministicTaskQueueTests.java +++ b/test/framework/src/test/java/org/elasticsearch/common/util/concurrent/DeterministicTaskQueueTests.java @@ -254,7 +254,7 @@ public void testRunTasksUpToTimeInOrder() { IntStream.range(0, randomIntBetween(0, 10)) .forEach( i -> taskQueue.scheduleAt( - randomLongBetween(cutoffTimeInMillis + 1, 2 * cutoffTimeInMillis), + randomLongBetween(cutoffTimeInMillis + 1, 2 * cutoffTimeInMillis + 1), () -> seenNumbers.add(i + nRunnableTasks + nDeferredTasksUpToCutoff) ) ); diff --git a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlDocsTestClient.java b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlDocsTestClient.java index c95fc5c131df0..38d090e455ebe 100644 --- a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlDocsTestClient.java +++ b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlDocsTestClient.java @@ -10,7 +10,6 @@ import org.apache.http.HttpEntity; import org.apache.http.HttpHost; -import org.elasticsearch.Version; import org.elasticsearch.client.NodeSelector; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; @@ -27,7 +26,6 @@ import java.util.Map; import java.util.Objects; import java.util.function.BiPredicate; -import java.util.function.Predicate; /** * Used to execute REST requests according to the docs snippets that need to be tests. 
Wraps a @@ -40,12 +38,9 @@ public ClientYamlDocsTestClient( final ClientYamlSuiteRestSpec restSpec, final RestClient restClient, final List hosts, - final Version esVersion, - final Predicate clusterFeaturesPredicate, - final String os, final CheckedSupplier clientBuilderWithSniffedNodes ) { - super(restSpec, restClient, hosts, esVersion, clusterFeaturesPredicate, os, clientBuilderWithSniffedNodes); + super(restSpec, restClient, hosts, clientBuilderWithSniffedNodes); } @Override diff --git a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java index d30f65718943e..c57a9f3107393 100644 --- a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java +++ b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestClient.java @@ -20,7 +20,6 @@ import org.apache.http.util.EntityUtils; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.elasticsearch.Version; import org.elasticsearch.client.NodeSelector; import org.elasticsearch.client.Request; import org.elasticsearch.client.RequestOptions; @@ -47,7 +46,6 @@ import java.util.Map.Entry; import java.util.Set; import java.util.function.BiPredicate; -import java.util.function.Predicate; import java.util.stream.Collectors; import static com.carrotsearch.randomizedtesting.RandomizedTest.frequently; @@ -64,44 +62,20 @@ public class ClientYamlTestClient implements Closeable { private final ClientYamlSuiteRestSpec restSpec; private final Map restClients = new HashMap<>(); - private final Version esVersion; - private final String os; private final CheckedSupplier clientBuilderWithSniffedNodes; - private final Predicate clusterFeaturesPredicate; ClientYamlTestClient( final ClientYamlSuiteRestSpec restSpec, final RestClient restClient, final List hosts, - final Version esVersion, - final Predicate clusterFeaturesPredicate, - final String os, final CheckedSupplier clientBuilderWithSniffedNodes ) { - this.clusterFeaturesPredicate = clusterFeaturesPredicate; assert hosts.size() > 0; this.restSpec = restSpec; this.restClients.put(NodeSelector.ANY, restClient); - this.esVersion = esVersion; - this.os = os; this.clientBuilderWithSniffedNodes = clientBuilderWithSniffedNodes; } - /** - * @return the version of the oldest node in the cluster - */ - public Version getEsVersion() { - return esVersion; - } - - public boolean clusterHasFeature(String featureId) { - return clusterFeaturesPredicate.test(featureId); - } - - public String getOs() { - return os; - } - /** * Calls an api with the provided parameters and body */ diff --git a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContext.java b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContext.java index a584280119ef3..10bf2fb4b0a9f 100644 --- a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContext.java +++ b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContext.java @@ -15,10 +15,10 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.Version; import org.elasticsearch.client.NodeSelector; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.test.rest.Stash; +import 
org.elasticsearch.test.rest.TestFeatureService; import org.elasticsearch.test.rest.yaml.restspec.ClientYamlSuiteRestApi; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; @@ -30,6 +30,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.function.BiPredicate; /** @@ -50,26 +51,48 @@ public class ClientYamlTestExecutionContext { private ClientYamlTestResponse response; + private final Set nodesVersions; + + private final Set osSet; + private final TestFeatureService testFeatureService; + private final boolean randomizeContentType; private final BiPredicate pathPredicate; public ClientYamlTestExecutionContext( ClientYamlTestCandidate clientYamlTestCandidate, ClientYamlTestClient clientYamlTestClient, - boolean randomizeContentType + boolean randomizeContentType, + final Set nodesVersions, + final TestFeatureService testFeatureService, + final Set osSet ) { - this(clientYamlTestCandidate, clientYamlTestClient, randomizeContentType, (ignoreApi, ignorePath) -> true); + this( + clientYamlTestCandidate, + clientYamlTestClient, + randomizeContentType, + nodesVersions, + testFeatureService, + osSet, + (ignoreApi, ignorePath) -> true + ); } public ClientYamlTestExecutionContext( ClientYamlTestCandidate clientYamlTestCandidate, ClientYamlTestClient clientYamlTestClient, boolean randomizeContentType, + final Set nodesVersions, + final TestFeatureService testFeatureService, + final Set osSet, BiPredicate pathPredicate ) { this.clientYamlTestClient = clientYamlTestClient; this.clientYamlTestCandidate = clientYamlTestCandidate; this.randomizeContentType = randomizeContentType; + this.nodesVersions = nodesVersions; + this.testFeatureService = testFeatureService; + this.osSet = osSet; this.pathPredicate = pathPredicate; } @@ -224,14 +247,14 @@ public Stash stash() { } /** - * @return the version of the oldest node in the cluster + * @return the distinct node versions running in the cluster */ - public Version esVersion() { - return clientYamlTestClient.getEsVersion(); + public Set nodesVersions() { + return nodesVersions; } public String os() { - return clientYamlTestClient.getOs(); + return osSet.iterator().next(); } public ClientYamlTestCandidate getClientYamlTestCandidate() { @@ -239,6 +262,6 @@ public ClientYamlTestCandidate getClientYamlTestCandidate() { } public boolean clusterHasFeature(String featureId) { - return clientYamlTestClient.clusterHasFeature(featureId); + return testFeatureService.clusterHasFeature(featureId); } } diff --git a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java index 2e1631cc8c337..ad05a81cc2574 100644 --- a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java +++ b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java @@ -26,12 +26,13 @@ import org.elasticsearch.client.sniff.ElasticsearchNodesSniffer; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.VersionId; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.core.IOUtils; -import org.elasticsearch.core.Tuple; import org.elasticsearch.test.ClasspathUtils; import 
org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.test.rest.TestFeatureService; import org.elasticsearch.test.rest.yaml.restspec.ClientYamlSuiteRestApi; import org.elasticsearch.test.rest.yaml.restspec.ClientYamlSuiteRestSpec; import org.elasticsearch.test.rest.yaml.section.ClientYamlTestSection; @@ -61,7 +62,7 @@ import java.util.Set; import java.util.SortedSet; import java.util.TreeSet; -import java.util.function.Predicate; +import java.util.stream.Collectors; import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; @@ -139,21 +140,38 @@ public void initAndResetContext() throws Exception { validateSpec(restSpec); restSpecification = restSpec; final List hosts = getClusterHosts(); - Tuple versionVersionTuple = readVersionsFromCatNodes(adminClient()); - final Version esVersion = versionVersionTuple.v1(); - final Version masterVersion = versionVersionTuple.v2(); + final Set nodesVersions = getCachedNodesVersions(); final String os = readOsFromNodesInfo(adminClient()); - logger.info( - "initializing client, minimum es version [{}], master version, [{}], hosts {}, os [{}]", - esVersion, - masterVersion, - hosts, - os + logger.info("initializing client, node versions [{}], hosts {}, os [{}]", nodesVersions, hosts, os); + + var semanticNodeVersions = nodesVersions.stream() + .map(ESRestTestCase::parseLegacyVersion) + .flatMap(Optional::stream) + .collect(Collectors.toSet()); + final TestFeatureService testFeatureService = createTestFeatureService( + getClusterStateFeatures(adminClient()), + semanticNodeVersions + ); + + logger.info("initializing client, node versions [{}], hosts {}, os [{}]", nodesVersions, hosts, os); + + clientYamlTestClient = initClientYamlTestClient(restSpec, client(), hosts); + restTestExecutionContext = createRestTestExecutionContext( + testCandidate, + clientYamlTestClient, + nodesVersions, + testFeatureService, + Set.of(os) + ); + adminExecutionContext = new ClientYamlTestExecutionContext( + testCandidate, + clientYamlTestClient, + false, + nodesVersions, + testFeatureService, + Set.of(os) ); - clientYamlTestClient = initClientYamlTestClient(restSpec, client(), hosts, esVersion, ESRestTestCase::clusterHasFeature, os); - restTestExecutionContext = createRestTestExecutionContext(testCandidate, clientYamlTestClient); - adminExecutionContext = new ClientYamlTestExecutionContext(testCandidate, clientYamlTestClient, false); final String[] blacklist = resolvePathsProperty(REST_TESTS_BLACKLIST, null); blacklistPathMatchers = new ArrayList<>(); for (final String entry : blacklist) { @@ -179,28 +197,27 @@ public void initAndResetContext() throws Exception { */ protected ClientYamlTestExecutionContext createRestTestExecutionContext( ClientYamlTestCandidate clientYamlTestCandidate, - ClientYamlTestClient clientYamlTestClient + ClientYamlTestClient clientYamlTestClient, + final Set nodesVersions, + final TestFeatureService testFeatureService, + final Set osSet ) { - return new ClientYamlTestExecutionContext(clientYamlTestCandidate, clientYamlTestClient, randomizeContentType()); + return new ClientYamlTestExecutionContext( + clientYamlTestCandidate, + clientYamlTestClient, + randomizeContentType(), + nodesVersions, + testFeatureService, + osSet + ); } protected ClientYamlTestClient initClientYamlTestClient( final ClientYamlSuiteRestSpec restSpec, final RestClient restClient, - final List hosts, - final Version esVersion, - final Predicate clusterFeaturesPredicate, - final String os + final List hosts ) { - return new ClientYamlTestClient( - restSpec, 
- restClient, - hosts, - esVersion, - clusterFeaturesPredicate, - os, - this::getClientBuilderWithSniffedHosts - ); + return new ClientYamlTestClient(restSpec, restClient, hosts, this::getClientBuilderWithSniffedHosts); } @AfterClass @@ -307,13 +324,15 @@ static Map> loadSuites(String... paths) throws Exception { for (String strPath : paths) { Path path = root.resolve(strPath); if (Files.isDirectory(path)) { - Files.walk(path).forEach(file -> { - if (file.toString().endsWith(".yml")) { - addSuite(root, file, files); - } else if (file.toString().endsWith(".yaml")) { - throw new IllegalArgumentException("yaml files are no longer supported: " + file); - } - }); + try (var filesStream = Files.walk(path)) { + filesStream.forEach(file -> { + if (file.toString().endsWith(".yml")) { + addSuite(root, file, files); + } else if (file.toString().endsWith(".yaml")) { + throw new IllegalArgumentException("yaml files are no longer supported: " + file); + } + }); + } } else { path = root.resolve(strPath + ".yml"); assert Files.exists(path) : "Path " + path + " does not exist in YAML test root"; @@ -390,36 +409,7 @@ private static void validateSpec(ClientYamlSuiteRestSpec restSpec) { } } - Tuple readVersionsFromCatNodes(RestClient restClient) throws IOException { - // we simply go to the _cat/nodes API and parse all versions in the cluster - final Request request = new Request("GET", "/_cat/nodes"); - request.addParameter("h", "version,master"); - request.setOptions(getCatNodesVersionMasterRequestOptions()); - Response response = restClient.performRequest(request); - ClientYamlTestResponse restTestResponse = new ClientYamlTestResponse(response); - String nodesCatResponse = restTestResponse.getBodyAsString(); - String[] split = nodesCatResponse.split("\n"); - Version version = null; - Version masterVersion = null; - for (String perNode : split) { - final String[] versionAndMaster = perNode.split("\\s+"); - assert versionAndMaster.length == 2 : "invalid line: " + perNode + " length: " + versionAndMaster.length; - final Version currentVersion = Version.fromString(versionAndMaster[0]); - final boolean master = versionAndMaster[1].trim().equals("*"); - if (master) { - assert masterVersion == null; - masterVersion = currentVersion; - } - if (version == null) { - version = currentVersion; - } else if (version.onOrAfter(currentVersion)) { - version = currentVersion; - } - } - return new Tuple<>(version, masterVersion); - } - - String readOsFromNodesInfo(RestClient restClient) throws IOException { + static String readOsFromNodesInfo(RestClient restClient) throws IOException { final Request request = new Request("GET", "/_nodes/os"); Response response = restClient.performRequest(request); ClientYamlTestResponse restTestResponse = new ClientYamlTestResponse(response); @@ -447,10 +437,6 @@ String readOsFromNodesInfo(RestClient restClient) throws IOException { return osPrettyNames.last(); } - protected RequestOptions getCatNodesVersionMasterRequestOptions() { - return RequestOptions.DEFAULT; - } - public void test() throws IOException { // skip test if it matches one of the blacklist globs for (BlacklistedPathPatternMatcher blacklistedPathMatcher : blacklistPathMatchers) { @@ -461,20 +447,31 @@ public void test() throws IOException { ); } + // Try to extract the minimum node version. Assume CURRENT if nodes have non-semantic versions + // TODO: after https://github.com/elastic/elasticsearch/pull/103404 is merged, we can push this logic into SkipVersionContext. 
+ // This way we will have version parsing only when we actually have to skip on a version, and we can remove the default and throw an + // IllegalArgumentException instead (attempting to skip on a version where the version is not semantic) + var oldestNodeVersion = restTestExecutionContext.nodesVersions() + .stream() + .map(ESRestTestCase::parseLegacyVersion) + .flatMap(Optional::stream) + .min(VersionId::compareTo) + .orElse(Version.CURRENT); + // skip test if the whole suite (yaml file) is disabled assumeFalse( testCandidate.getSetupSection().getSkipSection().getSkipMessage(testCandidate.getSuitePath()), - testCandidate.getSetupSection().getSkipSection().skip(restTestExecutionContext.esVersion()) + testCandidate.getSetupSection().getSkipSection().skip(oldestNodeVersion) ); // skip test if the suite teardown section is disabled assumeFalse( testCandidate.getTeardownSection().getSkipSection().getSkipMessage(testCandidate.getSuitePath()), - testCandidate.getTeardownSection().getSkipSection().skip(restTestExecutionContext.esVersion()) + testCandidate.getTeardownSection().getSkipSection().skip(oldestNodeVersion) ); // skip test if test section is disabled assumeFalse( testCandidate.getTestSection().getSkipSection().getSkipMessage(testCandidate.getTestPath()), - testCandidate.getTestSection().getSkipSection().skip(restTestExecutionContext.esVersion()) + testCandidate.getTestSection().getSkipSection().skip(oldestNodeVersion) ); // skip test if os is excluded assumeFalse( diff --git a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java index bd038cc4dcd58..f57d90e911ea2 100644 --- a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java +++ b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java @@ -17,10 +17,12 @@ import org.elasticsearch.client.NodeSelector; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.VersionId; import org.elasticsearch.common.logging.HeaderWarning; import org.elasticsearch.core.Tuple; import org.elasticsearch.core.UpdateForV9; import org.elasticsearch.rest.action.admin.indices.RestPutIndexTemplateAction; +import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.test.rest.RestTestLegacyFeatures; import org.elasticsearch.test.rest.yaml.ClientYamlTestExecutionContext; import org.elasticsearch.test.rest.yaml.ClientYamlTestResponse; @@ -39,6 +41,7 @@ import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Optional; import java.util.Set; import java.util.TreeMap; import java.util.function.Predicate; @@ -188,10 +191,16 @@ public static DoSection parse(XContentParser parser) throws IOException { } else if (token.isValue()) { if ("body".equals(paramName)) { String body = parser.text(); - XContentParser bodyParser = JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, body); - // multiple bodies are supported e.g. in case of bulk provided as a whole string - while (bodyParser.nextToken() != null) { - apiCallSection.addBody(bodyParser.mapOrdered()); + try ( + XContentParser bodyParser = JsonXContent.jsonXContent.createParser( + XContentParserConfiguration.EMPTY, + body + ) + ) { + // multiple bodies are supported e.g.
in case of bulk provided as a whole string + while (bodyParser.nextToken() != null) { + apiCallSection.addBody(bodyParser.mapOrdered()); + } } } else { apiCallSection.addParam(paramName, parser.text()); @@ -370,8 +379,14 @@ public void execute(ClientYamlTestExecutionContext executionContext) throws IOEx // This is really difficult to express just with features, so I will break it down into 2 parts: version check for v7, // and feature check for v8. This way the version check can be removed once we move to v9 @UpdateForV9 - var fixedInV7 = executionContext.esVersion().major == Version.V_7_17_0.major - && executionContext.esVersion().onOrAfter(Version.V_7_17_2); + var fixedInV7 = executionContext.nodesVersions() + .stream() + .map(ESRestTestCase::parseLegacyVersion) + .flatMap(Optional::stream) + .min(VersionId::compareTo) + .map(v -> v.major == Version.V_7_17_0.major && v.onOrAfter(Version.V_7_17_2)) + .orElse(false); + var fixedProductionHeader = fixedInV7 || executionContext.clusterHasFeature(RestTestLegacyFeatures.REST_ELASTIC_PRODUCT_HEADER_PRESENT.id()); if (fixedProductionHeader) { diff --git a/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContextTests.java b/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContextTests.java index 49cb509608ec1..6e8397c816b3b 100644 --- a/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContextTests.java +++ b/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/ClientYamlTestExecutionContextTests.java @@ -9,25 +9,39 @@ package org.elasticsearch.test.rest.yaml; import org.apache.http.HttpEntity; -import org.elasticsearch.Version; import org.elasticsearch.client.NodeSelector; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.VersionUtils; +import org.elasticsearch.test.rest.TestFeatureService; import java.io.IOException; import java.util.Collections; import java.util.HashMap; import java.util.Map; +import java.util.Set; import java.util.concurrent.atomic.AtomicReference; import static org.hamcrest.Matchers.is; public class ClientYamlTestExecutionContextTests extends ESTestCase { + private static class MockTestFeatureService implements TestFeatureService { + @Override + public boolean clusterHasFeature(String featureId) { + return true; + } + } + public void testHeadersSupportStashedValueReplacement() throws IOException { final AtomicReference> headersRef = new AtomicReference<>(); - final Version version = VersionUtils.randomVersion(random()); - final ClientYamlTestExecutionContext context = new ClientYamlTestExecutionContext(null, null, randomBoolean()) { + final String version = randomAlphaOfLength(10); + final ClientYamlTestExecutionContext context = new ClientYamlTestExecutionContext( + null, + null, + randomBoolean(), + Set.of(version), + new MockTestFeatureService(), + Set.of("os") + ) { @Override ClientYamlTestResponse callApiInternal( String apiName, @@ -39,11 +53,6 @@ ClientYamlTestResponse callApiInternal( headersRef.set(headers); return null; } - - @Override - public Version esVersion() { - return version; - } }; final Map headers = new HashMap<>(); headers.put("foo", "$bar"); @@ -62,8 +71,15 @@ public Version esVersion() { } public void testStashHeadersOnException() throws IOException { - final Version version = VersionUtils.randomVersion(random()); - final ClientYamlTestExecutionContext context = new ClientYamlTestExecutionContext(null, null, randomBoolean()) { + final 
String version = randomAlphaOfLength(10); + final ClientYamlTestExecutionContext context = new ClientYamlTestExecutionContext( + null, + null, + randomBoolean(), + Set.of(version), + new MockTestFeatureService(), + Set.of("os") + ) { @Override ClientYamlTestResponse callApiInternal( String apiName, @@ -74,11 +90,6 @@ ClientYamlTestResponse callApiInternal( ) { throw new RuntimeException("boom!"); } - - @Override - public Version esVersion() { - return version; - } }; final Map headers = new HashMap<>(); headers.put("Accept", "application/json"); diff --git a/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/DoSectionTests.java b/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/DoSectionTests.java index 501f83bb02e1f..0cb9a3e29e63f 100644 --- a/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/DoSectionTests.java +++ b/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/DoSectionTests.java @@ -16,7 +16,6 @@ import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.logging.HeaderWarning; import org.elasticsearch.core.Strings; -import org.elasticsearch.test.VersionUtils; import org.elasticsearch.test.rest.yaml.ClientYamlTestExecutionContext; import org.elasticsearch.test.rest.yaml.ClientYamlTestResponse; import org.elasticsearch.xcontent.XContentLocation; @@ -31,6 +30,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.regex.Pattern; import static java.util.Collections.emptyList; @@ -610,7 +610,7 @@ public void testNodeSelectorByVersionRange() throws IOException { doSection.getApiCallSection().getNodeSelector() ) ).thenReturn(mockResponse); - when(context.esVersion()).thenReturn(VersionUtils.randomVersion(random())); + when(context.nodesVersions()).thenReturn(Set.of(randomAlphaOfLength(10))); when(mockResponse.getHeaders("X-elastic-product")).thenReturn(List.of("Elasticsearch")); doSection.execute(context); verify(context).callApi( diff --git a/x-pack/plugin/analytics/src/internalClusterTest/java/org/elasticsearch/xpack/analytics/multiterms/MultiTermsWithRequestBreakerIT.java b/x-pack/plugin/analytics/src/internalClusterTest/java/org/elasticsearch/xpack/analytics/multiterms/MultiTermsWithRequestBreakerIT.java index 6f13b3b4bc528..4c8666365f603 100644 --- a/x-pack/plugin/analytics/src/internalClusterTest/java/org/elasticsearch/xpack/analytics/multiterms/MultiTermsWithRequestBreakerIT.java +++ b/x-pack/plugin/analytics/src/internalClusterTest/java/org/elasticsearch/xpack/analytics/multiterms/MultiTermsWithRequestBreakerIT.java @@ -61,7 +61,7 @@ public void testRequestBreaker() throws Exception { new MultiValuesSourceFieldConfig.Builder().setFieldName("field1.keyword").build() ) ) - ).get(); + ).get().decRef(); } catch (ElasticsearchException e) { if (ExceptionsHelper.unwrap(e, CircuitBreakingException.class) == null) { throw e; diff --git a/x-pack/plugin/apm-data/src/main/java/org/elasticsearch/xpack/apmdata/APMIndexTemplateRegistry.java b/x-pack/plugin/apm-data/src/main/java/org/elasticsearch/xpack/apmdata/APMIndexTemplateRegistry.java index 44621ee211838..f528d99133756 100644 --- a/x-pack/plugin/apm-data/src/main/java/org/elasticsearch/xpack/apmdata/APMIndexTemplateRegistry.java +++ b/x-pack/plugin/apm-data/src/main/java/org/elasticsearch/xpack/apmdata/APMIndexTemplateRegistry.java @@ -134,7 +134,9 @@ protected List getIngestPipelines() { private static ComponentTemplate loadComponentTemplate(String 
name, int version) { try { final byte[] content = loadVersionedResourceUTF8("/component-templates/" + name + ".yaml", version); - return ComponentTemplate.parse(YamlXContent.yamlXContent.createParser(XContentParserConfiguration.EMPTY, content)); + try (var parser = YamlXContent.yamlXContent.createParser(XContentParserConfiguration.EMPTY, content)) { + return ComponentTemplate.parse(parser); + } } catch (Exception e) { throw new RuntimeException("failed to load APM Ingest plugin's component template: " + name, e); } @@ -143,7 +145,9 @@ private static ComponentTemplate loadComponentTemplate(String name, int version) private static ComposableIndexTemplate loadIndexTemplate(String name, int version) { try { final byte[] content = loadVersionedResourceUTF8("/index-templates/" + name + ".yaml", version); - return ComposableIndexTemplate.parse(YamlXContent.yamlXContent.createParser(XContentParserConfiguration.EMPTY, content)); + try (var parser = YamlXContent.yamlXContent.createParser(XContentParserConfiguration.EMPTY, content)) { + return ComposableIndexTemplate.parse(parser); + } } catch (Exception e) { throw new RuntimeException("failed to load APM Ingest plugin's index template: " + name, e); } diff --git a/x-pack/plugin/apm-data/src/main/resources/component-templates/logs-apm.error@mappings.yaml b/x-pack/plugin/apm-data/src/main/resources/component-templates/logs-apm.error@mappings.yaml index c946403c795dd..e6353853bc4d5 100644 --- a/x-pack/plugin/apm-data/src/main/resources/component-templates/logs-apm.error@mappings.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/component-templates/logs-apm.error@mappings.yaml @@ -6,21 +6,29 @@ _meta: template: mappings: properties: + # error.* error.custom: type: flattened error.exception.attributes: type: flattened error.exception.stacktrace: type: flattened + error.log.stacktrace: + type: flattened error.grouping_name: type: keyword script: | def logMessage = params['_source'].error?.log?.message; - if (logMessage != null) { + if (logMessage != null && logMessage != "") { emit(logMessage); return; } def exception = params['_source'].error?.exception; - if (exception != null && exception.length > 0) { + def exceptionMessage = exception != null && exception.length > 0 ? exception[0]?.message : null; + if (exceptionMessage != null && exceptionMessage != "") { emit(exception[0].message); } + + # http.* + http.request.body: + type: flattened diff --git a/x-pack/plugin/apm-data/src/main/resources/component-templates/traces-apm@mappings.yaml b/x-pack/plugin/apm-data/src/main/resources/component-templates/traces-apm@mappings.yaml index 558a5da81e4f7..eb2da017d97b7 100644 --- a/x-pack/plugin/apm-data/src/main/resources/component-templates/traces-apm@mappings.yaml +++ b/x-pack/plugin/apm-data/src/main/resources/component-templates/traces-apm@mappings.yaml @@ -6,15 +6,22 @@ _meta: template: mappings: properties: + # NOTE(axw) processor.event may be either "span" or "transaction". + # + # This field should eventually be removed, and we should end up + # with only spans. Some of those spans may be identified as local + # roots, equivalent in concept to transactions. 
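The NOTE above also explains why the traces template has to infer processor.event at ingest time rather than requiring agents to send it: presence of transaction.type marks a transaction, and everything else is treated as a span. That rule is exercised by the 20_traces_ingest.yml test later in this diff; the standalone method below is only a schematic Java rendering of it (the actual implementation is an ingest pipeline shipped by the apm-data plugin, and the inferProcessorEvent name is hypothetical).

import java.util.Map;

// Illustration only: the real logic lives in an ingest pipeline, not in Java.
final class ProcessorEventInferenceSketch {
    // Hypothetical helper: given the document's "transaction" object (possibly null),
    // apply the same rule the yamlRestTest below asserts on.
    static String inferProcessorEvent(Map<String, Object> transaction) {
        return transaction != null && transaction.containsKey("type") ? "transaction" : "span";
    }

    public static void main(String[] args) {
        System.out.println(inferProcessorEvent(Map.of("type", "foo"))); // transaction
        System.out.println(inferProcessorEvent(null)); // span
    }
}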
processor.event: type: keyword + + # event.* event.success_count: type: byte index: false - span.duration.us: - type: long - transaction.duration.us: - type: long + + # http.* + http.request.body: + type: flattened http.response.transfer_size: type: long index: false @@ -24,10 +31,22 @@ template: http.response.decoded_body_size: type: long index: false + + # span.* + span.duration.us: + type: long span.representative_count: type: scaled_float scaling_factor: 1000 index: false + span.stacktrace: + type: flattened + + # transaction.* + transaction.custom: + type: flattened + transaction.duration.us: + type: long transaction.representative_count: type: scaled_float scaling_factor: 1000 diff --git a/x-pack/plugin/apm-data/src/test/java/org/elasticsearch/xpack/apmdata/APMIndexTemplateRegistryTests.java b/x-pack/plugin/apm-data/src/test/java/org/elasticsearch/xpack/apmdata/APMIndexTemplateRegistryTests.java index 9189cdff74547..4ab7396fb1a9e 100644 --- a/x-pack/plugin/apm-data/src/test/java/org/elasticsearch/xpack/apmdata/APMIndexTemplateRegistryTests.java +++ b/x-pack/plugin/apm-data/src/test/java/org/elasticsearch/xpack/apmdata/APMIndexTemplateRegistryTests.java @@ -51,13 +51,14 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Collectors; import static org.elasticsearch.xpack.core.XPackSettings.APM_DATA_ENABLED; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.not; @@ -137,31 +138,30 @@ public void testThatIndependentTemplatesAreAddedImmediatelyIfMissing() throws Ex assertThat(actualInstalledIndexTemplates.get(), equalTo(0)); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/102797") - public void testIngestPipelines() { + public void testIngestPipelines() throws Exception { DiscoveryNode node = DiscoveryNodeUtils.create("node"); DiscoveryNodes nodes = DiscoveryNodes.builder().localNodeId("node").masterNodeId("node").add(node).build(); final List pipelineConfigs = apmIndexTemplateRegistry.getIngestPipelines(); assertThat(pipelineConfigs, is(not(empty()))); - pipelineConfigs.forEach(ingestPipelineConfig -> { - AtomicInteger putPipelineRequestsLocal = new AtomicInteger(0); - client.setVerifier((a, r, l) -> { - if (r instanceof PutPipelineRequest && ingestPipelineConfig.getId().equals(((PutPipelineRequest) r).getId())) { - putPipelineRequestsLocal.incrementAndGet(); + final Set expectedPipelines = apmIndexTemplateRegistry.getIngestPipelines() + .stream() + .map(IngestPipelineConfig::getId) + .collect(Collectors.toSet()); + final Set installedPipelines = ConcurrentHashMap.newKeySet(pipelineConfigs.size()); + client.setVerifier((a, r, l) -> { + if (r instanceof PutPipelineRequest putPipelineRequest) { + if (expectedPipelines.contains(putPipelineRequest.getId())) { + installedPipelines.add(putPipelineRequest.getId()); } - return AcknowledgedResponse.TRUE; - }); - - apmIndexTemplateRegistry.clusterChanged( - createClusterChangedEvent(Map.of(), Map.of(), ingestPipelineConfig.getPipelineDependencies(), nodes) - ); - try { - assertBusy(() -> assertThat(putPipelineRequestsLocal.get(), greaterThanOrEqualTo(1))); - } catch (Exception e) { - throw new RuntimeException(e); } + return 
AcknowledgedResponse.TRUE; + }); + + assertBusy(() -> { + apmIndexTemplateRegistry.clusterChanged(createClusterChangedEvent(Map.of(), Map.of(), List.copyOf(installedPipelines), nodes)); + assertThat(installedPipelines, equalTo(expectedPipelines)); }); } @@ -310,7 +310,7 @@ private ClusterChangedEvent createClusterChangedEvent( private ClusterChangedEvent createClusterChangedEvent( Map existingComponentTemplates, Map existingComposableTemplates, - List ingestPipelines, + List existingIngestPipelines, Map existingPolicies, DiscoveryNodes nodes ) { @@ -318,7 +318,7 @@ private ClusterChangedEvent createClusterChangedEvent( Settings.EMPTY, existingComponentTemplates, existingComposableTemplates, - ingestPipelines, + existingIngestPipelines, existingPolicies, nodes ); diff --git a/x-pack/plugin/apm-data/src/yamlRestTest/resources/rest-api-spec/test/10_apm.yml b/x-pack/plugin/apm-data/src/yamlRestTest/resources/rest-api-spec/test/10_apm.yml index b8fdebf9a938b..f4397ca18c101 100644 --- a/x-pack/plugin/apm-data/src/yamlRestTest/resources/rest-api-spec/test/10_apm.yml +++ b/x-pack/plugin/apm-data/src/yamlRestTest/resources/rest-api-spec/test/10_apm.yml @@ -82,110 +82,3 @@ setup: - length: {hits.hits: 1} - match: {hits.hits.0.fields.event\.success_count: [1]} - match: {hits.hits.0.fields.span\.duration\.us: [123]} - ---- -"Test metrics-apm.internal-* data stream rerouting": - - do: - bulk: - index: metrics-apm.internal-testing - refresh: true - body: - - create: {} - - "@timestamp": "2017-06-22" - data_stream.type: metrics - data_stream.dataset: apm.internal - data_stream.namespace: testing - metricset: - name: transaction - - create: {} - - "@timestamp": "2017-06-22" - data_stream.type: metrics - data_stream.dataset: apm.internal - data_stream.namespace: testing - metricset: - name: service_destination - - create: {} - - "@timestamp": "2017-06-22" - data_stream.type: metrics - data_stream.dataset: apm.internal - data_stream.namespace: testing - metricset: - name: app_config # should not be rerouted - - do: - indices.get_data_stream: - name: metrics-apm.transaction.1m-testing - - do: - indices.get_data_stream: - name: metrics-apm.service_destination.1m-testing - - do: - indices.get_data_stream: - name: metrics-apm.internal-testing - - do: - search: - index: metrics-apm* - - length: {hits.hits: 3} - - match: {hits.hits.0._source.data_stream.dataset: "apm.internal"} - - match: {hits.hits.1._source.data_stream.dataset: "apm.service_destination.1m"} - - match: {hits.hits.1._source.metricset.interval: "1m"} - - match: {hits.hits.2._source.data_stream.dataset: "apm.transaction.1m"} - - match: {hits.hits.2._source.metricset.interval: "1m"} - ---- -"Test metrics-apm.app-* dynamic mapping": - - do: - bulk: - index: metrics-apm.app.svc1-testing - refresh: true - body: - - create: {} - - "@timestamp": "2017-06-22" - data_stream.type: metrics - data_stream.dataset: apm.app.svc1 - data_stream.namespace: testing - metricset: - name: app - samples: - - name: double_metric - type: gauge - value: 123 - - name: summary_metric - type: summary - value_count: 123 - sum: 456.789 - - name: histogram_metric - type: histogram - counts: [1, 2, 3] - values: [1.5, 2.5, 3.5] - - set: - items.0.create._index: index - - do: - # Wait for cluster state changes to be applied before - # querying field mappings. 
- cluster.health: - wait_for_events: languid - - do: - indices.get_field_mapping: - index: metrics-apm.app.svc1-testing - fields: [double_metric, summary_metric, histogram_metric] - - match: - $body: - $index: - mappings: - double_metric: - full_name: double_metric - mapping: - double_metric: - type: double - index: false - summary_metric: - full_name: summary_metric - mapping: - summary_metric: - type: aggregate_metric_double - metrics : [sum, value_count] - default_metric: value_count - histogram_metric: - full_name: histogram_metric - mapping: - histogram_metric: - type: histogram diff --git a/x-pack/plugin/apm-data/src/yamlRestTest/resources/rest-api-spec/test/20_error_grouping.yml b/x-pack/plugin/apm-data/src/yamlRestTest/resources/rest-api-spec/test/20_error_grouping.yml new file mode 100644 index 0000000000000..f7cd386227fe8 --- /dev/null +++ b/x-pack/plugin/apm-data/src/yamlRestTest/resources/rest-api-spec/test/20_error_grouping.yml @@ -0,0 +1,56 @@ +--- +setup: + - do: + cluster.health: + wait_for_events: languid + +--- +"Test logs-apm.error-* error grouping": + - do: + bulk: + index: logs-apm.error-testing + refresh: true + body: + # No error object field + - create: {} + - '{"@timestamp": "2017-06-22"}' + + # Empty error object + - create: {} + - '{"@timestamp": "2017-06-22", "error": {}}' + + # Empty error.log object + - create: {} + - '{"@timestamp": "2017-06-22", "error": {"log": {}}}' + + # Empty error.exception array + - create: {} + - '{"@timestamp": "2017-06-22", "error": {"exception": []}}' + + # Empty error.exception object + - create: {} + - '{"@timestamp": "2017-06-22", "error": {"exception": [{}]}}' + + # Non-empty error.log.message used + - create: {} + - '{"@timestamp": "2017-06-22", "error": {"log": {"message": "log_used"}, "exception": [{"message": "ignored"}]}}' + + # Non-empty error.exception.message used + - create: {} + - '{"@timestamp": "2017-06-22", "error": {"log": {"message": ""}, "exception": [{"message": "exception_used"}]}}' + + - is_false: errors + + - do: + search: + index: logs-apm.error-testing + body: + fields: ["error.grouping_name"] + - length: { hits.hits: 7 } + - match: { hits.hits.0.fields: null } + - match: { hits.hits.1.fields: null } + - match: { hits.hits.2.fields: null } + - match: { hits.hits.3.fields: null } + - match: { hits.hits.4.fields: null } + - match: { hits.hits.5.fields: {"error.grouping_name": ["log_used"]} } + - match: { hits.hits.6.fields: {"error.grouping_name": ["exception_used"]} } diff --git a/x-pack/plugin/apm-data/src/yamlRestTest/resources/rest-api-spec/test/20_flattened_fields.yml b/x-pack/plugin/apm-data/src/yamlRestTest/resources/rest-api-spec/test/20_flattened_fields.yml new file mode 100644 index 0000000000000..adb248b23fe5b --- /dev/null +++ b/x-pack/plugin/apm-data/src/yamlRestTest/resources/rest-api-spec/test/20_flattened_fields.yml @@ -0,0 +1,107 @@ +--- +setup: + - do: + cluster.health: + wait_for_events: languid + +--- +"Test traces-apm-* flattened fields": + - do: + bulk: + index: traces-apm-testing + refresh: true + body: + # http.request.body should be mapped as flattened, allowing + # differing types to be used in http.request.body.original. + - create: {} + - '{"@timestamp": "2017-06-22", "http.request.body": {"original": "text"}}' + - create: {} + - '{"@timestamp": "2017-06-22", "http.request.body": {"original": {"field": "value"}}}' + + # span.stacktrace is a complex object whose structure may + # change over time, and which is always treated as an object. 
+ # Moreover, stacktraces may contain dynamic "vars" whose + # types may change from one document to the next. + - create: {} + - '{"@timestamp": "2017-06-22", "span.stacktrace": [{"vars": {"a": 123}}]}' + - create: {} + - '{"@timestamp": "2017-06-22", "span.stacktrace": [{"vars": {"a": "b"}}]}' + + # transaction.custom is a complex object of fields with + # arbitrary field types that may change from one document + # to the next. + - create: {} + - '{"@timestamp": "2017-06-22", "transaction.custom": {"a": {"b": 123}}}' + - create: {} + - '{"@timestamp": "2017-06-22", "transaction.custom": {"a": "b"}}' + + - is_false: errors + + - do: + search: + index: traces-apm-testing + body: + fields: ["http.request.body", "span.stacktrace", "transaction.custom"] + - length: { hits.hits: 6 } + - match: { hits.hits.0.fields: {"http.request.body": [{"original": "text"}]} } + - match: { hits.hits.1.fields: {"http.request.body": [{"original": {"field": "value"}}]} } + - match: { hits.hits.2.fields: {"span.stacktrace": [{"vars": {"a": 123}}]} } + - match: { hits.hits.3.fields: {"span.stacktrace": [{"vars": {"a": "b"}}]} } + - match: { hits.hits.4.fields: {"transaction.custom": [{"a": {"b": 123}}]} } + - match: { hits.hits.5.fields: {"transaction.custom": [{"a": "b"}]} } + +--- +"Test logs-apm.error-* flattened fields": + - do: + bulk: + index: logs-apm.error-testing + refresh: true + body: + # http.request.body has the same requirements as http.request.body + # in traces-apm-* data streams. + - create: {} + - '{"@timestamp": "2017-06-22", "http.request.body": {"original": "text"}}' + - create: {} + - '{"@timestamp": "2017-06-22", "http.request.body": {"original": {"field": "value"}}}' + + # error.{exception,log}.stacktrace have the same requirements as span.stacktrace. + - create: {} + - '{"@timestamp": "2017-06-22", "error.exception.stacktrace": [{"vars": {"a": 123}}]}' + - create: {} + - '{"@timestamp": "2017-06-22", "error.exception.stacktrace": [{"vars": {"a": "b"}}]}' + - create: {} + - '{"@timestamp": "2017-06-22", "error.log.stacktrace": [{"vars": {"a": 123}}]}' + - create: {} + - '{"@timestamp": "2017-06-22", "error.log.stacktrace": [{"vars": {"a": "b"}}]}' + + # error.exception.attributes is a complex object with arbitrary field types + # that may change from one document to the next. + - create: {} + - '{"@timestamp": "2017-06-22", "error.exception": [{"attributes": {"a": 123}}]}' + - create: {} + - '{"@timestamp": "2017-06-22", "error.exception": [{"attributes": {"a": "b"}}]}' + + # error.custom has the same requirements as transaction.custom. 
+ - create: {} + - '{"@timestamp": "2017-06-22", "error.custom": {"a": {"b": 123}}}' + - create: {} + - '{"@timestamp": "2017-06-22", "error.custom": {"a": "b"}}' + + - is_false: errors + + - do: + search: + index: logs-apm.error-testing + body: + fields: ["http.request.body", "error.log.*", "error.exception.*", "error.custom"] + - length: { hits.hits: 10 } + - match: { hits.hits.0.fields: {"http.request.body": [{"original": "text"}]} } + - match: { hits.hits.1.fields: {"http.request.body": [{"original": {"field": "value"}}]} } + - match: { hits.hits.2.fields: {"error.exception.stacktrace": [{"vars": {"a": 123}}]} } + - match: { hits.hits.3.fields: {"error.exception.stacktrace": [{"vars": {"a": "b"}}]} } + - match: { hits.hits.4.fields: {"error.log.stacktrace": [{"vars": {"a": 123}}]} } + - match: { hits.hits.5.fields: {"error.log.stacktrace": [{"vars": {"a": "b"}}]} } + - match: { hits.hits.6.fields: {"error.exception.attributes": [{"a": 123}]} } + - match: { hits.hits.7.fields: {"error.exception.attributes": [{"a": "b"}]} } + - match: { hits.hits.8.fields: {"error.custom": [{"a": {"b": 123}}]} } + - match: { hits.hits.9.fields: {"error.custom": [{"a": "b"}]} } diff --git a/x-pack/plugin/apm-data/src/yamlRestTest/resources/rest-api-spec/test/20_metrics_mapping.yml b/x-pack/plugin/apm-data/src/yamlRestTest/resources/rest-api-spec/test/20_metrics_mapping.yml new file mode 100644 index 0000000000000..85858a9c5ed2e --- /dev/null +++ b/x-pack/plugin/apm-data/src/yamlRestTest/resources/rest-api-spec/test/20_metrics_mapping.yml @@ -0,0 +1,65 @@ +--- +setup: + - do: + cluster.health: + wait_for_events: languid + +--- +"Test metrics-apm.app-* dynamic mapping": + - do: + bulk: + index: metrics-apm.app.svc1-testing + refresh: true + body: + - create: {} + - "@timestamp": "2017-06-22" + data_stream.type: metrics + data_stream.dataset: apm.app.svc1 + data_stream.namespace: testing + metricset: + name: app + samples: + - name: double_metric + type: gauge + value: 123 + - name: summary_metric + type: summary + value_count: 123 + sum: 456.789 + - name: histogram_metric + type: histogram + counts: [1, 2, 3] + values: [1.5, 2.5, 3.5] + - set: + items.0.create._index: index + - do: + # Wait for cluster state changes to be applied before + # querying field mappings. 
+ cluster.health: + wait_for_events: languid + - do: + indices.get_field_mapping: + index: metrics-apm.app.svc1-testing + fields: [double_metric, summary_metric, histogram_metric] + - match: + $body: + $index: + mappings: + double_metric: + full_name: double_metric + mapping: + double_metric: + type: double + index: false + summary_metric: + full_name: summary_metric + mapping: + summary_metric: + type: aggregate_metric_double + metrics : [sum, value_count] + default_metric: value_count + histogram_metric: + full_name: histogram_metric + mapping: + histogram_metric: + type: histogram diff --git a/x-pack/plugin/apm-data/src/yamlRestTest/resources/rest-api-spec/test/20_metrics_rerouting.yml b/x-pack/plugin/apm-data/src/yamlRestTest/resources/rest-api-spec/test/20_metrics_rerouting.yml new file mode 100644 index 0000000000000..f5f2307570563 --- /dev/null +++ b/x-pack/plugin/apm-data/src/yamlRestTest/resources/rest-api-spec/test/20_metrics_rerouting.yml @@ -0,0 +1,52 @@ +--- +setup: + - do: + cluster.health: + wait_for_events: languid + +--- +"Test metrics-apm.internal-* data stream rerouting": + - do: + bulk: + index: metrics-apm.internal-testing + refresh: true + body: + - create: {} + - "@timestamp": "2017-06-22" + data_stream.type: metrics + data_stream.dataset: apm.internal + data_stream.namespace: testing + metricset: + name: transaction + - create: {} + - "@timestamp": "2017-06-22" + data_stream.type: metrics + data_stream.dataset: apm.internal + data_stream.namespace: testing + metricset: + name: service_destination + - create: {} + - "@timestamp": "2017-06-22" + data_stream.type: metrics + data_stream.dataset: apm.internal + data_stream.namespace: testing + metricset: + name: app_config # should not be rerouted + - do: + indices.get_data_stream: + name: metrics-apm.transaction.1m-testing + - do: + indices.get_data_stream: + name: metrics-apm.service_destination.1m-testing + - do: + indices.get_data_stream: + name: metrics-apm.internal-testing + - do: + search: + index: metrics-apm* + - length: {hits.hits: 3} + - match: {hits.hits.0._source.data_stream.dataset: "apm.internal"} + - match: {hits.hits.1._source.data_stream.dataset: "apm.service_destination.1m"} + - match: {hits.hits.1._source.metricset.interval: "1m"} + - match: {hits.hits.2._source.data_stream.dataset: "apm.transaction.1m"} + - match: {hits.hits.2._source.metricset.interval: "1m"} diff --git a/x-pack/plugin/apm-data/src/yamlRestTest/resources/rest-api-spec/test/20_traces_ingest.yml b/x-pack/plugin/apm-data/src/yamlRestTest/resources/rest-api-spec/test/20_traces_ingest.yml new file mode 100644 index 0000000000000..ea7f948abf0b8 --- /dev/null +++ b/x-pack/plugin/apm-data/src/yamlRestTest/resources/rest-api-spec/test/20_traces_ingest.yml @@ -0,0 +1,99 @@ +--- +setup: + - do: + cluster.health: + wait_for_events: languid + +--- +"Test traces-apm-* processor.event inference": + - do: + bulk: + index: traces-apm-testing + refresh: true + body: + # `processor.event: transaction` is inferred from presence of `transaction.type` + - create: {} + - '{"@timestamp": "2017-06-22", "transaction": {"type": "foo"}}' + + # `processor.event: span` is inferred otherwise + - create: {} + - '{"@timestamp": "2017-06-22"}' + + - is_false: errors + + - do: + search: + index: traces-apm-testing + body: + fields: ["processor.event"] + - length: { hits.hits: 2 } + - match: { hits.hits.0.fields: {"processor.event": ["transaction"]} } + - match: { hits.hits.1.fields: {"processor.event": ["span"]} } + +--- +"Test traces-apm-* setting *.duration.us from 
event.duration": + - do: + bulk: + index: traces-apm-testing + refresh: true + body: + - create: {} + - '{"@timestamp": "2017-06-22", "transaction": {"type": "foo"}, "event": {"duration": 1234}}' + + - create: {} + - '{"@timestamp": "2017-06-22", "event": {"duration": 1234}}' + + # If event.duration is omitted, it is assumed to be zero. + - create: {} + - '{"@timestamp": "2017-06-22"}' + + # An existing field will not be overwritten. + - create: {} + - '{"@timestamp": "2017-06-22", "span": {"duration": {"us": 789}}}' + + - is_false: errors + + - do: + search: + index: traces-apm-testing + body: + fields: ["event.duration", "*.duration.us"] + - length: { hits.hits: 4 } + - match: { hits.hits.0.fields: {"transaction.duration.us": [1]} } + - match: { hits.hits.1.fields: {"span.duration.us": [1]} } + - match: { hits.hits.2.fields: {"span.duration.us": [0]} } + - match: { hits.hits.3.fields: {"span.duration.us": [789]} } + +--- +"Test traces-apm-* setting event.success_count from event.outcome": + - do: + bulk: + index: traces-apm-testing + refresh: true + body: + # No event.outcome, no event.success_count + - create: {} + - '{"@timestamp": "2017-06-22"}' + + # event.outcome: unknown, no event.success_count + - create: {} + - '{"@timestamp": "2017-06-22", "event": {"outcome": "unknown"}}' + + - create: {} + - '{"@timestamp": "2017-06-22", "event": {"outcome": "success"}}' + + - create: {} + - '{"@timestamp": "2017-06-22", "event": {"outcome": "failure"}}' + + - is_false: errors + + - do: + search: + index: traces-apm-testing + body: + fields: ["event.success_count"] + - length: { hits.hits: 4 } + - match: { hits.hits.0.fields: null } + - match: { hits.hits.1.fields: null } + - match: { hits.hits.2.fields: {"event.success_count": [1]} } + - match: { hits.hits.3.fields: {"event.success_count": [0]} } diff --git a/x-pack/plugin/async-search/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/search/AsyncSearchSecurityIT.java b/x-pack/plugin/async-search/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/search/AsyncSearchSecurityIT.java index fb3696a79a579..21a2c2295c809 100644 --- a/x-pack/plugin/async-search/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/search/AsyncSearchSecurityIT.java +++ b/x-pack/plugin/async-search/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/search/AsyncSearchSecurityIT.java @@ -229,7 +229,7 @@ public void testRejectPointInTimeWithIndices() throws Exception { try { final Request request = new Request("POST", "/_async_search"); setRunAsHeader(request, authorizedUser); - request.addParameter("wait_for_completion_timeout", "true"); + request.addParameter("wait_for_completion_timeout", "1s"); request.addParameter("keep_on_completion", "true"); if (randomBoolean()) { request.addParameter("index", "index-" + authorizedUser); diff --git a/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/AsyncSearchIntegTestCase.java b/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/AsyncSearchIntegTestCase.java index 3f888685f33db..25ff78f5c0ed2 100644 --- a/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/AsyncSearchIntegTestCase.java +++ b/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/AsyncSearchIntegTestCase.java @@ -193,7 +193,11 @@ protected void ensureTaskNotRunning(String id) throws Exception { assertBusy(() -> { try { AsyncSearchResponse resp = getAsyncSearch(id); - assertFalse(resp.isRunning()); + 
try { + assertFalse(resp.isRunning()); + } finally { + resp.decRef(); + } } catch (Exception exc) { if (ExceptionsHelper.unwrapCause(exc.getCause()) instanceof ResourceNotFoundException == false) { throw exc; diff --git a/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/MutableSearchResponse.java b/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/MutableSearchResponse.java index dc6c780c64644..ed2f4a78e259c 100644 --- a/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/MutableSearchResponse.java +++ b/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/MutableSearchResponse.java @@ -18,7 +18,6 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.aggregations.InternalAggregations; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.xpack.core.search.action.AsyncSearchResponse; import org.elasticsearch.xpack.core.search.action.AsyncStatusResponse; @@ -156,18 +155,15 @@ void addQueryFailure(int shardIndex, ShardSearchFailure shardSearchFailure) { } private SearchResponse buildResponse(long taskStartTimeNanos, InternalAggregations reducedAggs) { - InternalSearchResponse internal = new InternalSearchResponse( + long tookInMillis = TimeValue.timeValueNanos(System.nanoTime() - taskStartTimeNanos).getMillis(); + return new SearchResponse( new SearchHits(SearchHits.EMPTY, totalHits, Float.NaN), reducedAggs, null, - null, false, false, - reducePhase - ); - long tookInMillis = TimeValue.timeValueNanos(System.nanoTime() - taskStartTimeNanos).getMillis(); - return new SearchResponse( - internal, + null, + reducePhase, null, totalShards, successfulShards, diff --git a/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/RestGetAsyncSearchAction.java b/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/RestGetAsyncSearchAction.java index 8ee4af819def0..7d35c072c2ba2 100644 --- a/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/RestGetAsyncSearchAction.java +++ b/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/RestGetAsyncSearchAction.java @@ -12,7 +12,7 @@ import org.elasticsearch.rest.RestStatus; import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; -import org.elasticsearch.rest.action.RestChunkedToXContentListener; +import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener; import org.elasticsearch.xpack.core.async.GetAsyncResultRequest; import org.elasticsearch.xpack.core.search.action.AsyncSearchResponse; import org.elasticsearch.xpack.core.search.action.GetAsyncSearchAction; @@ -44,7 +44,7 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli if (request.hasParam("keep_alive")) { get.setKeepAlive(request.paramAsTime("keep_alive", get.getKeepAlive())); } - return channel -> client.execute(GetAsyncSearchAction.INSTANCE, get, new RestChunkedToXContentListener<>(channel) { + return channel -> client.execute(GetAsyncSearchAction.INSTANCE, get, new RestRefCountedChunkedToXContentListener<>(channel) { @Override protected RestStatus getRestStatus(AsyncSearchResponse asyncSearchResponse) { return asyncSearchResponse.status(); diff --git a/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/RestSubmitAsyncSearchAction.java b/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/RestSubmitAsyncSearchAction.java index 
66eb8ce6fb518..8f554d4d8705c 100644 --- a/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/RestSubmitAsyncSearchAction.java +++ b/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/RestSubmitAsyncSearchAction.java @@ -13,7 +13,7 @@ import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestCancellableNodeClient; -import org.elasticsearch.rest.action.RestChunkedToXContentListener; +import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener; import org.elasticsearch.usage.SearchUsageHolder; import org.elasticsearch.xpack.core.search.action.AsyncSearchResponse; import org.elasticsearch.xpack.core.search.action.SubmitAsyncSearchAction; @@ -78,14 +78,13 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli submit.setKeepOnCompletion(request.paramAsBoolean("keep_on_completion", submit.isKeepOnCompletion())); } return channel -> { - RestChunkedToXContentListener listener = new RestChunkedToXContentListener<>(channel) { + RestCancellableNodeClient cancelClient = new RestCancellableNodeClient(client, request.getHttpChannel()); + cancelClient.execute(SubmitAsyncSearchAction.INSTANCE, submit, new RestRefCountedChunkedToXContentListener<>(channel) { @Override protected RestStatus getRestStatus(AsyncSearchResponse asyncSearchResponse) { return asyncSearchResponse.status(); } - }; - RestCancellableNodeClient cancelClient = new RestCancellableNodeClient(client, request.getHttpChannel()); - cancelClient.execute(SubmitAsyncSearchAction.INSTANCE, submit, listener); + }); }; } diff --git a/x-pack/plugin/async-search/src/test/java/org/elasticsearch/xpack/search/AsyncSearchResponseTests.java b/x-pack/plugin/async-search/src/test/java/org/elasticsearch/xpack/search/AsyncSearchResponseTests.java index dae7d79913690..f3d6f352db186 100644 --- a/x-pack/plugin/async-search/src/test/java/org/elasticsearch/xpack/search/AsyncSearchResponseTests.java +++ b/x-pack/plugin/async-search/src/test/java/org/elasticsearch/xpack/search/AsyncSearchResponseTests.java @@ -11,7 +11,6 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.search.SearchResponseSections; import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.common.Strings; @@ -25,8 +24,8 @@ import org.elasticsearch.script.ScriptException; import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.SearchModule; +import org.elasticsearch.search.SearchResponseUtils; import org.elasticsearch.search.SearchShardTarget; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.transport.RemoteClusterAware; import org.elasticsearch.xcontent.ToXContent; @@ -129,15 +128,13 @@ static SearchResponse randomSearchResponse(boolean ccs) { int totalShards = randomIntBetween(1, Integer.MAX_VALUE); int successfulShards = randomIntBetween(0, totalShards); int skippedShards = randomIntBetween(0, successfulShards); - InternalSearchResponse internalSearchResponse = InternalSearchResponse.EMPTY_WITH_TOTAL_HITS; SearchResponse.Clusters clusters; if (ccs) { clusters = createCCSClusterObjects(20, 19, true, 10, 1, 2); } else { clusters = SearchResponse.Clusters.EMPTY; } - return new SearchResponse( - internalSearchResponse, + return 
SearchResponseUtils.emptyWithTotalHits( null, totalShards, successfulShards, @@ -211,9 +208,14 @@ public void testToXContentWithSearchResponseAfterCompletion() throws IOException long expectedCompletionTime = startTimeMillis + took; SearchHits hits = SearchHits.EMPTY_WITHOUT_TOTAL_HITS; - SearchResponseSections sections = new SearchResponseSections(hits, null, null, false, null, null, 2); SearchResponse searchResponse = new SearchResponse( - sections, + hits, + null, + null, + false, + null, + null, + 2, null, 10, 9, @@ -316,11 +318,25 @@ public void testToXContentWithCCSSearchResponseWhileRunning() throws IOException long took = 22968L; SearchHits hits = SearchHits.EMPTY_WITHOUT_TOTAL_HITS; - SearchResponseSections sections = new SearchResponseSections(hits, null, null, false, null, null, 2); SearchResponse.Clusters clusters = createCCSClusterObjects(3, 3, true); - SearchResponse searchResponse = new SearchResponse(sections, null, 10, 9, 1, took, ShardSearchFailure.EMPTY_ARRAY, clusters); + SearchResponse searchResponse = new SearchResponse( + hits, + null, + null, + false, + null, + null, + 2, + null, + 10, + 9, + 1, + took, + ShardSearchFailure.EMPTY_ARRAY, + clusters + ); AsyncSearchResponse asyncSearchResponse = new AsyncSearchResponse( "id", @@ -462,7 +478,6 @@ public void testToXContentWithCCSSearchResponseAfterCompletion() throws IOExcept long expectedCompletionTime = startTimeMillis + took; SearchHits hits = SearchHits.EMPTY_WITHOUT_TOTAL_HITS; - SearchResponseSections sections = new SearchResponseSections(hits, null, null, true, null, null, 2); SearchResponse.Clusters clusters = createCCSClusterObjects(4, 3, true); SearchResponse.Cluster updated = clusters.swapCluster( @@ -532,7 +547,22 @@ public void testToXContentWithCCSSearchResponseAfterCompletion() throws IOExcept ); assertNotNull("Set cluster failed for cluster " + cluster2.getClusterAlias(), updated); - SearchResponse searchResponse = new SearchResponse(sections, null, 10, 9, 1, took, new ShardSearchFailure[0], clusters); + SearchResponse searchResponse = new SearchResponse( + hits, + null, + null, + true, + null, + null, + 2, + null, + 10, + 9, + 1, + took, + new ShardSearchFailure[0], + clusters + ); AsyncSearchResponse asyncSearchResponse = new AsyncSearchResponse( "id", @@ -659,9 +689,14 @@ public void testToXContentWithSearchResponseWhileRunning() throws IOException { long took = 22968L; SearchHits hits = SearchHits.EMPTY_WITHOUT_TOTAL_HITS; - SearchResponseSections sections = new SearchResponseSections(hits, null, null, false, null, null, 2); SearchResponse searchResponse = new SearchResponse( - sections, + hits, + null, + null, + false, + null, + null, + 2, null, 10, 9, diff --git a/x-pack/plugin/async-search/src/test/java/org/elasticsearch/xpack/search/AsyncSearchTaskTests.java b/x-pack/plugin/async-search/src/test/java/org/elasticsearch/xpack/search/AsyncSearchTaskTests.java index 9dccdf39128ea..f119e590cc75c 100644 --- a/x-pack/plugin/async-search/src/test/java/org/elasticsearch/xpack/search/AsyncSearchTaskTests.java +++ b/x-pack/plugin/async-search/src/test/java/org/elasticsearch/xpack/search/AsyncSearchTaskTests.java @@ -28,7 +28,6 @@ import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.bucket.terms.StringTerms; import org.elasticsearch.search.builder.SearchSourceBuilder; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.test.ESTestCase; import 
org.elasticsearch.test.client.NoOpClient; @@ -405,17 +404,14 @@ private static SearchResponse newSearchResponse( int skippedShards, ShardSearchFailure... failures ) { - InternalSearchResponse response = new InternalSearchResponse( + return new SearchResponse( SearchHits.EMPTY_WITH_TOTAL_HITS, InternalAggregations.EMPTY, null, - null, false, null, - 1 - ); - return new SearchResponse( - response, + null, + 1, null, totalShards, successfulShards, diff --git a/x-pack/plugin/async-search/src/test/java/org/elasticsearch/xpack/search/AsyncStatusResponseTests.java b/x-pack/plugin/async-search/src/test/java/org/elasticsearch/xpack/search/AsyncStatusResponseTests.java index 5aab26b3eba58..653ae8cafc531 100644 --- a/x-pack/plugin/async-search/src/test/java/org/elasticsearch/xpack/search/AsyncStatusResponseTests.java +++ b/x-pack/plugin/async-search/src/test/java/org/elasticsearch/xpack/search/AsyncStatusResponseTests.java @@ -13,7 +13,7 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.search.internal.InternalSearchResponse; +import org.elasticsearch.search.SearchResponseUtils; import org.elasticsearch.test.AbstractWireSerializingTestCase; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; @@ -316,10 +316,8 @@ public void testGetStatusFromStoredSearchFailedShardsScenario() { int totalShards = randomIntBetween(1, Integer.MAX_VALUE); int successfulShards = randomIntBetween(0, totalShards); int skippedShards = randomIntBetween(0, successfulShards); - InternalSearchResponse internalSearchResponse = InternalSearchResponse.EMPTY_WITH_TOTAL_HITS; SearchResponse.Clusters clusters = new SearchResponse.Clusters(100, 99, 1); - SearchResponse searchResponse = new SearchResponse( - internalSearchResponse, + SearchResponse searchResponse = SearchResponseUtils.emptyWithTotalHits( null, totalShards, successfulShards, @@ -343,10 +341,8 @@ public void testGetStatusFromStoredSearchWithEmptyClustersSuccessfullyCompleted( int totalShards = randomIntBetween(1, Integer.MAX_VALUE); int successfulShards = randomIntBetween(0, totalShards); int skippedShards = randomIntBetween(0, successfulShards); - InternalSearchResponse internalSearchResponse = InternalSearchResponse.EMPTY_WITH_TOTAL_HITS; - SearchResponse searchResponse = new SearchResponse( - internalSearchResponse, + SearchResponse searchResponse = SearchResponseUtils.emptyWithTotalHits( null, totalShards, successfulShards, @@ -370,7 +366,6 @@ public void testGetStatusFromStoredSearchWithNonEmptyClustersSuccessfullyComplet int totalShards = randomIntBetween(1, Integer.MAX_VALUE); int successfulShards = randomIntBetween(0, totalShards); int skippedShards = randomIntBetween(0, successfulShards); - InternalSearchResponse internalSearchResponse = InternalSearchResponse.EMPTY_WITH_TOTAL_HITS; int totalClusters; int successfulClusters; @@ -390,8 +385,7 @@ public void testGetStatusFromStoredSearchWithNonEmptyClustersSuccessfullyComplet skippedClusters = totalClusters - (successfulClusters + partial); clusters = AsyncSearchResponseTests.createCCSClusterObjects(80, 80, true, successfulClusters, skippedClusters, partial); } - SearchResponse searchResponse = new SearchResponse( - internalSearchResponse, + SearchResponse searchResponse = SearchResponseUtils.emptyWithTotalHits( null, totalShards, successfulShards, @@ -421,7 +415,6 @@ public void testGetStatusFromStoredSearchWithNonEmptyClustersStillRunning() 
{ int totalShards = randomIntBetween(1, Integer.MAX_VALUE); int successfulShards = randomIntBetween(0, totalShards); int skippedShards = randomIntBetween(0, successfulShards); - InternalSearchResponse internalSearchResponse = InternalSearchResponse.EMPTY_WITH_TOTAL_HITS; int successful = randomInt(10); int partial = randomInt(10); int skipped = randomInt(10); @@ -437,8 +430,7 @@ public void testGetStatusFromStoredSearchWithNonEmptyClustersStillRunning() { } SearchResponse.Clusters clusters = AsyncSearchResponseTests.createCCSClusterObjects(100, 99, true, successful, skipped, partial); - SearchResponse searchResponse = new SearchResponse( - internalSearchResponse, + SearchResponse searchResponse = SearchResponseUtils.emptyWithTotalHits( null, totalShards, successfulShards, diff --git a/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/action/TransportDeleteAutoscalingPolicyActionIT.java b/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/action/TransportDeleteAutoscalingPolicyActionIT.java index f65710cffe9f9..f0c96255dfe3b 100644 --- a/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/action/TransportDeleteAutoscalingPolicyActionIT.java +++ b/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/action/TransportDeleteAutoscalingPolicyActionIT.java @@ -43,7 +43,7 @@ public void testDeletePolicy() { final GetAutoscalingPolicyAction.Request getRequest = new GetAutoscalingPolicyAction.Request(policy.name()); final ResourceNotFoundException e = expectThrows( ResourceNotFoundException.class, - () -> client().execute(GetAutoscalingPolicyAction.INSTANCE, getRequest).actionGet() + client().execute(GetAutoscalingPolicyAction.INSTANCE, getRequest) ); assertThat(e.getMessage(), equalTo("autoscaling policy with name [" + policy.name() + "] does not exist")); } diff --git a/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/action/TransportGetAutoscalingPolicyActionIT.java b/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/action/TransportGetAutoscalingPolicyActionIT.java index 201b56f1db3a1..0b23a69179f36 100644 --- a/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/action/TransportGetAutoscalingPolicyActionIT.java +++ b/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/action/TransportGetAutoscalingPolicyActionIT.java @@ -38,7 +38,7 @@ public void testGetNonExistentPolicy() { final GetAutoscalingPolicyAction.Request getRequest = new GetAutoscalingPolicyAction.Request(name); final ResourceNotFoundException e = expectThrows( ResourceNotFoundException.class, - () -> client().execute(GetAutoscalingPolicyAction.INSTANCE, getRequest).actionGet() + client().execute(GetAutoscalingPolicyAction.INSTANCE, getRequest) ); assertThat(e.getMessage(), containsString("autoscaling policy with name [" + name + "] does not exist")); } diff --git a/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/storage/FrozenStorageDeciderIT.java b/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/storage/FrozenStorageDeciderIT.java index 13056ed2e4d5e..13e7d3aca1501 100644 --- a/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/storage/FrozenStorageDeciderIT.java +++ 
b/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/storage/FrozenStorageDeciderIT.java
@@ -31,8 +31,8 @@ public void testScale() throws Exception {
             capacity().results().get("frozen").requiredCapacity().total().storage(),
             equalTo(
                 ByteSizeValue.ofBytes(
-                    (long) (statsResponse.getPrimaries().store.totalDataSetSize().getBytes()
-                        * FrozenStorageDeciderService.DEFAULT_PERCENTAGE) / 100
+                    (long) (statsResponse.getPrimaries().store.totalDataSetSizeInBytes() * FrozenStorageDeciderService.DEFAULT_PERCENTAGE)
+                        / 100
                 )
             )
         );
diff --git a/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/storage/ProactiveStorageIT.java b/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/storage/ProactiveStorageIT.java
index c5e062df5e77c..d84c5977cba93 100644
--- a/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/storage/ProactiveStorageIT.java
+++ b/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/storage/ProactiveStorageIT.java
@@ -67,7 +67,7 @@ public void testScaleUp() throws IOException, InterruptedException {
         capacity();
         IndicesStatsResponse stats = indicesAdmin().prepareStats(dsName).clear().setStore(true).get();
-        long used = stats.getTotal().getStore().getSizeInBytes();
+        long used = stats.getTotal().getStore().sizeInBytes();
         long maxShardSize = Arrays.stream(stats.getShards()).mapToLong(s -> s.getStats().getStore().sizeInBytes()).max().orElseThrow();
         // As long as usage is above low watermark, we will trigger a proactive scale up, since the simulated shards have an in-sync
         // set and therefore allocating these does not skip the low watermark check in the disk threshold decider.
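Editor's note: the FrozenStorageDeciderIT hunk above asserts the frozen-tier capacity that autoscaling derives from the store stats, and the new grouping keeps the same semantics as the old one: the cast in `(long) (total * percentage) / 100` truncates the product first and only then performs integer division. A minimal, self-contained sketch of that arithmetic follows; the class name, method name, and the 5.0 value for DEFAULT_PERCENTAGE are assumptions for illustration, not the actual FrozenStorageDeciderService code.

import java.util.Arrays;

// Illustrative sketch only, not the decider implementation.
public class FrozenStorageEstimateSketch {
    // Assumed value; the real FrozenStorageDeciderService.DEFAULT_PERCENTAGE may differ.
    static final double DEFAULT_PERCENTAGE = 5.0;

    // Mirrors the assertion in FrozenStorageDeciderIT: scale the total data set
    // size by a percentage, truncate to long, then integer-divide by 100.
    static long requiredFrozenStorageBytes(long totalDataSetSizeInBytes) {
        return (long) (totalDataSetSizeInBytes * DEFAULT_PERCENTAGE) / 100;
    }

    public static void main(String[] args) {
        // Two hypothetical 1 GiB shards: 2147483648 bytes in total.
        long total = Arrays.stream(new long[] { 1L << 30, 1L << 30 }).sum();
        // Prints 107374182, i.e. ~5% of 2 GiB after truncating integer division.
        System.out.println(requiredFrozenStorageBytes(total));
    }
}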
diff --git a/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageIT.java b/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageIT.java index 5c097cdc24ed1..5f724509ec98a 100644 --- a/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageIT.java +++ b/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageIT.java @@ -78,7 +78,7 @@ public void testScaleUp() throws InterruptedException { capacity(); IndicesStatsResponse stats = indicesAdmin().prepareStats(indexName).clear().setStore(true).get(); - long used = stats.getTotal().getStore().getSizeInBytes(); + long used = stats.getTotal().getStore().sizeInBytes(); long minShardSize = Arrays.stream(stats.getShards()).mapToLong(s -> s.getStats().getStore().sizeInBytes()).min().orElseThrow(); long maxShardSize = Arrays.stream(stats.getShards()).mapToLong(s -> s.getStats().getStore().sizeInBytes()).max().orElseThrow(); long enoughSpace = used + HIGH_WATERMARK_BYTES + 1; @@ -274,14 +274,14 @@ public void testScaleWhileShrinking() throws Exception { refresh(); IndicesStatsResponse stats = indicesAdmin().prepareStats(indexName).clear().setStore(true).get(); - long used = stats.getTotal().getStore().getSizeInBytes(); + long used = stats.getTotal().getStore().sizeInBytes(); long maxShardSize = Arrays.stream(stats.getShards()).mapToLong(s -> s.getStats().getStore().sizeInBytes()).max().orElseThrow(); Map byNode = Arrays.stream(stats.getShards()) .collect( Collectors.groupingBy( s -> s.getShardRouting().currentNodeId(), - Collectors.summingLong(s -> s.getStats().getStore().getSizeInBytes()) + Collectors.summingLong(s -> s.getStats().getStore().sizeInBytes()) ) ); @@ -427,7 +427,7 @@ public void testScaleDuringSplitOrClone() throws Exception { refresh(); IndicesStatsResponse stats = indicesAdmin().prepareStats(indexName).clear().setStore(true).get(); - long used = stats.getTotal().getStore().getSizeInBytes(); + long used = stats.getTotal().getStore().sizeInBytes(); long enoughSpace = used + HIGH_WATERMARK_BYTES + 1; diff --git a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/BlobCacheMetrics.java b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/BlobCacheMetrics.java index 1766d8fe47820..7ca37f376045f 100644 --- a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/BlobCacheMetrics.java +++ b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/BlobCacheMetrics.java @@ -20,19 +20,19 @@ public class BlobCacheMetrics { public BlobCacheMetrics(MeterRegistry meterRegistry) { this( meterRegistry.registerLongCounter( - "elasticsearch.blob_cache.miss_that_triggered_read", + "es.blob_cache.miss_that_triggered_read.total", "The number of times there was a cache miss that triggered a read from the blob store", "count" ), meterRegistry.registerLongCounter( - "elasticsearch.blob_cache.count_of_evicted_used_regions", + "es.blob_cache.count_of_evicted_used_regions.total", "The number of times a cache entry was evicted where the frequency was not zero", "entries" ), meterRegistry.registerLongHistogram( - "elasticsearch.blob_cache.cache_miss_load_times", - "The timing data for populating entries in the blob store resulting from a cache miss.", - "count" + "es.blob_cache.cache_miss_load_times.histogram", + "The time in microseconds for populating entries in the blob 
store resulting from a cache miss, expressed as a histogram.", + "micros" ) ); } diff --git a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java index be95f5c883de8..5e8933f86ae7d 100644 --- a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java +++ b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java @@ -55,6 +55,7 @@ import java.util.concurrent.ConcurrentLinkedQueue; import java.util.concurrent.ExecutionException; import java.util.concurrent.Executor; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.LongAdder; import java.util.function.IntConsumer; @@ -815,7 +816,7 @@ public int populateAndRead( ) throws Exception { // We are interested in the total time that the system spends when fetching a result (including time spent queuing), so we start // our measurement here. - final long startTime = threadPool.relativeTimeInMillis(); + final long startTime = threadPool.relativeTimeInNanos(); RangeMissingHandler writerInstrumentationDecorator = ( SharedBytes.IO channel, int channelPos, @@ -823,7 +824,7 @@ public int populateAndRead( int length, IntConsumer progressUpdater) -> { writer.fillCacheRange(channel, channelPos, relativePos, length, progressUpdater); - var elapsedTime = threadPool.relativeTimeInMillis() - startTime; + var elapsedTime = TimeUnit.NANOSECONDS.toMicros(threadPool.relativeTimeInNanos() - startTime); SharedBlobCacheService.this.blobCacheMetrics.getCacheMissLoadTimes().record(elapsedTime); SharedBlobCacheService.this.blobCacheMetrics.getCacheMissCounter().increment(); }; diff --git a/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/CcrRepositoryIT.java b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/CcrRepositoryIT.java index 5ad54fe3ba101..de6992a67813d 100644 --- a/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/CcrRepositoryIT.java +++ b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/CcrRepositoryIT.java @@ -538,7 +538,7 @@ public void testCcrRepositoryFetchesSnapshotShardSizeFromIndexShardStoreStats() assertThat(indexShardSnapshotStatus.getStage(), is(IndexShardSnapshotStatus.Stage.DONE)); assertThat( indexShardSnapshotStatus.getTotalSize(), - equalTo(indexStats.getIndexShards().get(shardId).getPrimary().getStore().getSizeInBytes()) + equalTo(indexStats.getIndexShards().get(shardId).getPrimary().getStore().sizeInBytes()) ); } @@ -595,7 +595,7 @@ public void testCcrRepositoryFetchesSnapshotShardSizeFromIndexShardStoreStats() assertThat( "Snapshot shard size fetched for follower shard [" + shardId + "] does not match leader store size", fetchedSnapshotShardSizes.get(shardId), - equalTo(indexStats.getIndexShards().get(shardId).getPrimary().getStore().getSizeInBytes()) + equalTo(indexStats.getIndexShards().get(shardId).getPrimary().getStore().sizeInBytes()) ); } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java index b90b203e2d29f..c99726803e00e 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java +++ 
b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java @@ -517,7 +517,7 @@ public IndexShardSnapshotStatus.Copy getShardSnapshotStatus(SnapshotId snapshotI final ShardRouting shardRouting = shardStats.getShardRouting(); if (shardRouting.shardId().id() == shardId.getId() && shardRouting.primary() && shardRouting.active()) { // we only care about the shard size here for shard allocation, populate the rest with dummy values - final long totalSize = shardStats.getStats().getStore().getSizeInBytes(); + final long totalSize = shardStats.getStats().getStore().sizeInBytes(); return IndexShardSnapshotStatus.newDone(0L, 0L, 1, 1, totalSize, totalSize, DUMMY_GENERATION); } } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestCcrStatsAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestCcrStatsAction.java index 6e5c2e4f396ca..e394f708b07f5 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestCcrStatsAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestCcrStatsAction.java @@ -13,7 +13,7 @@ import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.action.RestChunkedToXContentListener; +import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener; import org.elasticsearch.xpack.ccr.Ccr; import org.elasticsearch.xpack.core.ccr.action.CcrStatsAction; @@ -43,7 +43,7 @@ protected RestChannelConsumer prepareRequest(final RestRequest restRequest, fina request, new ThreadedActionListener<>( client.threadPool().executor(Ccr.CCR_THREAD_POOL_NAME), - new RestChunkedToXContentListener<>(channel) + new RestRefCountedChunkedToXContentListener<>(channel) ) ); } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestFollowInfoAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestFollowInfoAction.java index 6607b532c56e0..4b3ac9f605d3f 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestFollowInfoAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestFollowInfoAction.java @@ -11,7 +11,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.action.RestChunkedToXContentListener; +import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener; import org.elasticsearch.xpack.core.ccr.action.FollowInfoAction; import java.util.List; @@ -34,7 +34,7 @@ public String getName() { protected RestChannelConsumer prepareRequest(final RestRequest restRequest, final NodeClient client) { final FollowInfoAction.Request request = new FollowInfoAction.Request(); request.setFollowerIndices(Strings.splitStringByCommaToArray(restRequest.param("index"))); - return channel -> client.execute(FollowInfoAction.INSTANCE, request, new RestChunkedToXContentListener<>(channel)); + return channel -> client.execute(FollowInfoAction.INSTANCE, request, new RestRefCountedChunkedToXContentListener<>(channel)); } } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestFollowStatsAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestFollowStatsAction.java index 07fdaef94637c..7592db0480b92 100644 --- 
a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestFollowStatsAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/rest/RestFollowStatsAction.java @@ -14,7 +14,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.action.RestChunkedToXContentListener; +import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener; import org.elasticsearch.xpack.ccr.Ccr; import org.elasticsearch.xpack.core.ccr.action.FollowStatsAction; @@ -45,7 +45,7 @@ protected RestChannelConsumer prepareRequest(final RestRequest restRequest, fina request, new ThreadedActionListener<>( client.threadPool().executor(Ccr.CCR_THREAD_POOL_NAME), - new RestChunkedToXContentListener<>(channel) + new RestRefCountedChunkedToXContentListener<>(channel) ) ); } diff --git a/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/snapshots/sourceonly/SourceOnlySnapshotIT.java b/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/snapshots/sourceonly/SourceOnlySnapshotIT.java index d1599c8b6a827..9d3821d64626f 100644 --- a/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/snapshots/sourceonly/SourceOnlySnapshotIT.java +++ b/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/snapshots/sourceonly/SourceOnlySnapshotIT.java @@ -144,14 +144,14 @@ public void testSnapshotAndRestoreWithNested() throws Exception { assertMappings(sourceIdx, requireRouting, true); SearchPhaseExecutionException e = expectThrows( SearchPhaseExecutionException.class, - () -> prepareSearch(sourceIdx).setQuery(QueryBuilders.idsQuery().addIds("" + randomIntBetween(0, builders.length))).get() + prepareSearch(sourceIdx).setQuery(QueryBuilders.idsQuery().addIds("" + randomIntBetween(0, builders.length))) ); assertTrue(e.toString().contains("_source only indices can't be searched or filtered")); // can-match phase pre-filters access to non-existing field assertHitCount(prepareSearch(sourceIdx).setQuery(QueryBuilders.termQuery("field1", "bar")), 0); // make sure deletes do not work String idToDelete = "" + randomIntBetween(0, builders.length); - expectThrows(ClusterBlockException.class, () -> client().prepareDelete(sourceIdx, idToDelete).setRouting("r" + idToDelete).get()); + expectThrows(ClusterBlockException.class, client().prepareDelete(sourceIdx, idToDelete).setRouting("r" + idToDelete)); internalCluster().ensureAtLeastNumDataNodes(2); setReplicaCount(1, sourceIdx); ensureGreen(sourceIdx); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java index 133819cd601d7..f10e7cf170bde 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackSettings.java @@ -160,6 +160,12 @@ public Iterator> settings() { Property.NodeScope ); + /** Optional setting to prevent startup if required providers are not discovered at runtime */ + public static final Setting> FIPS_REQUIRED_PROVIDERS = Setting.stringListSetting( + "xpack.security.fips_mode.required_providers", + Property.NodeScope + ); + /** * Setting for enabling the enrollment process, ie the enroll APIs are enabled, and the initial cluster node generates and displays * enrollment tokens (for Kibana and sometimes for ES nodes) when starting up for the first time. 
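Editor's note: the XPackSettings hunk above only declares xpack.security.fips_mode.required_providers as a node-scope string-list setting; the enforcement is not shown in this diff. As a hedged sketch of how such a check could work (the class name, method name, and error message are assumptions, not the actual bootstrap check), the configured names can be compared against the security providers installed in the JVM via the standard java.security API:

import java.security.Security;
import java.util.Arrays;
import java.util.List;
import java.util.Locale;
import java.util.Set;
import java.util.stream.Collectors;

// Illustrative sketch only, not the implementation behind
// xpack.security.fips_mode.required_providers.
public class RequiredProvidersCheckSketch {
    static void ensureRequiredProviders(List<String> requiredProviders) {
        // Names of all security providers currently installed in this JVM.
        Set<String> installed = Arrays.stream(Security.getProviders())
            .map(p -> p.getName().toLowerCase(Locale.ROOT))
            .collect(Collectors.toSet());
        for (String required : requiredProviders) {
            if (installed.contains(required.toLowerCase(Locale.ROOT)) == false) {
                // Failing here would prevent startup, matching the setting's javadoc.
                throw new IllegalStateException("required provider [" + required + "] is not installed");
            }
        }
    }

    public static void main(String[] args) {
        ensureRequiredProviders(List.of("SUN")); // typically passes on a stock JDK
    }
}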
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/application/EnterpriseSearchFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/application/EnterpriseSearchFeatureSetUsage.java index 505d85c764b17..e88d52e6d8080 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/application/EnterpriseSearchFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/application/EnterpriseSearchFeatureSetUsage.java @@ -23,7 +23,7 @@ public class EnterpriseSearchFeatureSetUsage extends XPackFeatureSet.Usage { static final TransportVersion BEHAVIORAL_ANALYTICS_TRANSPORT_VERSION = TransportVersions.V_8_8_1; - static final TransportVersion QUERY_RULES_TRANSPORT_VERSION = TransportVersions.V_8_500_046; + static final TransportVersion QUERY_RULES_TRANSPORT_VERSION = TransportVersions.V_8_500_061; public static final String SEARCH_APPLICATIONS = "search_applications"; public static final String ANALYTICS_COLLECTIONS = "analytics_collections"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/async/AsyncResultsService.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/async/AsyncResultsService.java index 945084395448a..efc31aacf5e20 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/async/AsyncResultsService.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/async/AsyncResultsService.java @@ -15,6 +15,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.TriConsumer; +import org.elasticsearch.core.RefCounted; import org.elasticsearch.core.TimeValue; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.tasks.TaskManager; @@ -144,7 +145,17 @@ private void getSearchResponseFromIndex( long nowInMillis, ActionListener listener ) { - store.getResponse(searchId, true, listener.delegateFailure((l, response) -> sendFinalResponse(request, response, nowInMillis, l))); + store.getResponse(searchId, true, listener.delegateFailure((l, response) -> { + try { + sendFinalResponse(request, response, nowInMillis, l); + } finally { + if (response instanceof StoredAsyncResponse storedAsyncResponse + && storedAsyncResponse.getResponse() instanceof RefCounted refCounted) { + refCounted.decRef(); + } + } + + })); } private void sendFinalResponse(GetAsyncResultRequest request, Response response, long nowInMillis, ActionListener listener) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/async/StoredAsyncTask.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/async/StoredAsyncTask.java index 9f75ac0f5f564..9f420f7521cdb 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/async/StoredAsyncTask.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/async/StoredAsyncTask.java @@ -23,7 +23,7 @@ public abstract class StoredAsyncTask extends C private final AsyncExecutionId asyncExecutionId; private final Map originHeaders; private volatile long expirationTimeMillis; - private final List> completionListeners; + protected final List> completionListeners; @SuppressWarnings("this-escape") public StoredAsyncTask( @@ -79,7 +79,8 @@ public synchronized void removeCompletionListener(ActionListener liste */ public synchronized void onResponse(Response response) { for (ActionListener listener : completionListeners) { - listener.onResponse(response); + 
response.incRef(); + ActionListener.respondAndRelease(listener, response); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/common/notifications/AbstractAuditor.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/common/notifications/AbstractAuditor.java index 03a22c88c577e..1c633197181b8 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/common/notifications/AbstractAuditor.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/common/notifications/AbstractAuditor.java @@ -67,11 +67,9 @@ protected AbstractAuditor( ) { this(client, auditIndex, templateConfig.getTemplateName(), () -> { - try { + try (var parser = JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, templateConfig.loadBytes())) { return new PutComposableIndexTemplateAction.Request(templateConfig.getTemplateName()).indexTemplate( - ComposableIndexTemplate.parse( - JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, templateConfig.loadBytes()) - ) + ComposableIndexTemplate.parse(parser) ).masterNodeTimeout(MASTER_TIMEOUT); } catch (IOException e) { throw new ElasticsearchParseException("unable to parse composable template " + templateConfig.getTemplateName(), e); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/downsample/DownsampleIndexerAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/downsample/DownsampleIndexerAction.java index 10ae1846e91dc..ef93ab914f08f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/downsample/DownsampleIndexerAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/downsample/DownsampleIndexerAction.java @@ -69,7 +69,7 @@ public Request() {} public Request(StreamInput in) throws IOException { super(in); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_040) && in.readBoolean()) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061) && in.readBoolean()) { this.indexStartTimeMillis = in.readVLong(); this.indexEndTimeMillis = in.readVLong(); } else { @@ -132,7 +132,7 @@ public Task createTask(long id, String type, String action, TaskId parentTaskId, @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_040)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { out.writeBoolean(true); out.writeVLong(indexStartTimeMillis); out.writeVLong(indexEndTimeMillis); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/downsample/DownsampleShardStatus.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/downsample/DownsampleShardStatus.java index 8f254043cf7c2..2700ed844d063 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/downsample/DownsampleShardStatus.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/downsample/DownsampleShardStatus.java @@ -144,7 +144,7 @@ public DownsampleShardStatus(StreamInput in) throws IOException { numSent = in.readLong(); numIndexed = in.readLong(); numFailed = in.readLong(); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_040) && in.readBoolean()) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061) && in.readBoolean()) { totalShardDocCount = in.readVLong(); lastSourceTimestamp = in.readVLong(); lastTargetTimestamp = in.readVLong(); @@ -254,7 +254,7 @@ public void 
writeTo(StreamOutput out) throws IOException { out.writeLong(numSent); out.writeLong(numIndexed); out.writeLong(numFailed); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_040)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { out.writeBoolean(true); out.writeVLong(totalShardDocCount); out.writeVLong(lastSourceTimestamp); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DownsampleAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DownsampleAction.java index 22a2c3a880ce5..818b45c2b5d00 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DownsampleAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DownsampleAction.java @@ -91,7 +91,7 @@ public DownsampleAction(final DateHistogramInterval fixedInterval, final TimeVal public DownsampleAction(StreamInput in) throws IOException { this( new DateHistogramInterval(in), - in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_054) + in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061) ? TimeValue.parseTimeValue(in.readString(), WAIT_TIMEOUT_FIELD.getPreferredName()) : DEFAULT_WAIT_TIMEOUT ); @@ -100,7 +100,7 @@ public DownsampleAction(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { fixedInterval.writeTo(out); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_054)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { out.writeString(waitTimeout.getStringRep()); } else { out.writeString(DEFAULT_WAIT_TIMEOUT.getStringRep()); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleExplainResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleExplainResponse.java index 890045101c35c..c3c9fa88a1a96 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleExplainResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleExplainResponse.java @@ -227,24 +227,28 @@ private IndexLifecycleExplainResponse( if (policyName == null) { throw new IllegalArgumentException("[" + POLICY_NAME_FIELD.getPreferredName() + "] cannot be null for managed index"); } - // check to make sure that step details are either all null or all set. 
- long numNull = Stream.of(phase, action, step).filter(Objects::isNull).count(); - if (numNull > 0 && numNull < 3) { - throw new IllegalArgumentException( - "managed index response must have complete step details [" - + PHASE_FIELD.getPreferredName() - + "=" - + phase - + ", " - + ACTION_FIELD.getPreferredName() - + "=" - + action - + ", " - + STEP_FIELD.getPreferredName() - + "=" - + step - + "]" - ); + + // If at least one detail is null, but not *all* are null + if (Stream.of(phase, action, step).anyMatch(Objects::isNull) + && Stream.of(phase, action, step).allMatch(Objects::isNull) == false) { + // …and it's not in the error step + if (ErrorStep.NAME.equals(step) == false) { + throw new IllegalArgumentException( + "managed index response must have complete step details [" + + PHASE_FIELD.getPreferredName() + + "=" + + phase + + ", " + + ACTION_FIELD.getPreferredName() + + "=" + + action + + ", " + + STEP_FIELD.getPreferredName() + + "=" + + step + + "]" + ); + } } } else { if (policyName != null diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/PutInferenceModelAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/PutInferenceModelAction.java index e6e4ea1001f68..d09b96f897e06 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/PutInferenceModelAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/inference/action/PutInferenceModelAction.java @@ -20,6 +20,8 @@ import org.elasticsearch.xcontent.ToXContentObject; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.core.ml.job.messages.Messages; +import org.elasticsearch.xpack.core.ml.utils.MlStrings; import java.io.IOException; import java.util.Objects; @@ -82,7 +84,16 @@ public void writeTo(StreamOutput out) throws IOException { @Override public ActionRequestValidationException validate() { - return null; + ActionRequestValidationException validationException = new ActionRequestValidationException(); + if (MlStrings.isValidId(this.modelId) == false) { + validationException.addValidationError(Messages.getMessage(Messages.INVALID_ID, "model_id", this.modelId)); + } + + if (validationException.validationErrors().isEmpty() == false) { + return validationException; + } else { + return null; + } } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlTasks.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlTasks.java index 7cef2bed04ce3..6209ead0cc6a1 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlTasks.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlTasks.java @@ -229,9 +229,13 @@ public static SnapshotUpgradeState getSnapshotUpgradeState(@Nullable PersistentT public static DatafeedState getDatafeedState(String datafeedId, @Nullable PersistentTasksCustomMetadata tasks) { PersistentTasksCustomMetadata.PersistentTask task = getDatafeedTask(datafeedId, tasks); + return getDatafeedState(task); + } + + public static DatafeedState getDatafeedState(PersistentTasksCustomMetadata.PersistentTask task) { if (task == null) { // If we haven't started a datafeed then there will be no persistent task, - // which is the same as if the datafeed was't started + // which is the same as if the datafeed wasn't started return DatafeedState.STOPPED; } DatafeedState taskState = (DatafeedState) task.getState(); diff --git 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelDefinitionPartAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelDefinitionPartAction.java index 61b39e40a065c..5341efeec1094 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelDefinitionPartAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelDefinitionPartAction.java @@ -91,7 +91,7 @@ public Request(StreamInput in) throws IOException { this.part = in.readVInt(); this.totalDefinitionLength = in.readVLong(); this.totalParts = in.readVInt(); - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_043)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { this.allowOverwriting = in.readBoolean(); } else { this.allowOverwriting = false; @@ -148,7 +148,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVInt(part); out.writeVLong(totalDefinitionLength); out.writeVInt(totalParts); - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_043)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { out.writeBoolean(allowOverwriting); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelVocabularyAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelVocabularyAction.java index 71d4ebdcb6ea5..c153cbc2c039b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelVocabularyAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelVocabularyAction.java @@ -91,7 +91,7 @@ public Request(StreamInput in) throws IOException { } else { this.scores = List.of(); } - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_043)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { this.allowOverwriting = in.readBoolean(); } else { this.allowOverwriting = false; @@ -139,7 +139,7 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { out.writeCollection(scores, StreamOutput::writeDouble); } - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_043)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { out.writeBoolean(allowOverwriting); } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ModelAliasMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/ModelAliasMetadata.java similarity index 99% rename from x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ModelAliasMetadata.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/ModelAliasMetadata.java index 0847479489ec2..1d6c5e564a442 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ModelAliasMetadata.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/ModelAliasMetadata.java @@ -5,7 +5,7 @@ * 2.0. 
*/ -package org.elasticsearch.xpack.ml.inference; +package org.elasticsearch.xpack.core.ml.inference; import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/RoutingInfo.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/RoutingInfo.java index fd2f3627e3fb1..826b0785aa563 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/RoutingInfo.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/RoutingInfo.java @@ -86,6 +86,10 @@ public int getTargetAllocations() { return targetAllocations; } + public int getFailedAllocations() { + return state == RoutingState.FAILED ? targetAllocations : 0; + } + public RoutingState getState() { return state; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignment.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignment.java index d27d325a5c596..8147dabda7b48 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignment.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignment.java @@ -287,6 +287,10 @@ public int totalTargetAllocations() { return nodeRoutingTable.values().stream().mapToInt(RoutingInfo::getTargetAllocations).sum(); } + public int totalFailedAllocations() { + return nodeRoutingTable.values().stream().mapToInt(RoutingInfo::getFailedAllocations).sum(); + } + @Override public boolean equals(Object o) { if (this == o) return true; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignmentMetadata.java similarity index 98% rename from x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentMetadata.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignmentMetadata.java index aabedfc4351b5..36fec9ec7b243 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentMetadata.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignmentMetadata.java @@ -5,7 +5,7 @@ * 2.0. 
*/ -package org.elasticsearch.xpack.ml.inference.assignment; +package org.elasticsearch.xpack.core.ml.inference.assignment; import org.elasticsearch.ResourceAlreadyExistsException; import org.elasticsearch.ResourceNotFoundException; @@ -23,7 +23,6 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentParser; -import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignment; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import java.io.IOException; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentUtils.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignmentUtils.java similarity index 76% rename from x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentUtils.java rename to x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignmentUtils.java index 3640d8dcb2808..fa0ce4a095ba0 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentUtils.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignmentUtils.java @@ -5,15 +5,10 @@ * 2.0. */ -package org.elasticsearch.xpack.ml.inference.assignment; +package org.elasticsearch.xpack.core.ml.inference.assignment; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingInfo; -import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingInfoUpdate; -import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingState; -import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingStateAndReason; -import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignment; -import org.elasticsearch.xpack.ml.inference.ModelAliasMetadata; +import org.elasticsearch.xpack.core.ml.inference.ModelAliasMetadata; import java.util.List; import java.util.Optional; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/CategorizationAnalyzerConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/CategorizationAnalyzerConfig.java index ac934a71ec311..0337000a201f2 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/CategorizationAnalyzerConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/CategorizationAnalyzerConfig.java @@ -294,11 +294,14 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws */ public Map asMap(NamedXContentRegistry xContentRegistry) throws IOException { String strRep = Strings.toString(this); - XContentParser parser = JsonXContent.jsonXContent.createParser( - XContentParserConfiguration.EMPTY.withRegistry(xContentRegistry).withDeprecationHandler(LoggingDeprecationHandler.INSTANCE), - strRep - ); - return parser.mapOrdered(); + try ( + XContentParser parser = JsonXContent.jsonXContent.createParser( + XContentParserConfiguration.EMPTY.withRegistry(xContentRegistry).withDeprecationHandler(LoggingDeprecationHandler.INSTANCE), + strRep + ) + ) { + return parser.mapOrdered(); + } } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/MlIndexAndAlias.java 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/MlIndexAndAlias.java
index 5d1b2ef9a08e5..466aa907b790b 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/MlIndexAndAlias.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/MlIndexAndAlias.java
@@ -329,11 +329,9 @@ public static void installIndexTemplateIfRequired(
         }
         PutComposableIndexTemplateAction.Request request;
-        try {
+        try (var parser = JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, templateConfig.loadBytes())) {
             request = new PutComposableIndexTemplateAction.Request(templateConfig.getTemplateName()).indexTemplate(
-                ComposableIndexTemplate.parse(
-                    JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, templateConfig.loadBytes())
-                )
+                ComposableIndexTemplate.parse(parser)
             ).masterNodeTimeout(masterTimeout);
         } catch (IOException e) {
             throw new ElasticsearchParseException("unable to parse composable template " + templateConfig.getTemplateName(), e);
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/search/action/AsyncStatusResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/search/action/AsyncStatusResponse.java
index c2d15e54ed667..7596fe75b4173 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/search/action/AsyncStatusResponse.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/search/action/AsyncStatusResponse.java
@@ -139,7 +139,7 @@ public AsyncStatusResponse(StreamInput in) throws IOException {
         } else {
             this.clusters = null;
         }
-        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_040)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) {
             this.completionTimeMillis = in.readOptionalVLong();
         } else {
             this.completionTimeMillis = null;
@@ -164,7 +164,7 @@ public void writeTo(StreamOutput out) throws IOException {
             // optional since only CCS uses it; it is null for local-only searches
             out.writeOptionalWriteable(clusters);
         }
-        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_040)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) {
             out.writeOptionalVLong(completionTimeMillis);
         }
     }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/EnrollmentToken.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/EnrollmentToken.java
index b5444449af1f4..466caa11771a5 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/EnrollmentToken.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/EnrollmentToken.java
@@ -109,11 +109,14 @@ public static EnrollmentToken decodeFromString(String encoded) throws IOExceptio
         if (Strings.isNullOrEmpty(encoded)) {
             throw new IOException("Cannot decode enrollment token from an empty string");
         }
-        final XContentParser jsonParser = JsonXContent.jsonXContent.createParser(
-            XContentParserConfiguration.EMPTY,
-            Base64.getDecoder().decode(encoded)
-        );
-        return EnrollmentToken.PARSER.parse(jsonParser, null);
+        try (
+            XContentParser jsonParser = JsonXContent.jsonXContent.createParser(
+                XContentParserConfiguration.EMPTY,
+                Base64.getDecoder().decode(encoded)
+            )
+        ) {
+            return EnrollmentToken.PARSER.parse(jsonParser, null);
+        }
     }

     @Override
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/ActionTypes.java
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/ActionTypes.java new file mode 100644 index 0000000000000..fbc08a0dee8aa --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/ActionTypes.java @@ -0,0 +1,23 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.core.security.action; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.ActionType; + +/** + * A collection of actions types for the Security plugin that need to be available in xpack.core.security and thus cannot be stored + * directly with their transport action implementation. + */ +public final class ActionTypes { + private ActionTypes() {}; + + public static final ActionType RELOAD_REMOTE_CLUSTER_CREDENTIALS_ACTION = ActionType.localOnly( + "cluster:admin/xpack/security/remote_cluster_credentials/reload" + ); +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/CrossClusterApiKeyRoleDescriptorBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/CrossClusterApiKeyRoleDescriptorBuilder.java index 0763c208abf64..9695aeae283e2 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/CrossClusterApiKeyRoleDescriptorBuilder.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/CrossClusterApiKeyRoleDescriptorBuilder.java @@ -91,10 +91,9 @@ public RoleDescriptor build() { } public static CrossClusterApiKeyRoleDescriptorBuilder parse(String access) throws IOException { - return CrossClusterApiKeyRoleDescriptorBuilder.PARSER.parse( - JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, access), - null - ); + try (var parser = JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, access)) { + return CrossClusterApiKeyRoleDescriptorBuilder.PARSER.parse(parser, null); + } } static void validate(RoleDescriptor roleDescriptor) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/GetApiKeyRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/GetApiKeyRequest.java index d76696dc4fe99..71e0c98fb0012 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/GetApiKeyRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/GetApiKeyRequest.java @@ -26,7 +26,7 @@ */ public final class GetApiKeyRequest extends ActionRequest { - static TransportVersion API_KEY_ACTIVE_ONLY_PARAM_TRANSPORT_VERSION = TransportVersions.V_8_500_054; + static TransportVersion API_KEY_ACTIVE_ONLY_PARAM_TRANSPORT_VERSION = TransportVersions.V_8_500_061; private final String realmName; private final String userName; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/AuthenticateResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/AuthenticateResponse.java index 5c75bf685c330..73ee4d1f27299 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/AuthenticateResponse.java +++ 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/AuthenticateResponse.java @@ -20,7 +20,7 @@ public class AuthenticateResponse extends ActionResponse implements ToXContent { - public static final TransportVersion VERSION_OPERATOR_FIELD = TransportVersions.V_8_500_040; + public static final TransportVersion VERSION_OPERATOR_FIELD = TransportVersions.V_8_500_061; private final Authentication authentication; private final boolean operator; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/TemplateRoleName.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/TemplateRoleName.java index 83a36510aa201..cf42d73c75131 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/TemplateRoleName.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/TemplateRoleName.java @@ -116,32 +116,35 @@ public void validate(ScriptService scriptService) { } private static List convertJsonToList(String evaluation) throws IOException { - final XContentParser parser = XContentFactory.xContent(XContentType.JSON) - .createParser(XContentParserConfiguration.EMPTY, evaluation); - XContentParser.Token token = parser.currentToken(); - if (token == null) { - token = parser.nextToken(); - } - if (token == XContentParser.Token.VALUE_STRING) { - return Collections.singletonList(parser.text()); - } else if (token == XContentParser.Token.START_ARRAY) { - return parser.list().stream().filter(Objects::nonNull).map(o -> { - if (o instanceof String) { - return (String) o; - } else { - throw new XContentParseException( - "Roles array may only contain strings but found [" + o.getClass().getName() + "] [" + o + "]" - ); - } - }).collect(Collectors.toList()); - } else { - throw new XContentParseException("Roles template must generate a string or an array of strings, but found [" + token + "]"); + try ( + XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(XContentParserConfiguration.EMPTY, evaluation) + ) { + XContentParser.Token token = parser.currentToken(); + if (token == null) { + token = parser.nextToken(); + } + if (token == XContentParser.Token.VALUE_STRING) { + return Collections.singletonList(parser.text()); + } else if (token == XContentParser.Token.START_ARRAY) { + return parser.list().stream().filter(Objects::nonNull).map(o -> { + if (o instanceof String) { + return (String) o; + } else { + throw new XContentParseException( + "Roles array may only contain strings but found [" + o.getClass().getName() + "] [" + o + "]" + ); + } + }).collect(Collectors.toList()); + } else { + throw new XContentParseException("Roles template must generate a string or an array of strings, but found [" + token + "]"); + } } } private String parseTemplate(ScriptService scriptService, Map parameters) throws IOException { - final XContentParser parser = XContentHelper.createParser(XContentParserConfiguration.EMPTY, template, XContentType.JSON); - return MustacheTemplateEvaluator.evaluate(scriptService, parser, parameters); + try (XContentParser parser = XContentHelper.createParser(XContentParserConfiguration.EMPTY, template, XContentType.JSON)) { + return MustacheTemplateEvaluator.evaluate(scriptService, parser, parameters); + } } private static BytesReference extractTemplate(XContentParser parser, Void ignore) throws IOException { diff --git 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/ExpressionParser.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/ExpressionParser.java index 5ec28dc68181e..e3f6b1aa450a4 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/ExpressionParser.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/ExpressionParser.java @@ -57,8 +57,8 @@ public static RoleMapperExpression parseObject(XContentParser parser, String id) * @param content The XContent (typically JSON) DSL representation of the expression */ public RoleMapperExpression parse(String name, XContentSource content) throws IOException { - try (InputStream stream = content.getBytes().streamInput()) { - return parse(name, content.parser(NamedXContentRegistry.EMPTY, stream)); + try (InputStream stream = content.getBytes().streamInput(); var parser = content.parser(NamedXContentRegistry.EMPTY, stream)) { + return parse(name, parser); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/IndexPrivilege.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/IndexPrivilege.java index 4968352439fb0..dc4e5d15d265f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/IndexPrivilege.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/IndexPrivilege.java @@ -78,7 +78,11 @@ public final class IndexPrivilege extends Privilege { private static final Automaton READ_CROSS_CLUSTER_AUTOMATON = patterns( "internal:transport/proxy/indices:data/read/*", ClusterSearchShardsAction.NAME, - TransportSearchShardsAction.TYPE.name() + TransportSearchShardsAction.TYPE.name(), + // cross clusters query for ESQL + "internal:data/read/esql/open_exchange", + "internal:data/read/esql/exchange", + "indices:data/read/esql/cluster" ); private static final Automaton CREATE_AUTOMATON = patterns( "indices:data/write/index*", diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/SystemPrivilege.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/SystemPrivilege.java index 2616b63df7c01..013d7cc21a54a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/SystemPrivilege.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/SystemPrivilege.java @@ -12,6 +12,7 @@ import org.elasticsearch.index.seqno.RetentionLeaseSyncAction; import org.elasticsearch.persistent.CompletionPersistentTaskAction; import org.elasticsearch.transport.TransportActionProxy; +import org.elasticsearch.xpack.core.security.action.ActionTypes; import org.elasticsearch.xpack.core.security.support.StringMatcher; import java.util.Collections; @@ -43,7 +44,8 @@ public final class SystemPrivilege extends Privilege { "indices:data/read/*", // needed for SystemIndexMigrator "indices:admin/refresh", // needed for SystemIndexMigrator "indices:admin/aliases", // needed for SystemIndexMigrator - TransportSearchShardsAction.TYPE.name() // added so this API can be called with the system user by other APIs + TransportSearchShardsAction.TYPE.name(), // added so this API can be called with the system user by other APIs + 
ActionTypes.RELOAD_REMOTE_CLUSTER_CREDENTIALS_ACTION.name() // needed for Security plugin reload of remote cluster credentials ); private static final Predicate<String> PREDICATE = (action) -> { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java index 474ba25e3e117..8004848f59235 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/KibanaOwnedReservedRoleDescriptors.java @@ -251,8 +251,12 @@ static RoleDescriptor kibanaSystem(String name) { "indices:admin/data_stream/lifecycle/put" ) .build(), - // Endpoint specific action responses. Kibana reads from these to display responses to the user. - RoleDescriptor.IndicesPrivileges.builder().indices(".logs-endpoint.action.responses-*").privileges("read").build(), + // Endpoint specific action responses. Kibana reads and writes (for third party agents) to the index + // to display action responses to the user. + RoleDescriptor.IndicesPrivileges.builder() + .indices(".logs-endpoint.action.responses-*") + .privileges("auto_configure", "read", "write") + .build(), // Endpoint specific actions. Kibana reads and writes to this index to track new actions and display them. RoleDescriptor.IndicesPrivileges.builder() .indices(".logs-endpoint.actions-*") diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/IndexTemplateRegistry.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/IndexTemplateRegistry.java index b0f1c78b0c99d..ddc565c3f46a6 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/IndexTemplateRegistry.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/IndexTemplateRegistry.java @@ -646,10 +646,8 @@ public void onFailure(Exception e) { protected static Map<String, ComposableIndexTemplate> parseComposableTemplates(IndexTemplateConfig...
config) { return Arrays.stream(config).collect(Collectors.toUnmodifiableMap(IndexTemplateConfig::getTemplateName, indexTemplateConfig -> { - try { - return ComposableIndexTemplate.parse( - JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, indexTemplateConfig.loadBytes()) - ); + try (var parser = JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, indexTemplateConfig.loadBytes())) { + return ComposableIndexTemplate.parse(parser); } catch (IOException e) { throw new AssertionError(e); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/QueryConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/QueryConfig.java index 2df00837f9a3a..8bf8a40c69b2a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/QueryConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/QueryConfig.java @@ -106,11 +106,14 @@ private static QueryBuilder queryFromXContent( NamedXContentRegistry namedXContentRegistry, DeprecationHandler deprecationHandler ) throws IOException { - QueryBuilder query = null; + final QueryBuilder query; XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().map(source); - XContentParser sourceParser = XContentType.JSON.xContent() - .createParser(namedXContentRegistry, deprecationHandler, BytesReference.bytes(xContentBuilder).streamInput()); - query = AbstractQueryBuilder.parseTopLevelQuery(sourceParser); + try ( + XContentParser sourceParser = XContentType.JSON.xContent() + .createParser(namedXContentRegistry, deprecationHandler, BytesReference.bytes(xContentBuilder).streamInput()) + ) { + query = AbstractQueryBuilder.parseTopLevelQuery(sourceParser); + } return query; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/pivot/AggregationConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/pivot/AggregationConfig.java index 763f328ecfa0b..095ada7ced411 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/pivot/AggregationConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/pivot/AggregationConfig.java @@ -139,13 +139,15 @@ private static AggregatorFactories.Builder aggregationsFromXContent( NamedXContentRegistry namedXContentRegistry, DeprecationHandler deprecationHandler ) throws IOException { - AggregatorFactories.Builder aggregations = null; - + final AggregatorFactories.Builder aggregations; XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().map(source); - XContentParser sourceParser = XContentType.JSON.xContent() - .createParser(namedXContentRegistry, deprecationHandler, BytesReference.bytes(xContentBuilder).streamInput()); - sourceParser.nextToken(); - aggregations = AggregatorFactories.parseAggregators(sourceParser); + try ( + XContentParser sourceParser = XContentType.JSON.xContent() + .createParser(namedXContentRegistry, deprecationHandler, BytesReference.bytes(xContentBuilder).streamInput()) + ) { + sourceParser.nextToken(); + aggregations = AggregatorFactories.parseAggregators(sourceParser); + } return aggregations; } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ClientHelperTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ClientHelperTests.java index b98a8abc019d0..df4f7828d1fed 100644 --- 
a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ClientHelperTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ClientHelperTests.java @@ -24,7 +24,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.Maps; import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.search.internal.InternalSearchResponse; +import org.elasticsearch.search.SearchResponseUtils; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.TransportVersionUtils; import org.elasticsearch.threadpool.ThreadPool; @@ -248,16 +248,7 @@ public void testExecuteWithHeadersNoHeaders() { PlainActionFuture<SearchResponse> searchFuture = new PlainActionFuture<>(); searchFuture.onResponse( - new SearchResponse( - InternalSearchResponse.EMPTY_WITH_TOTAL_HITS, - null, - 0, - 0, - 0, - 0L, - ShardSearchFailure.EMPTY_ARRAY, - SearchResponse.Clusters.EMPTY - ) + SearchResponseUtils.emptyWithTotalHits(null, 0, 0, 0, 0L, ShardSearchFailure.EMPTY_ARRAY, SearchResponse.Clusters.EMPTY) ); when(client.search(any())).thenReturn(searchFuture); assertExecutionWithOrigin(Collections.emptyMap(), client); @@ -272,16 +263,7 @@ public void testExecuteWithHeaders() { PlainActionFuture<SearchResponse> searchFuture = new PlainActionFuture<>(); searchFuture.onResponse( - new SearchResponse( - InternalSearchResponse.EMPTY_WITH_TOTAL_HITS, - null, - 0, - 0, - 0, - 0L, - ShardSearchFailure.EMPTY_ARRAY, - SearchResponse.Clusters.EMPTY - ) + SearchResponseUtils.emptyWithTotalHits(null, 0, 0, 0, 0L, ShardSearchFailure.EMPTY_ARRAY, SearchResponse.Clusters.EMPTY) ); when(client.search(any())).thenReturn(searchFuture); Map<String, String> headers = Map.of( @@ -307,16 +289,7 @@ public void testExecuteWithHeadersNoSecurityHeaders() { PlainActionFuture<SearchResponse> searchFuture = new PlainActionFuture<>(); searchFuture.onResponse( - new SearchResponse( - InternalSearchResponse.EMPTY_WITH_TOTAL_HITS, - null, - 0, - 0, - 0, - 0L, - ShardSearchFailure.EMPTY_ARRAY, - SearchResponse.Clusters.EMPTY - ) + SearchResponseUtils.emptyWithTotalHits(null, 0, 0, 0, 0L, ShardSearchFailure.EMPTY_ARRAY, SearchResponse.Clusters.EMPTY) ); when(client.search(any())).thenReturn(searchFuture); Map<String, String> unrelatedHeaders = Map.of(randomAlphaOfLength(10), "anything"); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/async/AsyncTaskServiceTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/async/AsyncTaskServiceTests.java index bc191349ea601..e6bf5d067741b 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/async/AsyncTaskServiceTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/async/AsyncTaskServiceTests.java @@ -216,11 +216,13 @@ public void testAutoCreateIndex() throws Exception { // To begin with, the results index should be auto-created.
AsyncExecutionId id = new AsyncExecutionId("0", new TaskId("N/A", 0)); AsyncSearchResponse resp = new AsyncSearchResponse(id.getEncoded(), true, true, 0L, 0L); - { + try { PlainActionFuture future = new PlainActionFuture<>(); indexService.createResponse(id.getDocId(), Collections.emptyMap(), resp, future); future.get(); assertSettings(); + } finally { + resp.decRef(); } // Delete the index, so we can test subsequent auto-create behaviour diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java index 88ee3d5529741..c63d7fa145472 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java @@ -14,7 +14,6 @@ import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.search.SearchResponseSections; import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; @@ -116,18 +115,24 @@ protected void doNextSearch(long waitTimeInNanos, ActionListener<SearchResponse> return; } - final SearchResponseSections sections = new SearchResponseSections( - new SearchHits(new SearchHit[0], new TotalHits(0, TotalHits.Relation.EQUAL_TO), 0), - null, - null, - false, - null, - null, - 1 - ); ActionListener.respondAndRelease( nextPhase, - new SearchResponse(sections, null, 1, 1, 0, 0, ShardSearchFailure.EMPTY_ARRAY, null) + new SearchResponse( + new SearchHits(new SearchHit[0], new TotalHits(0, TotalHits.Relation.EQUAL_TO), 0), + null, + null, + false, + null, + null, + 1, + null, + 1, + 1, + 0, + 0, + ShardSearchFailure.EMPTY_ARRAY, + null + ) ); } @@ -256,15 +261,6 @@ public boolean waitingForLatchCountDown() { @Override protected void doNextSearch(long waitTimeInNanos, ActionListener<SearchResponse> nextPhase) { ++searchOps; - final SearchResponseSections sections = new SearchResponseSections( - new SearchHits(new SearchHit[0], new TotalHits(0, TotalHits.Relation.EQUAL_TO), 0), - null, - null, - false, - null, - null, - 1 - ); if (processOps == 3) { awaitForLatch(); @@ -272,7 +268,22 @@ protected void doNextSearch(long waitTimeInNanos, ActionListener<SearchResponse> ActionListener.respondAndRelease( nextPhase, - new SearchResponse(sections, null, 1, 1, 0, 0, ShardSearchFailure.EMPTY_ARRAY, null) + new SearchResponse( + new SearchHits(new SearchHit[0], new TotalHits(0, TotalHits.Relation.EQUAL_TO), 0), + null, + null, + false, + null, + null, + 1, + null, + 1, + 1, + 0, + 0, + ShardSearchFailure.EMPTY_ARRAY, + null + ) ); }
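The two test fixes above follow the same ref-counting rule: the code that creates a ref-counted response owns one reference and must release it exactly once. ActionListener.respondAndRelease hands the response to the listener and then drops the creator's reference; the new try/finally around the AsyncSearchResponse does the same by hand. A sketch of the discipline (acquireResponse and useResponse are hypothetical stand-ins for any code producing and consuming a RefCounted value):

import org.elasticsearch.core.RefCounted;

// resp starts with refCount == 1, owned by its creator.
RefCounted resp = acquireResponse();   // hypothetical factory
try {
    useResponse(resp);                 // may incRef()/decRef() internally, but stays balanced
} finally {
    resp.decRef();                     // release the creator's reference even if assertions throw
}

diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/inference/action/PutInferenceModelActionTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/inference/action/PutInferenceModelActionTests.java new file mode 100644 index 0000000000000..10f35bf33f631 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/inference/action/PutInferenceModelActionTests.java @@ -0,0 +1,60 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0.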
+ */ + +package org.elasticsearch.xpack.core.inference.action; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.inference.TaskType; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.core.ml.utils.MlStringsTests; +import org.junit.Before; + +import java.util.Locale; + +public class PutInferenceModelActionTests extends ESTestCase { + public static String TASK_TYPE; + public static String MODEL_ID; + public static XContentType X_CONTENT_TYPE; + public static BytesReference BYTES; + + @Before + public void setup() throws Exception { + TASK_TYPE = TaskType.ANY.toString(); + MODEL_ID = randomAlphaOfLengthBetween(1, 10).toLowerCase(Locale.ROOT); + X_CONTENT_TYPE = randomFrom(XContentType.values()); + BYTES = new BytesArray(randomAlphaOfLengthBetween(1, 10)); + } + + public void testValidate() { + // valid model ID + var request = new PutInferenceModelAction.Request(TASK_TYPE, MODEL_ID + "_-0", BYTES, X_CONTENT_TYPE); + ActionRequestValidationException validationException = request.validate(); + assertNull(validationException); + + // invalid model IDs + + var invalidRequest = new PutInferenceModelAction.Request(TASK_TYPE, "", BYTES, X_CONTENT_TYPE); + validationException = invalidRequest.validate(); + assertNotNull(validationException); + + var invalidRequest2 = new PutInferenceModelAction.Request( + TASK_TYPE, + randomAlphaOfLengthBetween(1, 10) + randomFrom(MlStringsTests.SOME_INVALID_CHARS), + BYTES, + X_CONTENT_TYPE + ); + validationException = invalidRequest2.validate(); + assertNotNull(validationException); + + var invalidRequest3 = new PutInferenceModelAction.Request(TASK_TYPE, null, BYTES, X_CONTENT_TYPE); + validationException = invalidRequest3.validate(); + assertNotNull(validationException); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelDefinitionPartActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelDefinitionPartActionRequestTests.java index 4ffa2e27fe60c..ee304f966c9b4 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelDefinitionPartActionRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelDefinitionPartActionRequestTests.java @@ -72,7 +72,7 @@ protected Writeable.Reader instanceReader() { @Override protected Request mutateInstanceForVersion(Request instance, TransportVersion version) { - if (version.before(TransportVersions.V_8_500_043)) { + if (version.before(TransportVersions.V_8_500_061)) { return new Request( instance.getModelId(), instance.getDefinition(), diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/assignment/RoutingInfoTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/assignment/RoutingInfoTests.java index 28ebf8b2445c5..830f7dde7c7d8 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/assignment/RoutingInfoTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/assignment/RoutingInfoTests.java @@ -69,4 +69,17 @@ public void testIsRoutable_GivenStartedWithNonZeroAllocations() { RoutingInfo routingInfo = new RoutingInfo(randomIntBetween(1, 10), 1, RoutingState.STARTED, ""); 
assertThat(routingInfo.isRoutable(), is(true)); } + + public void testGetFailedAllocations() { + int targetAllocations = randomIntBetween(1, 10); + RoutingInfo routingInfo = new RoutingInfo( + randomIntBetween(0, targetAllocations), + targetAllocations, + randomFrom(RoutingState.STARTING, RoutingState.STARTED, RoutingState.STOPPING), + "" + ); + assertThat(routingInfo.getFailedAllocations(), is(0)); + routingInfo = new RoutingInfo(randomIntBetween(0, targetAllocations), targetAllocations, RoutingState.FAILED, ""); + assertThat(routingInfo.getFailedAllocations(), is(targetAllocations)); + } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobTests.java index 880b62689dee2..c8fbe00d07618 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/JobTests.java @@ -108,12 +108,14 @@ public void testToXContentForInternalStorage() throws IOException { ToXContent.MapParams params = new ToXContent.MapParams(Collections.singletonMap(ToXContentParams.FOR_INTERNAL_STORAGE, "true")); BytesReference serializedJob = XContentHelper.toXContent(config, XContentType.JSON, params, false); - XContentParser parser = XContentFactory.xContent(XContentType.JSON) - .createParser(XContentParserConfiguration.EMPTY.withRegistry(xContentRegistry()), serializedJob.streamInput()); - - Job parsedConfig = Job.LENIENT_PARSER.apply(parser, null).build(); - // When we are writing for internal storage, we do not include the datafeed config - assertThat(parsedConfig.getDatafeedConfig().isPresent(), is(false)); + try ( + XContentParser parser = XContentFactory.xContent(XContentType.JSON) + .createParser(XContentParserConfiguration.EMPTY.withRegistry(xContentRegistry()), serializedJob.streamInput()) + ) { + Job parsedConfig = Job.LENIENT_PARSER.apply(parser, null).build(); + // When we are writing for internal storage, we do not include the datafeed config + assertThat(parsedConfig.getDatafeedConfig().isPresent(), is(false)); + } } public void testFutureConfigParse() throws IOException { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappingsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappingsTests.java index e1a9b20c048c4..c9370545036ff 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappingsTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappingsTests.java @@ -362,10 +362,9 @@ private Set<String> collectResultsDocFieldNames() throws IOException { private Set<String> collectFieldNames(String mapping) throws IOException { BufferedInputStream inputStream = new BufferedInputStream(new ByteArrayInputStream(mapping.getBytes(StandardCharsets.UTF_8))); - XContentParser parser = JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, inputStream); Set<String> fieldNames = new HashSet<>(); boolean isAfterPropertiesStart = false; - try { + try (XContentParser parser = JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, inputStream)) { XContentParser.Token token = parser.nextToken(); while (token != null) { switch (token) {
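The testGetFailedAllocations case added above pins down a simple contract: allocations count as failed only when the routing state itself is FAILED, in which case all target allocations are reported as failed. A hedged sketch of the accessor the test implies (field names mirror the RoutingInfo constructor arguments used in the test; this is not the production source):

// Hypothetical shape of the accessor exercised by the test.
public int getFailedAllocations() {
    return state == RoutingState.FAILED ? targetAllocations : 0;
}

diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/MlStringsTests.java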
b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/utils/MlStringsTests.java similarity index 87% rename from x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/MlStringsTests.java rename to x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/utils/MlStringsTests.java index fb60ac39bdef1..04681fe6e0cd0 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/MlStringsTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/utils/MlStringsTests.java @@ -4,10 +4,9 @@ * 2.0; you may not use this file except in compliance with the Elastic License * 2.0. */ -package org.elasticsearch.xpack.ml.utils; +package org.elasticsearch.xpack.core.ml.utils; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xpack.core.ml.utils.MlStrings; import java.util.Arrays; import java.util.Collections; @@ -22,6 +21,37 @@ public class MlStringsTests extends ESTestCase { + public static final String[] SOME_INVALID_CHARS = { + "%", + " ", + "!", + "@", + "#", + "$", + "^", + "&", + "*", + "(", + ")", + "+", + "=", + "{", + "}", + "[", + "]", + "|", + "\\", + ":", + ";", + "\"", + "'", + "<", + ">", + ",", + "?", + "/", + "~" }; + public void testDoubleQuoteIfNotAlphaNumeric() { assertEquals("foo2", MlStrings.doubleQuoteIfNotAlphaNumeric("foo2")); assertEquals("\"fo o\"", MlStrings.doubleQuoteIfNotAlphaNumeric("fo o")); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/RoleRestrictionTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/RoleRestrictionTests.java index fe64192cb0601..244e21f3f036c 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/RoleRestrictionTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/RoleRestrictionTests.java @@ -72,9 +72,10 @@ public void testToXContent() throws Exception { final Restriction restriction = randomWorkflowsRestriction(1, 5); final XContentType xContentType = randomFrom(XContentType.values()); final BytesReference xContentValue = toShuffledXContent(restriction, xContentType, ToXContent.EMPTY_PARAMS, false); - final XContentParser parser = xContentType.xContent().createParser(XContentParserConfiguration.EMPTY, xContentValue.streamInput()); - final Restriction parsed = Restriction.parse(randomAlphaOfLengthBetween(3, 6), parser); - assertThat(parsed, equalTo(restriction)); + try (XContentParser parser = xContentType.xContent().createParser(XContentParserConfiguration.EMPTY, xContentValue.streamInput())) { + final Restriction parsed = Restriction.parse(randomAlphaOfLengthBetween(3, 6), parser); + assertThat(parsed, equalTo(restriction)); + } } public void testSerialization() throws IOException { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java index 831dc58e14003..5611bb5d548e9 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java @@ -34,7 +34,7 @@ import org.elasticsearch.action.admin.indices.segments.IndicesSegmentsAction; import org.elasticsearch.action.admin.indices.settings.get.GetSettingsAction; import 
org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsAction; -import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresAction; +import org.elasticsearch.action.admin.indices.shards.TransportIndicesShardStoresAction; import org.elasticsearch.action.admin.indices.stats.IndicesStatsAction; import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateAction; import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesAction; @@ -899,8 +899,8 @@ public void testKibanaSystemRole() { ); assertThat(kibanaRole.indices().allowedIndicesMatcher(GetIndexAction.NAME).test(indexAbstraction), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(CreateIndexAction.NAME).test(indexAbstraction), is(false)); - assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportIndexAction.NAME).test(indexAbstraction), is(false)); - assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportDeleteAction.NAME).test(indexAbstraction), is(false)); + assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportIndexAction.NAME).test(indexAbstraction), is(true)); + assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportDeleteAction.NAME).test(indexAbstraction), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportSearchAction.TYPE.name()).test(indexAbstraction), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportMultiSearchAction.TYPE.name()).test(indexAbstraction), is(true)); assertThat(kibanaRole.indices().allowedIndicesMatcher(TransportGetAction.TYPE.name()).test(indexAbstraction), is(true)); @@ -1150,7 +1150,8 @@ public void testKibanaSystemRole() { is(true) ); - final boolean isAlsoAutoCreateIndex = indexName.startsWith(".logs-endpoint.actions-"); + final boolean isAlsoAutoCreateIndex = indexName.startsWith(".logs-endpoint.actions-") + || indexName.startsWith(".logs-endpoint.action.responses-"); assertThat(kibanaRole.indices().allowedIndicesMatcher(CreateIndexAction.NAME).test(indexAbstraction), is(false)); assertThat(kibanaRole.indices().allowedIndicesMatcher(AutoCreateAction.NAME).test(indexAbstraction), is(isAlsoAutoCreateIndex)); assertThat(kibanaRole.indices().allowedIndicesMatcher(CreateDataStreamAction.NAME).test(indexAbstraction), is(false)); @@ -2144,13 +2145,13 @@ public void testRemoteMonitoringCollectorRole() { ); assertThat( remoteMonitoringCollectorRole.indices() - .allowedIndicesMatcher(IndicesShardStoresAction.NAME) + .allowedIndicesMatcher(TransportIndicesShardStoresAction.TYPE.name()) .test(mockIndexAbstraction(randomFrom(TestRestrictedIndices.SAMPLE_RESTRICTED_NAMES))), is(true) ); assertThat( remoteMonitoringCollectorRole.indices() - .allowedIndicesMatcher(IndicesShardStoresAction.NAME) + .allowedIndicesMatcher(TransportIndicesShardStoresAction.TYPE.name()) .test(mockIndexAbstraction(XPackPlugin.ASYNC_RESULTS_INDEX + randomAlphaOfLengthBetween(0, 2))), is(true) ); @@ -2265,7 +2266,7 @@ private void assertMonitoringOnRestrictedIndices(Role role) { IndicesStatsAction.NAME, IndicesSegmentsAction.NAME, GetSettingsAction.NAME, - IndicesShardStoresAction.NAME, + TransportIndicesShardStoresAction.TYPE.name(), RecoveryAction.NAME ); for (final String indexMonitoringActionName : indexMonitoringActionNamesList) { diff --git a/x-pack/plugin/core/template-resources/src/main/resources/fleet-agents.json b/x-pack/plugin/core/template-resources/src/main/resources/fleet-agents.json index 5de8ce4bef402..5facc229bf503 100644 --- 
a/x-pack/plugin/core/template-resources/src/main/resources/fleet-agents.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/fleet-agents.json @@ -270,6 +270,9 @@ "download_percent": { "type": "double" }, + "download_rate": { + "type": "double" + }, "failed_state": { "type": "keyword" }, @@ -281,6 +284,18 @@ "ignore_above": 1024 } } + }, + "retry_error_msg": { + "type": "text", + "fields": { + "keyword": { + "type": "keyword", + "ignore_above": 1024 + } + } + }, + "retry_until": { + "type": "date" } } } diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-events.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-events.json index c809d920bdf23..d8a27813734e4 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-events.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-events.json @@ -26,7 +26,8 @@ }, "_meta": { "index-template-version": ${xpack.profiling.template.version}, - "index-version": ${xpack.profiling.index.events.version} + "index-version": ${xpack.profiling.index.events.version}, + "managed": true }, "dynamic": false, "properties": { diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-executables.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-executables.json index ef5fc7159d090..0f1c24d96c092 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-executables.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-executables.json @@ -15,7 +15,8 @@ }, "_meta": { "index-template-version": ${xpack.profiling.template.version}, - "index-version": ${xpack.profiling.index.executables.version} + "index-version": ${xpack.profiling.index.executables.version}, + "managed": true }, "dynamic": false, "properties": { diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-hosts.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-hosts.json index 752b57d933f19..f452682c620c4 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-hosts.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-hosts.json @@ -21,7 +21,8 @@ }, "_meta": { "index-template-version": ${xpack.profiling.template.version}, - "index-version": ${xpack.profiling.index.hosts.version} + "index-version": ${xpack.profiling.index.hosts.version}, + "managed": true }, "dynamic": false, "properties": { diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-hot-tier.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-hot-tier.json index fcd6667773ea3..68f0fd09f18d3 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-hot-tier.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-hot-tier.json @@ -6,5 +6,9 @@ } } }, + "_meta": { + "index-template-version": ${xpack.profiling.template.version}, + "managed": true + }, "version": ${xpack.profiling.template.version} } diff --git 
a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-ilm.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-ilm.json index bf3dd67893564..3847e1775442a 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-ilm.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-ilm.json @@ -8,5 +8,9 @@ } } }, + "_meta": { + "index-template-version": ${xpack.profiling.template.version}, + "managed": true + }, "version": ${xpack.profiling.template.version} } diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-metrics.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-metrics.json index 6bc69c8ddc5f2..ac4a6def2a70b 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-metrics.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-metrics.json @@ -21,7 +21,8 @@ }, "_meta": { "index-template-version": ${xpack.profiling.template.version}, - "index-version": ${xpack.profiling.index.metrics.version} + "index-version": ${xpack.profiling.index.metrics.version}, + "managed": true }, /* We intentionally allow dynamic mappings for metrics. Which metrics are added is guarded by diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-stackframes.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-stackframes.json index 84c31d11589a4..c28a548f95418 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-stackframes.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-stackframes.json @@ -23,7 +23,8 @@ }, "_meta": { "index-template-version": ${xpack.profiling.template.version}, - "index-version": ${xpack.profiling.index.stackframes.version} + "index-version": ${xpack.profiling.index.stackframes.version}, + "managed": true }, "dynamic": false, "properties": { diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-stacktraces.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-stacktraces.json index 7999ad1b8f062..470edd710136d 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-stacktraces.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-stacktraces.json @@ -21,7 +21,8 @@ }, "_meta": { "index-template-version": ${xpack.profiling.template.version}, - "index-version": ${xpack.profiling.index.stacktraces.version} + "index-version": ${xpack.profiling.index.stacktraces.version}, + "managed": true }, "dynamic": false, "properties": { diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-symbols.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-symbols.json index b8c9f9db93db8..48b88492a777d 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-symbols.json +++ 
b/x-pack/plugin/core/template-resources/src/main/resources/profiling/component-template/profiling-symbols.json @@ -19,7 +19,8 @@ }, "_meta": { "index-template-version": ${xpack.profiling.template.version}, - "index-version": ${xpack.profiling.index.symbols.version} + "index-version": ${xpack.profiling.index.symbols.version}, + "managed": true }, "dynamic": false, "properties": { diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-events.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-events.json index 795e2d318ddb9..e2d17c8327704 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-events.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-events.json @@ -13,7 +13,8 @@ "ignore_missing_component_templates": ["profiling-ilm@custom"], "priority": 100, "_meta": { - "description": "Index template for profiling-events" + "description": "Index template for profiling-events", + "managed": true }, "version": ${xpack.profiling.template.version} } diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-executables.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-executables.json index 41df385022fdf..57fd114c57e27 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-executables.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-executables.json @@ -11,7 +11,8 @@ "ignore_missing_component_templates": ["profiling-ilm@custom"], "priority": 100, "_meta": { - "description": "Index template for .profiling-executables" + "description": "Index template for .profiling-executables", + "managed": true }, "version": ${xpack.profiling.template.version} } diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-hosts.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-hosts.json index 6189c86bb2999..526d8090b0ac6 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-hosts.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-hosts.json @@ -11,7 +11,8 @@ "ignore_missing_component_templates": ["profiling-ilm@custom"], "priority": 100, "_meta": { - "description": "Template for profiling-hosts" + "description": "Index template for profiling-hosts", + "managed": true }, "version": ${xpack.profiling.template.version} } diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-metrics.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-metrics.json index b9154bfa54334..d09de006d025d 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-metrics.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-metrics.json @@ -11,7 +11,8 @@ "ignore_missing_component_templates": ["profiling-ilm@custom"], "priority": 100, "_meta": { - "description": "Template for profiling-metrics" + "description": "Index template for profiling-metrics", + "managed": true }, "version": ${xpack.profiling.template.version} } diff --git 
a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-returnpads-private.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-returnpads-private.json index 7fbb2f8d903a5..72d8cf6e1dfc2 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-returnpads-private.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-returnpads-private.json @@ -21,7 +21,9 @@ }, "_meta": { "index-template-version": ${xpack.profiling.template.version}, - "index-version": ${xpack.profiling.index.returnpads.private.version} + "index-version": ${xpack.profiling.index.returnpads.private.version}, + "description": "Index template for .profiling-returnpads-private", + "managed": true }, "dynamic": false, "properties": { diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-sq-executables.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-sq-executables.json index f164af84e07ae..6f32af12c84bf 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-sq-executables.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-sq-executables.json @@ -17,7 +17,9 @@ }, "_meta": { "index-template-version": ${xpack.profiling.template.version}, - "index-version": ${xpack.profiling.index.sq.executables.version} + "index-version": ${xpack.profiling.index.sq.executables.version}, + "description": "Index template for .profiling-sq-executables", + "managed": true }, "dynamic": false, "properties": { diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-sq-leafframes.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-sq-leafframes.json index 78a3296093c52..d3c5b0af215e6 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-sq-leafframes.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-sq-leafframes.json @@ -17,7 +17,9 @@ }, "_meta": { "index-template-version": ${xpack.profiling.template.version}, - "index-version": ${xpack.profiling.index.sq.leafframes.version} + "index-version": ${xpack.profiling.index.sq.leafframes.version}, + "description": "Index template for .profiling-sq-leafframes", + "managed": true }, "dynamic": false, "properties": { diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-stackframes.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-stackframes.json index eed570de0a608..694ae6ba92a57 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-stackframes.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-stackframes.json @@ -11,7 +11,8 @@ "ignore_missing_component_templates": ["profiling-ilm@custom"], "priority": 100, "_meta": { - "description": "Index template for .profiling-stackframes" + "description": "Index template for .profiling-stackframes", + "managed": true }, "version": ${xpack.profiling.template.version} } diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-stacktraces.json 
b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-stacktraces.json index 3797b87b17f10..c4c920a76c375 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-stacktraces.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-stacktraces.json @@ -11,7 +11,8 @@ "ignore_missing_component_templates": ["profiling-ilm@custom"], "priority": 100, "_meta": { - "description": "Index template for .profiling-stacktraces" + "description": "Index template for .profiling-stacktraces", + "managed": true }, "version": ${xpack.profiling.template.version} } diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-symbols-global.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-symbols-global.json index cd6c597f19689..a7bae1adbb548 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-symbols-global.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-symbols-global.json @@ -18,7 +18,8 @@ }, "priority": 100, "_meta": { - "description": "Index template for .profiling-symbols-global" + "description": "Index template for .profiling-symbols-global", + "managed": true }, "version": ${xpack.profiling.template.version} } diff --git a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-symbols-private.json b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-symbols-private.json index 58a3b7196ac61..999bf7721b897 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-symbols-private.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/profiling/index-template/profiling-symbols-private.json @@ -7,7 +7,8 @@ ], "priority": 100, "_meta": { - "description": "Index template for .profiling-symbols-private" + "description": "Index template for .profiling-symbols-private", + "managed": true }, "version": ${xpack.profiling.template.version} } diff --git a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/logging/DeprecationIndexingTemplateRegistry.java b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/logging/DeprecationIndexingTemplateRegistry.java index 6b4882bae9fd8..065053f117de0 100644 --- a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/logging/DeprecationIndexingTemplateRegistry.java +++ b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/logging/DeprecationIndexingTemplateRegistry.java @@ -70,11 +70,8 @@ public DeprecationIndexingTemplateRegistry( DEPRECATION_INDEXING_TEMPLATE_VERSION_VARIABLE ) )) { - try { - componentTemplates.put( - config.getTemplateName(), - ComponentTemplate.parse(JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, config.loadBytes())) - ); + try (var parser = JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, config.loadBytes())) { + componentTemplates.put(config.getTemplateName(), ComponentTemplate.parse(parser)); } catch (IOException e) { throw new AssertionError(e); } diff --git a/x-pack/plugin/downsample/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/downsample/10_basic.yml 
b/x-pack/plugin/downsample/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/downsample/10_basic.yml index 70f66f38d39b9..0eb93c59c5b1d 100644 --- a/x-pack/plugin/downsample/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/downsample/10_basic.yml +++ b/x-pack/plugin/downsample/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/downsample/10_basic.yml @@ -46,9 +46,17 @@ setup: multi-counter: type: long time_series_metric: counter + scaled-counter: + type: scaled_float + scaling_factor: 100 + time_series_metric: counter multi-gauge: type: integer time_series_metric: gauge + scaled-gauge: + type: scaled_float + scaling_factor: 100 + time_series_metric: gauge network: properties: tx: @@ -63,21 +71,21 @@ setup: index: test body: - '{"index": {}}' - - '{"@timestamp": "2021-04-28T18:50:04.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "multi-counter" : [10, 11, 12], "multi-gauge": [100, 200, 150], "network": {"tx": 2001818691, "rx": 802133794}, "created_at": "2021-04-28T19:34:00.000Z", "running": false, "number_of_containers": 2, "tags": ["backend", "prod"], "values": [2, 3, 6]}}}' + - '{"@timestamp": "2021-04-28T18:50:04.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.1", "multi-counter" : [10, 11, 12], "scaled-counter": 10.0, "multi-gauge": [100, 200, 150], "scaled-gauge": 100.0, "network": {"tx": 2001818691, "rx": 802133794}, "created_at": "2021-04-28T19:34:00.000Z", "running": false, "number_of_containers": 2, "tags": ["backend", "prod"], "values": [2, 3, 6]}}}' - '{"index": {}}' - - '{"@timestamp": "2021-04-28T18:50:24.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.26", "multi-counter" : [21, 22, 23], "multi-gauge": [90, 91, 95], "network": {"tx": 2005177954, "rx": 801479970}, "created_at": "2021-04-28T19:35:00.000Z", "running": true, "number_of_containers": 2, "tags": ["backend", "prod", "us-west1"], "values": [1, 1, 3]}}}' + - '{"@timestamp": "2021-04-28T18:50:24.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.26", "multi-counter" : [21, 22, 23], "scaled-counter": 20.0, "multi-gauge": [90, 91, 95], "scaled-gauge": 90.0, "network": {"tx": 2005177954, "rx": 801479970}, "created_at": "2021-04-28T19:35:00.000Z", "running": true, "number_of_containers": 2, "tags": ["backend", "prod", "us-west1"], "values": [1, 1, 3]}}}' - '{"index": {}}' - - '{"@timestamp": "2021-04-28T20:50:44.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.41", "multi-counter" : [1, 5, 10], "multi-gauge": [103, 110, 109], "network": {"tx": 2006223737, "rx": 802337279}, "created_at": "2021-04-28T19:36:00.000Z", "running": true, "number_of_containers": 2, "tags": ["backend", "prod", "us-west2"], "values": [4, 1, 2]}}}' + - '{"@timestamp": "2021-04-28T20:50:44.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.41", "multi-counter" : [1, 5, 10], "scaled-counter": 1.0, "multi-gauge": [103, 110, 109], "scaled-gauge": 104.0, "network": {"tx": 2006223737, "rx": 802337279}, "created_at": "2021-04-28T19:36:00.000Z", "running": true, "number_of_containers": 2, "tags": ["backend", "prod", "us-west2"], "values": [4, 1, 2]}}}' - '{"index": {}}' - - '{"@timestamp": "2021-04-28T20:51:04.467Z", 
"metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.22", "multi-counter" : [101, 102, 105], "multi-gauge": [100, 100, 100], "network": {"tx": 2012916202, "rx": 803685721}, "created_at": "2021-04-28T19:37:00.000Z", "running": true, "number_of_containers": 2, "tags": ["backend", "prod"], "values": [2, 3, 1]}}}' + - '{"@timestamp": "2021-04-28T20:51:04.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "ip": "10.10.55.22", "multi-counter" : [101, 102, 105], "scaled-counter": 100.0, "multi-gauge": [100, 100, 100], "scaled-gauge": 102.0, "network": {"tx": 2012916202, "rx": 803685721}, "created_at": "2021-04-28T19:37:00.000Z", "running": true, "number_of_containers": 2, "tags": ["backend", "prod"], "values": [2, 3, 1]}}}' - '{"index": {}}' - - '{"@timestamp": "2021-04-28T18:50:03.142Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.33", "multi-counter" : [7, 11, 44], "multi-gauge": [100, 100, 102], "network": {"tx": 1434521831, "rx": 530575198}, "created_at": "2021-04-28T19:42:00.000Z", "running": false, "number_of_containers": 1, "tags": ["backend", "test"], "values": [2, 3, 4]}}}' + - '{"@timestamp": "2021-04-28T18:50:03.142Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.33", "multi-counter" : [7, 11, 44], "scaled-counter": 7.0, "multi-gauge": [100, 100, 102], "scaled-gauge": 100.0, "network": {"tx": 1434521831, "rx": 530575198}, "created_at": "2021-04-28T19:42:00.000Z", "running": false, "number_of_containers": 1, "tags": ["backend", "test"], "values": [2, 3, 4]}}}' - '{"index": {}}' - - '{"@timestamp": "2021-04-28T18:50:23.142Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.56", "multi-counter" : [0, 0, 1], "multi-gauge": [101, 102, 102], "network": {"tx": 1434577921, "rx": 530600088}, "created_at": "2021-04-28T19:43:00.000Z", "running": false, "number_of_containers": 1, "tags": ["backend", "test", "us-west2"], "values": [2, 1, 1]}}}' + - '{"@timestamp": "2021-04-28T18:50:23.142Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.56", "multi-counter" : [0, 0, 1], "scaled-counter": 0.0, "multi-gauge": [101, 102, 102], "scaled-gauge": 101.0, "network": {"tx": 1434577921, "rx": 530600088}, "created_at": "2021-04-28T19:43:00.000Z", "running": false, "number_of_containers": 1, "tags": ["backend", "test", "us-west2"], "values": [2, 1, 1]}}}' - '{"index": {}}' - - '{"@timestamp": "2021-04-28T19:50:53.142Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.37", "multi-counter" : [1000, 1001, 1002], "multi-gauge": [99, 100, 110], "network": {"tx": 1434587694, "rx": 530604797}, "created_at": "2021-04-28T19:44:00.000Z", "running": true, "number_of_containers": 1, "tags": ["backend", "test", "us-west1"], "values": [4, 5, 2]}}}' + - '{"@timestamp": "2021-04-28T19:50:53.142Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.37", "multi-counter" : [1000, 1001, 1002], "scaled-counter": 1000.0, "multi-gauge": [99, 100, 110], "scaled-gauge": 99.0, "network": {"tx": 1434587694, "rx": 530604797}, "created_at": "2021-04-28T19:44:00.000Z", "running": true, "number_of_containers": 1, "tags": ["backend", "test", 
"us-west1"], "values": [4, 5, 2]}}}' - '{"index": {}}' - - '{"@timestamp": "2021-04-28T19:51:03.142Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.120", "multi-counter" : [76, 77, 78], "multi-gauge": [95, 98, 100], "network": {"tx": 1434595272, "rx": 530605511}, "created_at": "2021-04-28T19:45:00.000Z", "running": true, "number_of_containers": 1, "tags": ["backend", "test", "us-west1"], "values": [3, 2, 1]}}}' + - '{"@timestamp": "2021-04-28T19:51:03.142Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "ip": "10.10.55.120", "multi-counter" : [76, 77, 78], "scaled-counter": 70.0, "multi-gauge": [95, 98, 100], "scaled-gauge": 95.0, "network": {"tx": 1434595272, "rx": 530605511}, "created_at": "2021-04-28T19:45:00.000Z", "running": true, "number_of_containers": 1, "tags": ["backend", "test", "us-west1"], "values": [3, 2, 1]}}}' - do: indices.put_settings: @@ -314,10 +322,15 @@ setup: - match: { hits.hits.0._source.metricset: pod } - match: { hits.hits.0._source.@timestamp: 2021-04-28T18:00:00.000Z } - match: { hits.hits.0._source.k8s.pod.multi-counter: 21 } + - match: { hits.hits.0._source.k8s.pod.scaled-counter: 20.0 } - match: { hits.hits.0._source.k8s.pod.multi-gauge.min: 90 } - match: { hits.hits.0._source.k8s.pod.multi-gauge.max: 200 } - match: { hits.hits.0._source.k8s.pod.multi-gauge.sum: 726 } - match: { hits.hits.0._source.k8s.pod.multi-gauge.value_count: 6 } + - match: { hits.hits.0._source.k8s.pod.scaled-gauge.min: 90.0 } + - match: { hits.hits.0._source.k8s.pod.scaled-gauge.max: 100.0 } + - match: { hits.hits.0._source.k8s.pod.scaled-gauge.sum: 190.0 } + - match: { hits.hits.0._source.k8s.pod.scaled-gauge.value_count: 2 } - match: { hits.hits.0._source.k8s.pod.network.tx.min: 2001818691 } - match: { hits.hits.0._source.k8s.pod.network.tx.max: 2005177954 } - match: { hits.hits.0._source.k8s.pod.network.tx.value_count: 2 } @@ -354,6 +367,13 @@ setup: - match: { test-downsample.mappings.properties.k8s.properties.pod.properties.multi-gauge.time_series_metric: gauge } - match: { test-downsample.mappings.properties.k8s.properties.pod.properties.multi-counter.type: long } - match: { test-downsample.mappings.properties.k8s.properties.pod.properties.multi-counter.time_series_metric: counter } + - match: { test-downsample.mappings.properties.k8s.properties.pod.properties.scaled-counter.type: scaled_float } + - match: { test-downsample.mappings.properties.k8s.properties.pod.properties.scaled-counter.scaling_factor: 100 } + - match: { test-downsample.mappings.properties.k8s.properties.pod.properties.scaled-counter.time_series_metric: counter } + - match: { test-downsample.mappings.properties.k8s.properties.pod.properties.scaled-gauge.type: aggregate_metric_double } + - match: { test-downsample.mappings.properties.k8s.properties.pod.properties.scaled-gauge.metrics: [ "min", "max", "sum", "value_count" ] } + - match: { test-downsample.mappings.properties.k8s.properties.pod.properties.scaled-gauge.default_metric: max } + - match: { test-downsample.mappings.properties.k8s.properties.pod.properties.scaled-gauge.time_series_metric: gauge } - match: { test-downsample.mappings.properties.k8s.properties.pod.properties.uid.type: keyword } - match: { test-downsample.mappings.properties.k8s.properties.pod.properties.uid.time_series_dimension: true } @@ -387,6 +407,38 @@ setup: "fixed_interval": "1h" } +--- +"Downsample failure": + - skip: + version: " - 8.12.99" + reason: "#103615 
merged to 8.13.0 and later" + features: allowed_warnings + + - do: + allowed_warnings: + - "index template [my-template1] has index patterns [failed-downsample-test] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-template1] will take precedence during new index creation" + indices.put_index_template: + name: my-template1 + body: + index_patterns: [failed-downsample-test] + template: + settings: + index: + routing: + allocation: + include: + does-not-exist: "yes" + + - do: + catch: /downsample task \[downsample-failed-downsample-test-0-1h\] failed/ + indices.downsample: + index: test + target_index: failed-downsample-test + body: > + { + "fixed_interval": "1h" + } + --- "Downsample to existing index": - skip: diff --git a/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DownsampleClusterDisruptionIT.java b/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DownsampleClusterDisruptionIT.java index 84ffefb067eed..e39cdfd2a651b 100644 --- a/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DownsampleClusterDisruptionIT.java +++ b/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DownsampleClusterDisruptionIT.java @@ -430,7 +430,7 @@ private void downsample(final String sourceIndex, final String downsampleIndex, assertAcked( internalCluster().client() .execute(DownsampleAction.INSTANCE, new DownsampleAction.Request(sourceIndex, downsampleIndex, TIMEOUT, config)) - .actionGet(TIMEOUT.millis()) + .actionGet(TIMEOUT) ); } diff --git a/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/DownsampleShardPersistentTaskExecutor.java b/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/DownsampleShardPersistentTaskExecutor.java index 06e69ab4702c1..ebf31bd32b48f 100644 --- a/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/DownsampleShardPersistentTaskExecutor.java +++ b/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/DownsampleShardPersistentTaskExecutor.java @@ -159,12 +159,13 @@ private void delegate( final DownsampleShardTaskParams params, final SearchHit[] lastDownsampledTsidHits ) { + DownsampleShardTask downsampleShardTask = (DownsampleShardTask) task; client.execute( DelegatingAction.INSTANCE, - new DelegatingAction.Request((DownsampleShardTask) task, lastDownsampledTsidHits, params), + new DelegatingAction.Request(downsampleShardTask, lastDownsampledTsidHits, params), ActionListener.wrap(empty -> {}, e -> { LOGGER.error("error while delegating", e); - markAsFailed(task, e); + markAsFailed(downsampleShardTask, e); }) ); } @@ -222,7 +223,8 @@ protected void doRun() throws Exception { }); } - private static void markAsFailed(AllocatedPersistentTask task, Exception e) { + private static void markAsFailed(DownsampleShardTask task, Exception e) { + task.setDownsampleShardIndexerStatus(DownsampleShardIndexerStatus.FAILED); task.updatePersistentTaskState( new DownsampleShardPersistentTaskState(DownsampleShardIndexerStatus.FAILED, null), ActionListener.running(() -> task.markAsFailed(e)) diff --git a/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/DownsampleShardTaskParams.java b/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/DownsampleShardTaskParams.java index 6a4ee88a0cdef..34b7d3c90b267 100644 --- 
a/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/DownsampleShardTaskParams.java +++ b/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/DownsampleShardTaskParams.java @@ -91,7 +91,7 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersions.V_8_500_054; + return TransportVersions.V_8_500_061; } @Override diff --git a/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/RestDownsampleAction.java b/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/RestDownsampleAction.java index 76f19388e7ee7..8324265c3a786 100644 --- a/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/RestDownsampleAction.java +++ b/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/RestDownsampleAction.java @@ -35,7 +35,10 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient String sourceIndex = restRequest.param("index"); String targetIndex = restRequest.param("target_index"); String timeout = restRequest.param("timeout"); - DownsampleConfig config = DownsampleConfig.fromXContent(restRequest.contentParser()); + DownsampleConfig config; + try (var parser = restRequest.contentParser()) { + config = DownsampleConfig.fromXContent(parser); + } DownsampleAction.Request request = new DownsampleAction.Request( sourceIndex, targetIndex, diff --git a/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/TransportDownsampleAction.java b/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/TransportDownsampleAction.java index 4dc5195f8345a..e7bd2f0c0fb27 100644 --- a/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/TransportDownsampleAction.java +++ b/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/TransportDownsampleAction.java @@ -400,6 +400,19 @@ private void performShardDownsampling( @Override public void onResponse(PersistentTasksCustomMetadata.PersistentTask persistentTask) { + if (persistentTask != null) { + var runningPersistentTaskState = (DownsampleShardPersistentTaskState) persistentTask.getState(); + if (runningPersistentTaskState != null) { + if (runningPersistentTaskState.failed()) { + onFailure(new ElasticsearchException("downsample task [" + persistentTaskId + "] failed")); + return; + } else if (runningPersistentTaskState.cancelled()) { + onFailure(new ElasticsearchException("downsample task [" + persistentTaskId + "] cancelled")); + return; + } + } + } + logger.info("Downsampling task [" + persistentTaskId + " completed for shard " + params.shardId()); if (countDown.decrementAndGet() == 0) { logger.info("All downsampling tasks completed [" + numberOfShards + "]"); @@ -598,21 +611,23 @@ private static void addMetricFieldMapping(final XContentBuilder builder, final S final TimeSeriesParams.MetricType metricType = TimeSeriesParams.MetricType.fromString( fieldProperties.get(TIME_SERIES_METRIC_PARAM).toString() ); + builder.startObject(field); if (metricType == TimeSeriesParams.MetricType.COUNTER) { // For counters, we keep the same field type, because they store // only one value (the last value of the counter) - builder.startObject(field).field("type", fieldProperties.get("type")).field(TIME_SERIES_METRIC_PARAM, metricType).endObject(); + for (String fieldProperty : fieldProperties.keySet()) { + builder.field(fieldProperty, fieldProperties.get(fieldProperty)); + } } else { final String[] 
supportedAggsArray = metricType.supportedAggs(); // We choose max as the default metric final String defaultMetric = List.of(supportedAggsArray).contains("max") ? "max" : supportedAggsArray[0]; - builder.startObject(field) - .field("type", AggregateDoubleMetricFieldMapper.CONTENT_TYPE) + builder.field("type", AggregateDoubleMetricFieldMapper.CONTENT_TYPE) .array(AggregateDoubleMetricFieldMapper.Names.METRICS, supportedAggsArray) .field(AggregateDoubleMetricFieldMapper.Names.DEFAULT_METRIC, defaultMetric) - .field(TIME_SERIES_METRIC_PARAM, metricType) - .endObject(); + .field(TIME_SERIES_METRIC_PARAM, metricType); } + builder.endObject(); } private static void validateDownsamplingInterval(MapperService mapperService, DownsampleConfig config) { diff --git a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/action/EnrichShardMultiSearchAction.java b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/action/EnrichShardMultiSearchAction.java index df8ea5344708d..94e9033dcca4f 100644 --- a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/action/EnrichShardMultiSearchAction.java +++ b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/action/EnrichShardMultiSearchAction.java @@ -55,7 +55,6 @@ import org.elasticsearch.search.SearchModule; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.fetch.subphase.FetchSourceContext; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xcontent.DeprecationHandler; @@ -305,7 +304,13 @@ private static BytesReference filterSource(FetchSourceContext fetchSourceContext private static SearchResponse createSearchResponse(TopDocs topDocs, SearchHit[] hits) { SearchHits searchHits = new SearchHits(hits, topDocs.totalHits, 0); return new SearchResponse( - new InternalSearchResponse(searchHits, null, null, null, false, null, 0), + searchHits, + null, + null, + false, + null, + null, + 0, null, 1, 1, diff --git a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/BasicEnrichTests.java b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/BasicEnrichTests.java index 84b96c31491cb..f5da8a9f37fd0 100644 --- a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/BasicEnrichTests.java +++ b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/BasicEnrichTests.java @@ -414,7 +414,7 @@ public void testFailureAfterEnrich() throws Exception { IndexRequest indexRequest = new IndexRequest("my-index").id("1") .setPipeline(pipelineName) .source(Map.of(MATCH_FIELD, "non_existing")); - Exception e = expectThrows(IllegalArgumentException.class, () -> client().index(indexRequest).actionGet()); + Exception e = expectThrows(IllegalArgumentException.class, () -> client().index(indexRequest)); indexRequest.decRef(); assertThat(e.getMessage(), equalTo("field [users] not present as part of path [users]")); } diff --git a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichPolicyUpdateTests.java b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichPolicyUpdateTests.java index 73c68b92ff30c..1e4426661e06c 100644 --- a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichPolicyUpdateTests.java +++ b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichPolicyUpdateTests.java @@ -74,7 +74,7 @@ public void 
testUpdatePolicyOnly() { createSourceIndices(client(), instance2); ResourceAlreadyExistsException exc = expectThrows( ResourceAlreadyExistsException.class, - () -> client().execute(PutEnrichPolicyAction.INSTANCE, new PutEnrichPolicyAction.Request("my_policy", instance2)).actionGet() + client().execute(PutEnrichPolicyAction.INSTANCE, new PutEnrichPolicyAction.Request("my_policy", instance2)) ); assertTrue(exc.getMessage().contains("policy [my_policy] already exists")); } diff --git a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichProcessorFactoryTests.java b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichProcessorFactoryTests.java index a26cab231f52c..5fa8659b609b1 100644 --- a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichProcessorFactoryTests.java +++ b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichProcessorFactoryTests.java @@ -28,7 +28,6 @@ import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.aggregations.InternalAggregations; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.profile.SearchProfileResults; import org.elasticsearch.search.suggest.Suggest; import org.elasticsearch.test.ESTestCase; @@ -257,26 +256,26 @@ protected void ActionListener listener ) { assert EnrichCoordinatorProxyAction.NAME.equals(action.name()); - var emptyResponse = new SearchResponse( - new InternalSearchResponse( + requestCounter[0]++; + ActionListener.respondAndRelease( + listener, + (Response) new SearchResponse( new SearchHits(new SearchHit[0], new TotalHits(0L, TotalHits.Relation.EQUAL_TO), 0.0f), InternalAggregations.EMPTY, new Suggest(Collections.emptyList()), - new SearchProfileResults(Collections.emptyMap()), false, false, - 1 - ), - "", - 1, - 1, - 0, - 0, - ShardSearchFailure.EMPTY_ARRAY, - SearchResponse.Clusters.EMPTY + new SearchProfileResults(Collections.emptyMap()), + 1, + "", + 1, + 1, + 0, + 0, + ShardSearchFailure.EMPTY_ARRAY, + SearchResponse.Clusters.EMPTY + ) ); - requestCounter[0]++; - listener.onResponse((Response) emptyResponse); } }; EnrichProcessorFactory factory = new EnrichProcessorFactory(client, scriptService, enrichCache); diff --git a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichResiliencyTests.java b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichResiliencyTests.java index b3fe07a24299f..c9d644214ef25 100644 --- a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichResiliencyTests.java +++ b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichResiliencyTests.java @@ -34,6 +34,7 @@ import java.util.List; import java.util.concurrent.TimeUnit; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; @@ -156,7 +157,7 @@ public void testWriteThreadLivenessBackToBack() throws Exception { assertThat(firstFailure.getMessage(), containsString("Could not perform enrichment, enrich coordination queue at capacity")); client().admin().indices().refresh(new RefreshRequest(enrichedIndexName)).actionGet(); - assertEquals(successfulItems, client().search(new SearchRequest(enrichedIndexName)).actionGet().getHits().getTotalHits().value); + assertHitCount(client().search(new SearchRequest(enrichedIndexName)), successfulItems); } 
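// Context for the assertHitCount rewrites just above and below: SearchResponse is
// ref-counted in this change set, so the old pattern
// client().search(req).actionGet().getHits().getTotalHits().value resolved the
// future but never released the response. assertHitCount takes the unresolved
// future and releases the response itself; a minimal sketch of the equivalent
// inline pattern (try/finally shape assumed, names taken from this test):
//
//   SearchResponse response = client().search(new SearchRequest(enrichedIndexName)).actionGet();
//   try {
//       assertEquals(successfulItems, response.getHits().getTotalHits().value);
//   } finally {
//       response.decRef();
//   }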
} @@ -284,7 +285,7 @@ public void testWriteThreadLivenessWithPipeline() throws Exception { assertThat(firstFailure.getMessage(), containsString("Could not perform enrichment, enrich coordination queue at capacity")); client().admin().indices().refresh(new RefreshRequest(enrichedIndexName)).actionGet(); - assertEquals(successfulItems, client().search(new SearchRequest(enrichedIndexName)).actionGet().getHits().getTotalHits().value); + assertHitCount(client().search(new SearchRequest(enrichedIndexName)), successfulItems); } } } diff --git a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/action/CoordinatorTests.java b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/action/CoordinatorTests.java index 079af561e00c9..8f23dde1d939f 100644 --- a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/action/CoordinatorTests.java +++ b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/action/CoordinatorTests.java @@ -25,7 +25,6 @@ import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.aggregations.InternalAggregations; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; @@ -373,16 +372,22 @@ public void testReduce() { } private static SearchResponse emptySearchResponse() { - InternalSearchResponse response = new InternalSearchResponse( + return new SearchResponse( new SearchHits(new SearchHit[0], new TotalHits(0, TotalHits.Relation.EQUAL_TO), Float.NaN), InternalAggregations.EMPTY, null, - null, false, null, - 1 + null, + 1, + null, + 1, + 1, + 0, + 100, + ShardSearchFailure.EMPTY_ARRAY, + SearchResponse.Clusters.EMPTY ); - return new SearchResponse(response, null, 1, 1, 0, 100, ShardSearchFailure.EMPTY_ARRAY, SearchResponse.Clusters.EMPTY); } private class MockLookupFunction implements BiConsumer<MultiSearchRequest, BiConsumer<MultiSearchResponse, Exception>> { diff --git a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/action/EnrichShardMultiSearchActionTests.java b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/action/EnrichShardMultiSearchActionTests.java index e9080fbaacfb3..7a2eb3e3b4c0a 100644 --- a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/action/EnrichShardMultiSearchActionTests.java +++ b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/action/EnrichShardMultiSearchActionTests.java @@ -83,7 +83,7 @@ public void testNonEnrichIndex() throws Exception { request.add(new SearchRequest("index")); Exception e = expectThrows( ActionRequestValidationException.class, - () -> client().execute(EnrichShardMultiSearchAction.INSTANCE, new EnrichShardMultiSearchAction.Request(request)).actionGet() + client().execute(EnrichShardMultiSearchAction.INSTANCE, new EnrichShardMultiSearchAction.Request(request)) ); assertThat(e.getMessage(), equalTo("Validation Failed: 1: index [index] is not an enrich index;")); } @@ -95,7 +95,7 @@ public void testMultipleShards() throws Exception { request.add(new SearchRequest(indexName)); Exception e = expectThrows( IllegalStateException.class, - () -> client().execute(EnrichShardMultiSearchAction.INSTANCE, new EnrichShardMultiSearchAction.Request(request)).actionGet() + client().execute(EnrichShardMultiSearchAction.INSTANCE, new EnrichShardMultiSearchAction.Request(request)) ); assertThat(e.getMessage(), equalTo("index [.enrich-1] should have 1 shard, but has 2 shards")); } diff
--git a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/action/TransportDeleteEnrichPolicyActionTests.java b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/action/TransportDeleteEnrichPolicyActionTests.java index d34fcf48aa68d..84700308662b9 100644 --- a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/action/TransportDeleteEnrichPolicyActionTests.java +++ b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/action/TransportDeleteEnrichPolicyActionTests.java @@ -154,9 +154,7 @@ public void onFailure(final Exception e) { expectThrows( IndexNotFoundException.class, - () -> indicesAdmin().prepareGetIndex() - .setIndices(EnrichPolicy.getIndexName(name, 1001), EnrichPolicy.getIndexName(name, 1001)) - .get() + indicesAdmin().prepareGetIndex().setIndices(EnrichPolicy.getIndexName(name, 1001), EnrichPolicy.getIndexName(name, 1001)) ); if (destructiveRequiresName) { diff --git a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/400_connector_sync_job_post.yml b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/400_connector_sync_job_post.yml index 0403842cb0728..582a523605663 100644 --- a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/400_connector_sync_job_post.yml +++ b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/400_connector_sync_job_post.yml @@ -41,8 +41,68 @@ setup: - exists: created_at - exists: last_seen +--- +'Create connector sync job with complex connector document': + + - do: + connector.update_pipeline: + connector_id: test-connector + body: + pipeline: + extract_binary_content: true + name: test-pipeline + reduce_whitespace: true + run_ml_inference: false + + - match: { result: updated } + + - do: + connector.update_configuration: + connector_id: test-connector + body: + configuration: + some_field: + default_value: null + depends_on: + - field: some_field + value: 31 + display: numeric + label: Very important field + options: [ ] + order: 4 + required: true + sensitive: false + tooltip: Wow, this tooltip is useful. 
+ type: str + ui_restrictions: [ ] + validations: + - constraint: 0 + type: greater_than + value: 456 + + - match: { result: updated } + + - do: + connector_sync_job.post: + body: + id: test-connector + job_type: full + trigger_method: on_demand + + - set: { id: id } + + - match: { id: $id } + + - do: + connector_sync_job.get: + connector_sync_job_id: $id + + - match: { connector.id: test-connector } + - match: { connector.configuration.some_field.value: 456 } + - match: { connector.pipeline.name: test-pipeline } --- + 'Create connector sync job with missing job type - expect job type full as default': - do: connector_sync_job.post: diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/analytics/AnalyticsTemplateRegistry.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/analytics/AnalyticsTemplateRegistry.java index a1446606a21af..d9f433b8052bf 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/analytics/AnalyticsTemplateRegistry.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/analytics/AnalyticsTemplateRegistry.java @@ -65,11 +65,8 @@ public class AnalyticsTemplateRegistry extends IndexTemplateRegistry { TEMPLATE_VERSION_VARIABLE ) )) { - try { - componentTemplates.put( - config.getTemplateName(), - ComponentTemplate.parse(JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, config.loadBytes())) - ); + try (var parser = JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, config.loadBytes())) { + componentTemplates.put(config.getTemplateName(), ComponentTemplate.parse(parser)); } catch (IOException e) { throw new AssertionError(e); } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorTemplateRegistry.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorTemplateRegistry.java index 642295061d17a..c57650541b416 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorTemplateRegistry.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/ConnectorTemplateRegistry.java @@ -98,11 +98,8 @@ public class ConnectorTemplateRegistry extends IndexTemplateRegistry { ) )) { - try { - componentTemplates.put( - config.getTemplateName(), - ComponentTemplate.parse(JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, config.loadBytes())) - ); + try (var parser = JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, config.loadBytes())) { + componentTemplates.put(config.getTemplateName(), ComponentTemplate.parse(parser)); } catch (IOException e) { throw new AssertionError(e); } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJob.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJob.java index d623d8dab3834..84d91b7fe0f08 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJob.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJob.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.application.connector.syncjob; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.Strings; import 
org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -282,7 +283,7 @@ public ConnectorSyncJob(StreamInput in) throws IOException { ); PARSER.declareField( constructorArg(), - (p, c) -> ConnectorSyncJob.syncJobConnectorFromXContent(p), + (p, c) -> ConnectorSyncJob.syncJobConnectorFromXContent(p, null), CONNECTOR_FIELD, ObjectParser.ValueType.OBJECT ); @@ -327,12 +328,21 @@ private static Instant parseNullableInstant(XContentParser p) throws IOException } @SuppressWarnings("unchecked") - private static final ConstructingObjectParser SYNC_JOB_CONNECTOR_PARSER = new ConstructingObjectParser<>( + private static final ConstructingObjectParser SYNC_JOB_CONNECTOR_PARSER = new ConstructingObjectParser<>( "sync_job_connector", true, - (args) -> { + (args, connectorId) -> { int i = 0; - return new Connector.Builder().setConnectorId((String) args[i++]) + + // Parse the connector ID from the arguments. The ID uniquely identifies the connector. + String parsedConnectorId = (String) args[i++]; + + // Determine the actual connector ID to use. If the context parameter `connectorId` is not null or empty, + // it takes precedence over the `parsedConnectorId` extracted from the arguments. + // This approach allows for flexibility in specifying the connector ID, either from a context or as a parsed argument. + String syncJobConnectorId = Strings.isNullOrEmpty(connectorId) ? parsedConnectorId : connectorId; + + return new Connector.Builder().setConnectorId(syncJobConnectorId) .setFiltering((List) args[i++]) .setIndexName((String) args[i++]) .setLanguage((String) args[i++]) @@ -344,7 +354,7 @@ private static Instant parseNullableInstant(XContentParser p) throws IOException ); static { - SYNC_JOB_CONNECTOR_PARSER.declareString(constructorArg(), Connector.ID_FIELD); + SYNC_JOB_CONNECTOR_PARSER.declareString(optionalConstructorArg(), Connector.ID_FIELD); SYNC_JOB_CONNECTOR_PARSER.declareObjectArray( optionalConstructorArg(), (p, c) -> ConnectorFiltering.fromXContent(p), @@ -378,16 +388,16 @@ public static ConnectorSyncJob fromXContent(XContentParser parser) throws IOExce return PARSER.parse(parser, null); } - public static Connector syncJobConnectorFromXContentBytes(BytesReference source, XContentType xContentType) { + public static Connector syncJobConnectorFromXContentBytes(BytesReference source, String connectorId, XContentType xContentType) { try (XContentParser parser = XContentHelper.createParser(XContentParserConfiguration.EMPTY, source, xContentType)) { - return ConnectorSyncJob.syncJobConnectorFromXContent(parser); + return ConnectorSyncJob.syncJobConnectorFromXContent(parser, connectorId); } catch (IOException e) { throw new ElasticsearchParseException("Failed to parse a connector document.", e); } } - public static Connector syncJobConnectorFromXContent(XContentParser parser) throws IOException { - return SYNC_JOB_CONNECTOR_PARSER.parse(parser, null); + public static Connector syncJobConnectorFromXContent(XContentParser parser, String connectorId) throws IOException { + return SYNC_JOB_CONNECTOR_PARSER.parse(parser, connectorId); } public String getId() { diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobIndexService.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobIndexService.java index aac32dc5b6022..34567788e9331 100644 --- 
a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobIndexService.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobIndexService.java @@ -38,10 +38,7 @@ import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.application.connector.Connector; -import org.elasticsearch.xpack.application.connector.ConnectorConfiguration; -import org.elasticsearch.xpack.application.connector.ConnectorFiltering; import org.elasticsearch.xpack.application.connector.ConnectorIndexService; -import org.elasticsearch.xpack.application.connector.ConnectorIngestPipeline; import org.elasticsearch.xpack.application.connector.ConnectorSyncStatus; import org.elasticsearch.xpack.application.connector.ConnectorTemplateRegistry; import org.elasticsearch.xpack.application.connector.syncjob.action.PostConnectorSyncJobAction; @@ -450,22 +447,16 @@ public void onResponse(GetResponse response) { onFailure(new ResourceNotFoundException("Connector with id '" + connectorId + "' does not exist.")); return; } - - Map source = response.getSource(); - - @SuppressWarnings("unchecked") - final Connector syncJobConnectorInfo = new Connector.Builder().setConnectorId(connectorId) - .setFiltering((List) source.get(Connector.FILTERING_FIELD.getPreferredName())) - .setIndexName((String) source.get(Connector.INDEX_NAME_FIELD.getPreferredName())) - .setLanguage((String) source.get(Connector.LANGUAGE_FIELD.getPreferredName())) - .setPipeline((ConnectorIngestPipeline) source.get(Connector.PIPELINE_FIELD.getPreferredName())) - .setServiceType((String) source.get(Connector.SERVICE_TYPE_FIELD.getPreferredName())) - .setConfiguration( - (Map) source.get(Connector.CONFIGURATION_FIELD.getPreferredName()) - ) - .build(); - - listener.onResponse(syncJobConnectorInfo); + try { + final Connector syncJobConnectorInfo = ConnectorSyncJob.syncJobConnectorFromXContentBytes( + response.getSourceAsBytesRef(), + connectorId, + XContentType.JSON + ); + listener.onResponse(syncJobConnectorInfo); + } catch (Exception e) { + listener.onFailure(e); + } } @Override diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRuleCriteria.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRuleCriteria.java index 39a2b1c6ab6d2..ef42a7d7c64f2 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRuleCriteria.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRuleCriteria.java @@ -37,7 +37,7 @@ public class QueryRuleCriteria implements Writeable, ToXContentObject { - public static final TransportVersion CRITERIA_METADATA_VALUES_TRANSPORT_VERSION = TransportVersions.V_8_500_046; + public static final TransportVersion CRITERIA_METADATA_VALUES_TRANSPORT_VERSION = TransportVersions.V_8_500_061; private final QueryRuleCriteriaType criteriaType; private final String criteriaMetadata; private final List criteriaValues; diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRulesetListItem.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRulesetListItem.java index 0a1ff919493c3..fcd0f6be8fbcb 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRulesetListItem.java +++ 
b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRulesetListItem.java @@ -27,8 +27,7 @@ */ public class QueryRulesetListItem implements Writeable, ToXContentObject { - // TODO we need to actually bump transport version, but there's no point until main is merged. Placeholder for now. - public static final TransportVersion EXPANDED_RULESET_COUNT_TRANSPORT_VERSION = TransportVersions.V_8_500_052; + public static final TransportVersion EXPANDED_RULESET_COUNT_TRANSPORT_VERSION = TransportVersions.V_8_500_061; public static final ParseField RULESET_ID_FIELD = new ParseField("ruleset_id"); public static final ParseField RULE_TOTAL_COUNT_FIELD = new ParseField("rule_total_count"); diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/RuleQueryBuilder.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/RuleQueryBuilder.java index ebd78119ab7d5..78cde38ec8c4d 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/RuleQueryBuilder.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/RuleQueryBuilder.java @@ -73,7 +73,7 @@ public class RuleQueryBuilder extends AbstractQueryBuilder { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersions.V_8_500_040; + return TransportVersions.V_8_500_061; } public RuleQueryBuilder(QueryBuilder organicQuery, Map matchCriteria, String rulesetId) { diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/search/action/RestQuerySearchApplicationAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/search/action/RestQuerySearchApplicationAction.java index a7341c972156f..de0bb837acef8 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/search/action/RestQuerySearchApplicationAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/search/action/RestQuerySearchApplicationAction.java @@ -13,7 +13,7 @@ import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestCancellableNodeClient; -import org.elasticsearch.rest.action.RestChunkedToXContentListener; +import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener; import org.elasticsearch.xpack.application.EnterpriseSearch; import org.elasticsearch.xpack.application.EnterpriseSearchBaseRestHandler; import org.elasticsearch.xpack.application.utils.LicenseUtils; @@ -53,7 +53,7 @@ protected RestChannelConsumer innerPrepareRequest(RestRequest restRequest, NodeC } return channel -> { RestCancellableNodeClient cancelClient = new RestCancellableNodeClient(client, restRequest.getHttpChannel()); - cancelClient.execute(QuerySearchApplicationAction.INSTANCE, request, new RestChunkedToXContentListener<>(channel)); + cancelClient.execute(QuerySearchApplicationAction.INSTANCE, request, new RestRefCountedChunkedToXContentListener<>(channel)); }; } } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/search/action/RestRenderSearchApplicationQueryAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/search/action/RestRenderSearchApplicationQueryAction.java index a4ce64181c48e..97f30d2ca8722 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/search/action/RestRenderSearchApplicationQueryAction.java 
+++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/search/action/RestRenderSearchApplicationQueryAction.java @@ -42,7 +42,9 @@ protected RestChannelConsumer innerPrepareRequest(RestRequest restRequest, NodeC final String searchAppName = restRequest.param("name"); SearchApplicationSearchRequest request; if (restRequest.hasContent()) { - request = SearchApplicationSearchRequest.fromXContent(searchAppName, restRequest.contentParser()); + try (var parser = restRequest.contentParser()) { + request = SearchApplicationSearchRequest.fromXContent(searchAppName, parser); + } } else { request = new SearchApplicationSearchRequest(searchAppName); } diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobTests.java index 5eed1e5d1b58a..b82db8d04d3a9 100644 --- a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobTests.java +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/syncjob/ConnectorSyncJobTests.java @@ -400,7 +400,7 @@ public void testSyncJobConnectorFromXContent_WithAllFieldsSet() throws IOExcepti } """); - Connector connector = ConnectorSyncJob.syncJobConnectorFromXContentBytes(new BytesArray(content), XContentType.JSON); + Connector connector = ConnectorSyncJob.syncJobConnectorFromXContentBytes(new BytesArray(content), null, XContentType.JSON); assertThat(connector.getConnectorId(), equalTo("connector-id")); assertThat(connector.getFiltering().size(), equalTo(1)); @@ -474,7 +474,7 @@ public void testSyncJobConnectorFromXContent_WithAllNonOptionalFieldsSet_DoesNot } """); - ConnectorSyncJob.syncJobConnectorFromXContentBytes(new BytesArray(content), XContentType.JSON); + ConnectorSyncJob.syncJobConnectorFromXContentBytes(new BytesArray(content), null, XContentType.JSON); } private void assertTransportSerialization(ConnectorSyncJob testInstance) throws IOException { diff --git a/x-pack/plugin/eql/qa/common/src/main/resources/test_missing_events.toml b/x-pack/plugin/eql/qa/common/src/main/resources/test_missing_events.toml index bfe5465adebcf..0d546940c72a1 100644 --- a/x-pack/plugin/eql/qa/common/src/main/resources/test_missing_events.toml +++ b/x-pack/plugin/eql/qa/common/src/main/resources/test_missing_events.toml @@ -385,3 +385,16 @@ join_keys = ["foo", "foo", "foo", "foo", "baz", "baz"] +[[queries]] +name = "interleaved_3_missing" +query = ''' + sequence with maxspan=1h + ![ test1 where tag == "foobar" ] + [ test1 where tag == "normal" ] + ![ test1 where tag == "foobar" ] + [ test1 where tag == "normal" ] + ![ test1 where tag == "foobar" ] +''' +expected_event_ids = [-1, 1, -1, 2, -1, + -1, 2, -1, 4, -1] + diff --git a/x-pack/plugin/eql/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/eql/50_samples.yml b/x-pack/plugin/eql/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/eql/50_samples.yml new file mode 100644 index 0000000000000..0c413e809689a --- /dev/null +++ b/x-pack/plugin/eql/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/eql/50_samples.yml @@ -0,0 +1,80 @@ +--- +setup: + - do: + indices.create: + index: sample1 + body: + mappings: + properties: + ip: + type: ip + version: + type: version + missing_keyword: + type: keyword + type_test: + type: keyword + "@timestamp_pretty": + type: date + format: dd-MM-yyyy + event_type: + type: keyword + event: + properties: 
+ category: + type: alias + path: event_type + host: + type: keyword + os: + type: keyword + bool: + type: boolean + uptime: + type: long + port: + type: long + - do: + bulk: + refresh: true + body: + - '{"index" : { "_index" : "sample1" }}' + - '{"@timestamp_pretty":"12-12-2022","type_test":"abc","event_type":"alert","os":"win10","port":1234,"missing_keyword":"test","ip":"10.0.0.1","host":"doom","id":11,"version":"1.0.0","uptime":0}' + - '{"index" : { "_index" : "sample1" }}' + - '{"@timestamp_pretty":"13-12-2022","event_type":"alert","type_test":"abc","os":"win10","port":1,"host":"CS","id":12,"version":"1.2.0","uptime":5}' + - '{"index" : { "_index" : "sample1" }}' + - '{"@timestamp_pretty":"12-12-2022","event_type":"alert","type_test":"abc","bool":false,"os":"win10","port":1234,"host":"farcry","id":13,"version":"2.0.0","uptime":1}' + - '{"index" : { "_index" : "sample1" }}' + - '{"@timestamp_pretty":"13-12-2022","event_type":"alert","type_test":"abc","os":"slack","port":12,"host":"GTA","id":14,"version":"10.0.0","uptime":3}' + - '{"index" : { "_index" : "sample1" }}' + - '{"@timestamp_pretty":"17-12-2022","event_type":"alert","os":"fedora","port":1234,"host":"sniper 3d","id":15,"version":"20.1.0","uptime":6}' + - '{"index" : { "_index" : "sample1" }}' + - '{"@timestamp_pretty":"17-12-2022","event_type":"alert","bool":true,"os":"redhat","port":65123,"host":"doom","id":16,"version":"20.10.0"}' + - '{"index" : { "_index" : "sample1" }}' + - '{"@timestamp_pretty":"17-12-2022","event_type":"failure","bool":true,"os":"redhat","port":1234,"missing_keyword":"yyy","host":"doom","id":17,"version":"20.2.0","uptime":15}' + - '{"index" : { "_index" : "sample1" }}' + - '{"@timestamp_pretty":"12-12-2022","event_type":"success","os":"win10","port":512,"missing_keyword":"test","host":"doom","id":18,"version":"1.2.3","uptime":16}' + - '{"index" : { "_index" : "sample1" }}' + - '{"@timestamp_pretty":"15-12-2022","event_type":"success","bool":true,"os":"win10","port":12,"missing_keyword":"test","host":"GTA","id":19,"version":"1.2.3"}' + - '{"index" : { "_index" : "sample1" }}' + - '{"event_type":"alert","bool":true,"os":"win10","port":1234,"missing_keyword":null,"ip":"10.0.0.5","host":"farcry","id":110,"version":"1.2.3","uptime":1}' + +--- +# Test an empty reply due to query filtering +"Execute some EQL.": + - do: + eql.search: + index: sample1 + body: + query: 'sample by host [any where uptime > 0] by os [any where port > 100] by os [any where bool == true] by os' + filter: + range: + "@timestamp_pretty": + gte: now-5m + lte: now + + - match: {timed_out: false} + - match: {hits.total.value: 0} + - match: {hits.total.relation: "eq"} + - match: {hits.sequences: []} + diff --git a/x-pack/plugin/eql/src/internalClusterTest/java/org/elasticsearch/xpack/eql/action/AsyncEqlSearchActionIT.java b/x-pack/plugin/eql/src/internalClusterTest/java/org/elasticsearch/xpack/eql/action/AsyncEqlSearchActionIT.java index 646a1e896c473..9be24062a194f 100644 --- a/x-pack/plugin/eql/src/internalClusterTest/java/org/elasticsearch/xpack/eql/action/AsyncEqlSearchActionIT.java +++ b/x-pack/plugin/eql/src/internalClusterTest/java/org/elasticsearch/xpack/eql/action/AsyncEqlSearchActionIT.java @@ -150,7 +150,7 @@ public void testBasicAsyncExecution() throws Exception { assertThat(response, notNullValue()); assertThat(response.hits().events().size(), equalTo(1)); } else { - Exception ex = expectThrows(Exception.class, future::actionGet); + Exception ex = expectThrows(Exception.class, future); assertThat(ex.getCause().getMessage(), 
containsString("by zero")); } AcknowledgedResponse deleteResponse = client().execute( diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/action/EqlSearchResponse.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/action/EqlSearchResponse.java index c22cf7d390628..3be9e23c38b46 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/action/EqlSearchResponse.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/action/EqlSearchResponse.java @@ -281,7 +281,7 @@ private Event(StreamInput in) throws IOException { } else { fetchFields = null; } - if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_040)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { missing = in.readBoolean(); } else { missing = index.isEmpty(); @@ -304,7 +304,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeMap(fetchFields, StreamOutput::writeWriteable); } } - if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_040)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_061)) { // for BWC, 8.9.1+ does not have "missing" attribute, but it considers events with an empty index "" as missing events // see https://github.com/elastic/elasticsearch/pull/98130 out.writeBoolean(missing); diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sample/SampleIterator.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sample/SampleIterator.java index a96102dad6cfb..f4b933300dcd7 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sample/SampleIterator.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sample/SampleIterator.java @@ -147,6 +147,12 @@ private void advance(ActionListener listener) { private void queryForCompositeAggPage(ActionListener listener, final SampleQueryRequest request) { client.query(request, listener.delegateFailureAndWrap((delegate, r) -> { + // either the fields values or the fields themselves are missing + // or the filter applied on the eql query matches no documents + if (r.hasAggregations() == false) { + payload(delegate); + return; + } Aggregation a = r.getAggregations().get(COMPOSITE_AGG_NAME); if (a instanceof InternalComposite == false) { throw new EqlIllegalArgumentException("Unexpected aggregation result type returned [{}]", a.getClass()); diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/search/PITAwareQueryClient.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/search/PITAwareQueryClient.java index 4e4817d4c041d..befb2c7503515 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/search/PITAwareQueryClient.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/search/PITAwareQueryClient.java @@ -100,6 +100,7 @@ private void searchWithPIT(MultiSearchRequest search, ActionListener, Accountable { private final SequenceKey key; private final Match[] matches; + private int firstStage; private int currentStage = 0; @SuppressWarnings({ "rawtypes", "unchecked" }) - public Sequence(SequenceKey key, int stages, Ordinal ordinal, HitReference firstHit) { + public Sequence(SequenceKey key, int stages, int firstStage, Ordinal ordinal, HitReference firstHit) { Check.isTrue(stages >= 2, "A sequence requires at least 2 criteria, given [{}]", stages); this.key = key; this.matches = new Match[stages]; - this.matches[0] = new 
Match(ordinal, firstHit); + this.matches[firstStage] = new Match(ordinal, firstHit); + this.firstStage = firstStage; + this.currentStage = firstStage; } public void putMatch(int stage, Ordinal ordinal, HitReference hit) { @@ -56,7 +59,7 @@ public Ordinal ordinal() { } public Ordinal startOrdinal() { - return matches[0].ordinal(); + return matches[firstStage].ordinal(); } public List hits() { diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sequence/SequenceMatcher.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sequence/SequenceMatcher.java index adb8ee1b43c02..1ad9002f88999 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sequence/SequenceMatcher.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sequence/SequenceMatcher.java @@ -168,7 +168,7 @@ boolean match(int stage, Iterable> hits) { if (isFirstPositiveStage(stage)) { log.trace("Matching hit {} - track sequence", ko.ordinal); - Sequence seq = new Sequence(ko.key, numberOfStages, ko.ordinal, hit); + Sequence seq = new Sequence(ko.key, numberOfStages, stage, ko.ordinal, hit); if (lastPositiveStage == stage) { tryComplete(seq); } else { diff --git a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/action/EqlSearchResponseTests.java b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/action/EqlSearchResponseTests.java index 080cc26d81eb2..edbeb3d0a0d8c 100644 --- a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/action/EqlSearchResponseTests.java +++ b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/action/EqlSearchResponseTests.java @@ -289,7 +289,7 @@ private List mutateEvents(List original, TransportVersion version) e.id(), e.source(), version.onOrAfter(TransportVersions.V_7_13_0) ? e.fetchFields() : null, - version.onOrAfter(TransportVersions.V_8_500_040) ? e.missing() : e.index().isEmpty() + version.onOrAfter(TransportVersions.V_8_500_061) ? 
e.missing() : e.index().isEmpty() ) ); } diff --git a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/assembler/ImplicitTiebreakerTests.java b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/assembler/ImplicitTiebreakerTests.java index 85a34d7b6a943..f391e9bdae84b 100644 --- a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/assembler/ImplicitTiebreakerTests.java +++ b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/assembler/ImplicitTiebreakerTests.java @@ -13,7 +13,6 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchResponse.Clusters; -import org.elasticsearch.action.search.SearchResponseSections; import org.elasticsearch.common.breaker.NoopCircuitBreaker; import org.elasticsearch.core.TimeValue; import org.elasticsearch.search.DocValueFormat; @@ -83,8 +82,10 @@ public void query(QueryRequest r, ActionListener l) { ) ); SearchHits searchHits = new SearchHits(new SearchHit[] { searchHit }, new TotalHits(1, Relation.EQUAL_TO), 0.0f); - SearchResponseSections internal = new SearchResponseSections(searchHits, null, null, false, false, null, 0); - ActionListener.respondAndRelease(l, new SearchResponse(internal, null, 0, 1, 0, 0, null, Clusters.EMPTY)); + ActionListener.respondAndRelease( + l, + new SearchResponse(searchHits, null, null, false, false, null, 0, null, 0, 1, 0, 0, null, Clusters.EMPTY) + ); } @Override diff --git a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/assembler/SequenceSpecTests.java b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/assembler/SequenceSpecTests.java index 336526a1153a5..a7ac6637c2e56 100644 --- a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/assembler/SequenceSpecTests.java +++ b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/assembler/SequenceSpecTests.java @@ -14,7 +14,6 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchResponse.Clusters; -import org.elasticsearch.action.search.SearchResponseSections; import org.elasticsearch.action.support.ActionTestUtils; import org.elasticsearch.common.breaker.NoopCircuitBreaker; import org.elasticsearch.common.document.DocumentField; @@ -221,8 +220,10 @@ public void query(QueryRequest r, ActionListener l) { new TotalHits(eah.hits.size(), Relation.EQUAL_TO), 0.0f ); - SearchResponseSections internal = new SearchResponseSections(searchHits, null, null, false, false, null, 0); - ActionListener.respondAndRelease(l, new SearchResponse(internal, null, 0, 1, 0, 0, null, Clusters.EMPTY)); + ActionListener.respondAndRelease( + l, + new SearchResponse(searchHits, null, null, false, false, null, 0, null, 0, 1, 0, 0, null, Clusters.EMPTY) + ); } @Override diff --git a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sample/CircuitBreakerTests.java b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sample/CircuitBreakerTests.java index e787505f7dfe3..8c47bfeb8921d 100644 --- a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sample/CircuitBreakerTests.java +++ b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sample/CircuitBreakerTests.java @@ -18,7 +18,6 @@ import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; 
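// The SearchResponse construction changes in these enrich and EQL tests are one
// mechanical rewrite: the removed SearchResponseSections/InternalSearchResponse
// wrapper is inlined into a widened SearchResponse constructor. Reading the
// call sites in this diff, the flat argument order is:
//
//   new SearchResponse(hits, aggregations, suggest, timedOut, terminatedEarly,
//       profileResults, numReducePhases, scrollId, totalShards, successfulShards,
//       skippedShards, tookInMillis, shardFailures, clusters)
//
// so the old two-step form
//
//   SearchResponseSections internal =
//       new SearchResponseSections(hits, aggs, suggest, timedOut, terminatedEarly, profile, numReducePhases);
//   new SearchResponse(internal, scrollId, totalShards, successfulShards, skippedShards, tookInMillis, failures, clusters);
//
// collapses into a single call, as in the hunks below.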
import org.elasticsearch.action.search.SearchResponse.Clusters; -import org.elasticsearch.action.search.SearchResponseSections; import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.breaker.CircuitBreakingException; @@ -222,12 +221,16 @@ protected void @SuppressWarnings("unchecked") void handleSearchRequest(ActionListener listener, SearchRequest searchRequest) { Aggregations aggs = new Aggregations(List.of(newInternalComposite())); - - SearchResponseSections internal = new SearchResponseSections(null, aggs, null, false, false, null, 0); ActionListener.respondAndRelease( listener, (Response) new SearchResponse( - internal, + null, + aggs, + null, + false, + false, + null, + 0, null, 2, 0, diff --git a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/search/PITAwareQueryClientTests.java b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/search/PITAwareQueryClientTests.java index e12eec4833199..9c9bbfcdc5127 100644 --- a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/search/PITAwareQueryClientTests.java +++ b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/search/PITAwareQueryClientTests.java @@ -18,7 +18,6 @@ import org.elasticsearch.action.search.OpenPointInTimeResponse; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.search.SearchResponseSections; import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.breaker.NoopCircuitBreaker; @@ -243,9 +242,14 @@ void handleSearchRequest(ActionListener l) { new SearchSortValues(new Long[] { (long) ordinal, 1L }, new DocValueFormat[] { DocValueFormat.RAW, DocValueFormat.RAW }) ); SearchHits searchHits = new SearchHits(new SearchHit[] { searchHit }, new TotalHits(1, Relation.EQUAL_TO), 0.0f); - SearchResponseSections internal = new SearchResponseSections(searchHits, null, null, false, false, null, 0); - ActionListener.respondAndRelease(l, new SearchResponse(internal, null, 0, 1, 0, 0, null, Clusters.EMPTY)); + ActionListener.respondAndRelease( + l, + new SearchResponse(searchHits, null, null, false, false, null, 0, null, 0, 1, 0, 0, null, Clusters.EMPTY) + ); } @Override @@ -431,9 +431,14 @@ void handleSearchRequest(ActionListener
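// The generator hunks below rename the optional aggregator hook from
// {@code combineStates} to {@code combineIntermediate}. For orientation, a
// minimal sketch of the kind of declaration class the generated code is derived
// from. The class name and state handling are assumed for illustration; the
// annotations are the real ones from org.elasticsearch.compute.ann, LongState
// is from org.elasticsearch.compute.aggregation, and the optional evaluateFinal
// hook is omitted.
@Aggregator({ @IntermediateState(name = "sum", type = "LONG"), @IntermediateState(name = "seen", type = "BOOLEAN") })
class SumLongAggregatorSketch {
    public static LongState init() {
        return new LongState(0); // fresh per-driver state
    }

    public static long combine(long current, long v) {
        return current + v; // fold one raw input value into the state
    }

    // Optional hook: fold one row of a partial (intermediate) result. When it is
    // absent and the state is primitive, the generated addIntermediateInput
    // falls back to combine(); otherwise generation now fails fast.
    public static void combineIntermediate(LongState state, long sum, boolean seen) {
        if (seen) {
            state.longValue(combine(state.longValue(), sum));
        }
    }
}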

- * The generation code also looks for the optional methods {@code combineStates} + * The generation code also looks for the optional methods {@code combineIntermediate} * and {@code evaluateFinal} which are used to combine intermediate states and * produce the final output. If the first is missing then the generated code will * call the {@code combine} method to combine intermediate states. If the second diff --git a/x-pack/plugin/esql/compute/build.gradle b/x-pack/plugin/esql/compute/build.gradle index 1851e2f449da0..d95a9ffd862f4 100644 --- a/x-pack/plugin/esql/compute/build.gradle +++ b/x-pack/plugin/esql/compute/build.gradle @@ -208,6 +208,28 @@ tasks.named('stringTemplates').configure { it.inputFile = arrayBlockInputFile it.outputFile = "org/elasticsearch/compute/data/BooleanArrayBlock.java" } + // BigArray block implementations + File bigArrayBlockInputFile = new File("${projectDir}/src/main/java/org/elasticsearch/compute/data/X-BigArrayBlock.java.st") + template { + it.properties = intProperties + it.inputFile = bigArrayBlockInputFile + it.outputFile = "org/elasticsearch/compute/data/IntBigArrayBlock.java" + } + template { + it.properties = longProperties + it.inputFile = bigArrayBlockInputFile + it.outputFile = "org/elasticsearch/compute/data/LongBigArrayBlock.java" + } + template { + it.properties = doubleProperties + it.inputFile = bigArrayBlockInputFile + it.outputFile = "org/elasticsearch/compute/data/DoubleBigArrayBlock.java" + } + template { + it.properties = booleanProperties + it.inputFile = bigArrayBlockInputFile + it.outputFile = "org/elasticsearch/compute/data/BooleanBigArrayBlock.java" + } // vector blocks File vectorBlockInputFile = new File("${projectDir}/src/main/java/org/elasticsearch/compute/data/X-VectorBlock.java.st") template { diff --git a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/AggregatorImplementer.java b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/AggregatorImplementer.java index 6acddf6aa5cde..c00045d342fc2 100644 --- a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/AggregatorImplementer.java +++ b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/AggregatorImplementer.java @@ -69,7 +69,6 @@ public class AggregatorImplementer { private final ExecutableElement init; private final ExecutableElement combine; private final ExecutableElement combineValueCount; - private final ExecutableElement combineStates; private final ExecutableElement combineIntermediate; private final ExecutableElement evaluateFinal; private final ClassName implementation; @@ -95,7 +94,6 @@ public AggregatorImplementer(Elements elements, TypeElement declarationType, Int return firstParamType.isPrimitive() || firstParamType.toString().equals(stateType.toString()); }); this.combineValueCount = findMethod(declarationType, "combineValueCount"); - this.combineStates = findMethod(declarationType, "combineStates"); this.combineIntermediate = findMethod(declarationType, "combineIntermediate"); this.evaluateFinal = findMethod(declarationType, "evaluateFinal"); @@ -399,34 +397,30 @@ private MethodSpec addIntermediateInput() { builder.addAnnotation(Override.class).addModifiers(Modifier.PUBLIC).addParameter(PAGE, "page"); builder.addStatement("assert channels.size() == intermediateBlockCount()"); builder.addStatement("assert page.getBlockCount() >= channels.get(0) + intermediateStateDesc().size()"); - builder.addStatement("Block uncastBlock = page.getBlock(channels.get(0))"); -
builder.beginControlFlow("if (uncastBlock.areAllValuesNull())"); - { - builder.addStatement("return"); - builder.endControlFlow(); - } - int count = 0; - for (var interState : intermediateState) { + for (int i = 0; i < intermediateState.size(); i++) { + var interState = intermediateState.get(i); + ClassName blockType = blockType(interState.elementType()); + builder.addStatement("Block $L = page.getBlock(channels.get($L))", interState.name + "Uncast", i); + builder.beginControlFlow("if ($L.areAllValuesNull())", interState.name + "Uncast"); + { + builder.addStatement("return"); + builder.endControlFlow(); + } builder.addStatement( - "$T " + interState.name() + " = page.<$T>getBlock(channels.get(" + count + ")).asVector()", + "$T $L = (($T) $L).asVector()", vectorType(interState.elementType()), - blockType(interState.elementType()) - ); - count++; - } - final String first = intermediateState.get(0).name(); - builder.addStatement("assert " + first + ".getPositionCount() == 1"); - if (intermediateState.size() > 1) { - builder.addStatement( - "assert " - + intermediateState.stream() - .map(IntermediateStateDesc::name) - .skip(1) - .map(s -> first + ".getPositionCount() == " + s + ".getPositionCount()") - .collect(joining(" && ")) + interState.name(), + blockType, + interState.name() + "Uncast" ); + builder.addStatement("assert $L.getPositionCount() == 1", interState.name()); } - if (hasPrimitiveState()) { + if (combineIntermediate != null) { + if (intermediateState.stream().map(IntermediateStateDesc::elementType).anyMatch(n -> n.equals("BYTES_REF"))) { + builder.addStatement("$T scratch = new $T()", BYTES_REF, BYTES_REF); + } + builder.addStatement("$T.combineIntermediate(state, " + intermediateStateRowAccess() + ")", declarationType); + } else if (hasPrimitiveState()) { assert intermediateState.size() == 2; assert intermediateState.get(1).name().equals("seen"); builder.beginControlFlow("if (seen.getBoolean(0))"); @@ -438,10 +432,7 @@ private MethodSpec addIntermediateInput() { builder.endControlFlow(); } } else { - if (intermediateState.stream().map(IntermediateStateDesc::elementType).anyMatch(n -> n.equals("BYTES_REF"))) { - builder.addStatement("$T scratch = new $T()", BYTES_REF, BYTES_REF); - } - builder.addStatement("$T.combineIntermediate(state, " + intermediateStateRowAccess() + ")", declarationType); + throw new IllegalArgumentException("Don't know how to combine intermediate input. Define combineIntermediate"); } return builder.build(); } @@ -468,7 +459,7 @@ private String primitiveStateMethod() { return "doubleValue"; default: throw new IllegalArgumentException( - "don't know how to fetch primitive values from " + stateType + ". define combineStates." + "don't know how to fetch primitive values from " + stateType + ". define combineIntermediate." 
); } } @@ -493,7 +484,7 @@ private MethodSpec evaluateFinal() { .addParameter(DRIVER_CONTEXT, "driverContext"); if (stateTypeHasSeen) { builder.beginControlFlow("if (state.seen() == false)"); - builder.addStatement("blocks[offset] = $T.constantNullBlock(1, driverContext.blockFactory())", BLOCK); + builder.addStatement("blocks[offset] = driverContext.blockFactory().newConstantNullBlock(1)", BLOCK); builder.addStatement("return"); builder.endControlFlow(); } @@ -508,22 +499,13 @@ private MethodSpec evaluateFinal() { private void primitiveStateToResult(MethodSpec.Builder builder) { switch (stateType.toString()) { case "org.elasticsearch.compute.aggregation.IntState": - builder.addStatement( - "blocks[offset] = $T.newConstantBlockWith(state.intValue(), 1, driverContext.blockFactory())", - INT_BLOCK - ); + builder.addStatement("blocks[offset] = driverContext.blockFactory().newConstantIntBlockWith(state.intValue(), 1)"); return; case "org.elasticsearch.compute.aggregation.LongState": - builder.addStatement( - "blocks[offset] = $T.newConstantBlockWith(state.longValue(), 1, driverContext.blockFactory())", - LONG_BLOCK - ); + builder.addStatement("blocks[offset] = driverContext.blockFactory().newConstantLongBlockWith(state.longValue(), 1)"); return; case "org.elasticsearch.compute.aggregation.DoubleState": - builder.addStatement( - "blocks[offset] = $T.newConstantBlockWith(state.doubleValue(), 1, driverContext.blockFactory())", - DOUBLE_BLOCK - ); + builder.addStatement("blocks[offset] = driverContext.blockFactory().newConstantDoubleBlockWith(state.doubleValue(), 1)"); return; default: throw new IllegalArgumentException("don't know how to convert state to result: " + stateType); diff --git a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/ConsumeProcessor.java b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/ConsumeProcessor.java index 677740862cc04..6b218fab7affb 100644 --- a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/ConsumeProcessor.java +++ b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/ConsumeProcessor.java @@ -40,7 +40,7 @@ public Set getSupportedAnnotationTypes() { "org.elasticsearch.xpack.esql.expression.function.FunctionInfo", "org.elasticsearch.xpack.esql.expression.function.Param", "org.elasticsearch.rest.ServerlessScope", - + "org.elasticsearch.xcontent.ParserConstructor", Fixed.class.getName() ); } diff --git a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/Methods.java b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/Methods.java index f651ab2a316aa..6cd72bd643c32 100644 --- a/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/Methods.java +++ b/x-pack/plugin/esql/compute/gen/src/main/java/org/elasticsearch/compute/gen/Methods.java @@ -194,7 +194,7 @@ static String vectorAccessorName(String elementTypeName) { case "DOUBLE" -> "getDouble"; case "BYTES_REF" -> "getBytesRef"; default -> throw new IllegalArgumentException( - "don't know how to fetch primitive values from " + elementTypeName + ". define combineStates." + "don't know how to fetch primitive values from " + elementTypeName + ". define combineIntermediate." 
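The `evaluateFinal` and `primitiveStateToResult` changes are the same substitution applied twice: static helpers (`Block.constantNullBlock`, `XBlock.newConstantBlockWith`) are replaced by calls routed through the `DriverContext`'s `BlockFactory`. Roughly what the emitted method now looks like for a long-valued state (hand-written, simplified):

```java
// Hand-written approximation of the emitted evaluateFinal for a LongState.
public void evaluateFinal(Block[] blocks, int offset, DriverContext driverContext) {
    if (state.seen() == false) {
        // No input rows seen: emit a single-position constant null block.
        blocks[offset] = driverContext.blockFactory().newConstantNullBlock(1);
        return;
    }
    blocks[offset] = driverContext.blockFactory().newConstantLongBlockWith(state.longValue(), 1);
}
```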
); }; } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/DoubleArrayState.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/DoubleArrayState.java index f77f1893caa01..5b82950c7de37 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/DoubleArrayState.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/DoubleArrayState.java @@ -10,7 +10,6 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.DoubleArray; import org.elasticsearch.compute.data.Block; -import org.elasticsearch.compute.data.BooleanBlock; import org.elasticsearch.compute.data.DoubleBlock; import org.elasticsearch.compute.data.DoubleVector; import org.elasticsearch.compute.data.IntVector; @@ -60,14 +59,14 @@ void set(int groupId, double value) { Block toValuesBlock(org.elasticsearch.compute.data.IntVector selected, DriverContext driverContext) { if (false == trackingGroupIds()) { - try (DoubleVector.Builder builder = DoubleVector.newVectorBuilder(selected.getPositionCount(), driverContext.blockFactory())) { + try (DoubleVector.Builder builder = driverContext.blockFactory().newDoubleVectorFixedBuilder(selected.getPositionCount())) { for (int i = 0; i < selected.getPositionCount(); i++) { builder.appendDouble(values.get(selected.getInt(i))); } return builder.build().asBlock(); } } - try (DoubleBlock.Builder builder = DoubleBlock.newBlockBuilder(selected.getPositionCount(), driverContext.blockFactory())) { + try (DoubleBlock.Builder builder = driverContext.blockFactory().newDoubleBlockBuilder(selected.getPositionCount())) { for (int i = 0; i < selected.getPositionCount(); i++) { int group = selected.getInt(i); if (hasValue(group)) { @@ -98,8 +97,8 @@ public void toIntermediate( ) { assert blocks.length >= offset + 2; try ( - var valuesBuilder = DoubleBlock.newBlockBuilder(selected.getPositionCount(), driverContext.blockFactory()); - var hasValueBuilder = BooleanBlock.newBlockBuilder(selected.getPositionCount(), driverContext.blockFactory()) + var valuesBuilder = driverContext.blockFactory().newDoubleBlockBuilder(selected.getPositionCount()); + var hasValueBuilder = driverContext.blockFactory().newBooleanVectorFixedBuilder(selected.getPositionCount()) ) { for (int i = 0; i < selected.getPositionCount(); i++) { int group = selected.getInt(i); @@ -111,7 +110,7 @@ public void toIntermediate( hasValueBuilder.appendBoolean(hasValue(group)); } blocks[offset + 0] = valuesBuilder.build(); - blocks[offset + 1] = hasValueBuilder.build(); + blocks[offset + 1] = hasValueBuilder.build().asBlock(); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IntArrayState.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IntArrayState.java index 82578090503ab..0234f36f6675c 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IntArrayState.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/IntArrayState.java @@ -10,7 +10,6 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.IntArray; import org.elasticsearch.compute.data.Block; -import org.elasticsearch.compute.data.BooleanBlock; import org.elasticsearch.compute.data.IntBlock; import org.elasticsearch.compute.data.IntVector; import 
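The `toIntermediate` change in the array states deserves a word: the `hasValue` column is dense, single-valued, and never null, so the cheaper pre-sized vector fixed builder is enough, with `asBlock()` wrapping the result for the `Page`. Distilled, under those assumptions:

```java
// Distilled from the toIntermediate change: a fixed vector builder for the
// dense seen-flags column, wrapped via asBlock() when placed in the output.
try (var hasValueBuilder = driverContext.blockFactory().newBooleanVectorFixedBuilder(selected.getPositionCount())) {
    for (int i = 0; i < selected.getPositionCount(); i++) {
        hasValueBuilder.appendBoolean(hasValue(selected.getInt(i)));
    }
    blocks[offset + 1] = hasValueBuilder.build().asBlock();
}
```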
org.elasticsearch.compute.operator.DriverContext; @@ -59,14 +58,14 @@ void set(int groupId, int value) { Block toValuesBlock(org.elasticsearch.compute.data.IntVector selected, DriverContext driverContext) { if (false == trackingGroupIds()) { - try (IntVector.Builder builder = IntVector.newVectorBuilder(selected.getPositionCount(), driverContext.blockFactory())) { + try (IntVector.Builder builder = driverContext.blockFactory().newIntVectorFixedBuilder(selected.getPositionCount())) { for (int i = 0; i < selected.getPositionCount(); i++) { builder.appendInt(values.get(selected.getInt(i))); } return builder.build().asBlock(); } } - try (IntBlock.Builder builder = IntBlock.newBlockBuilder(selected.getPositionCount(), driverContext.blockFactory())) { + try (IntBlock.Builder builder = driverContext.blockFactory().newIntBlockBuilder(selected.getPositionCount())) { for (int i = 0; i < selected.getPositionCount(); i++) { int group = selected.getInt(i); if (hasValue(group)) { @@ -97,8 +96,8 @@ public void toIntermediate( ) { assert blocks.length >= offset + 2; try ( - var valuesBuilder = IntBlock.newBlockBuilder(selected.getPositionCount(), driverContext.blockFactory()); - var hasValueBuilder = BooleanBlock.newBlockBuilder(selected.getPositionCount(), driverContext.blockFactory()) + var valuesBuilder = driverContext.blockFactory().newIntBlockBuilder(selected.getPositionCount()); + var hasValueBuilder = driverContext.blockFactory().newBooleanVectorFixedBuilder(selected.getPositionCount()) ) { for (int i = 0; i < selected.getPositionCount(); i++) { int group = selected.getInt(i); @@ -110,7 +109,7 @@ public void toIntermediate( hasValueBuilder.appendBoolean(hasValue(group)); } blocks[offset + 0] = valuesBuilder.build(); - blocks[offset + 1] = hasValueBuilder.build(); + blocks[offset + 1] = hasValueBuilder.build().asBlock(); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/LongArrayState.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/LongArrayState.java index f77d22fb1d26a..860bf43eaad82 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/LongArrayState.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/aggregation/LongArrayState.java @@ -10,7 +10,6 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.LongArray; import org.elasticsearch.compute.data.Block; -import org.elasticsearch.compute.data.BooleanBlock; import org.elasticsearch.compute.data.IntVector; import org.elasticsearch.compute.data.LongBlock; import org.elasticsearch.compute.data.LongVector; @@ -66,14 +65,14 @@ void increment(int groupId, long value) { Block toValuesBlock(org.elasticsearch.compute.data.IntVector selected, DriverContext driverContext) { if (false == trackingGroupIds()) { - try (LongVector.Builder builder = LongVector.newVectorBuilder(selected.getPositionCount(), driverContext.blockFactory())) { + try (LongVector.Builder builder = driverContext.blockFactory().newLongVectorFixedBuilder(selected.getPositionCount())) { for (int i = 0; i < selected.getPositionCount(); i++) { builder.appendLong(values.get(selected.getInt(i))); } return builder.build().asBlock(); } } - try (LongBlock.Builder builder = LongBlock.newBlockBuilder(selected.getPositionCount(), driverContext.blockFactory())) { + try (LongBlock.Builder builder = driverContext.blockFactory().newLongBlockBuilder(selected.getPositionCount())) { for (int i = 
0; i < selected.getPositionCount(); i++) { int group = selected.getInt(i); if (hasValue(group)) { @@ -104,8 +103,8 @@ public void toIntermediate( ) { assert blocks.length >= offset + 2; try ( - var valuesBuilder = LongBlock.newBlockBuilder(selected.getPositionCount(), driverContext.blockFactory()); - var hasValueBuilder = BooleanBlock.newBlockBuilder(selected.getPositionCount(), driverContext.blockFactory()) + var valuesBuilder = driverContext.blockFactory().newLongBlockBuilder(selected.getPositionCount()); + var hasValueBuilder = driverContext.blockFactory().newBooleanVectorFixedBuilder(selected.getPositionCount()) ) { for (int i = 0; i < selected.getPositionCount(); i++) { int group = selected.getInt(i); @@ -117,7 +116,7 @@ public void toIntermediate( hasValueBuilder.appendBoolean(hasValue(group)); } blocks[offset + 0] = valuesBuilder.build(); - blocks[offset + 1] = hasValueBuilder.build(); + blocks[offset + 1] = hasValueBuilder.build().asBlock(); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanArrayBlock.java index 7c2723163197a..f9b8358faee6b 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanArrayBlock.java @@ -8,25 +8,21 @@ package org.elasticsearch.compute.data; import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.core.Releasables; -import java.util.Arrays; import java.util.BitSet; /** - * Block implementation that stores an array of boolean. + * Block implementation that stores values in a {@link BooleanArrayVector}. * This class is generated. Do not edit it. 
*/ -public final class BooleanArrayBlock extends AbstractArrayBlock implements BooleanBlock { +final class BooleanArrayBlock extends AbstractArrayBlock implements BooleanBlock { private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(BooleanArrayBlock.class); - private final boolean[] values; + private final BooleanArrayVector vector; - public BooleanArrayBlock(boolean[] values, int positionCount, int[] firstValueIndexes, BitSet nulls, MvOrdering mvOrdering) { - this(values, positionCount, firstValueIndexes, nulls, mvOrdering, BlockFactory.getNonBreakingInstance()); - } - - public BooleanArrayBlock( + BooleanArrayBlock( boolean[] values, int positionCount, int[] firstValueIndexes, @@ -35,7 +31,7 @@ public BooleanArrayBlock( BlockFactory blockFactory ) { super(positionCount, firstValueIndexes, nulls, mvOrdering, blockFactory); - this.values = values; + this.vector = new BooleanArrayVector(values, values.length, blockFactory); } @Override @@ -45,7 +41,7 @@ public BooleanVector asVector() { @Override public boolean getBoolean(int valueIndex) { - return values[valueIndex]; + return vector.getBoolean(valueIndex); } @Override @@ -83,7 +79,7 @@ public BooleanBlock expand() { incRef(); return this; } - // TODO use reference counting to share the values + // TODO use reference counting to share the vector try (var builder = blockFactory().newBooleanBlockBuilder(firstValueIndexes[getPositionCount()])) { for (int pos = 0; pos < getPositionCount(); pos++) { if (isNull(pos)) { @@ -100,14 +96,13 @@ public BooleanBlock expand() { } } - public static long ramBytesEstimated(boolean[] values, int[] firstValueIndexes, BitSet nullsMask) { - return BASE_RAM_BYTES_USED + RamUsageEstimator.sizeOf(values) + BlockRamUsageEstimator.sizeOf(firstValueIndexes) - + BlockRamUsageEstimator.sizeOfBitSet(nullsMask); + private long ramBytesUsedOnlyBlock() { + return BASE_RAM_BYTES_USED + BlockRamUsageEstimator.sizeOf(firstValueIndexes) + BlockRamUsageEstimator.sizeOfBitSet(nullsMask); } @Override public long ramBytesUsed() { - return ramBytesEstimated(values, firstValueIndexes, nullsMask); + return ramBytesUsedOnlyBlock() + vector.ramBytesUsed(); } @Override @@ -130,13 +125,20 @@ public String toString() { + getPositionCount() + ", mvOrdering=" + mvOrdering() - + ", values=" - + Arrays.toString(values) + + ", vector=" + + vector + ']'; } + @Override + public void allowPassingToDifferentDriver() { + super.allowPassingToDifferentDriver(); + vector.allowPassingToDifferentDriver(); + } + @Override public void closeInternal() { - blockFactory().adjustBreaker(-ramBytesUsed(), true); + blockFactory().adjustBreaker(-ramBytesUsedOnlyBlock(), true); + Releasables.closeExpectNoException(vector); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanArrayVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanArrayVector.java index 5aa8724eb0ca2..114d924df467c 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanArrayVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanArrayVector.java @@ -15,27 +15,20 @@ * Vector implementation that stores an array of boolean values. * This class is generated. Do not edit it. 
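The memory-accounting split in `BooleanArrayBlock` (and its siblings below) is the heart of this refactor: the block now reports and settles only its own overhead (`ramBytesUsedOnlyBlock`), while the wrapped vector charges and refunds the values. A self-contained schematic of the invariant, with stand-in types:

```java
// Schematic of the split accounting: total = block overhead + vector bytes,
// and each side refunds exactly what it charged. Stand-in types.
final class Breaker {
    long used;
    void adjust(long delta) { used += delta; }
}

final class Vec implements AutoCloseable {
    final Breaker breaker;
    final long bytes;
    Vec(Breaker breaker, long bytes) {
        this.breaker = breaker;
        this.bytes = bytes;
        breaker.adjust(bytes); // the vector charges the values
    }
    @Override public void close() { breaker.adjust(-bytes); }
}

final class Blk implements AutoCloseable {
    final Breaker breaker;
    final Vec vector;
    final long blockOnlyBytes; // firstValueIndexes + null mask + header
    Blk(Breaker breaker, Vec vector, long blockOnlyBytes) {
        this.breaker = breaker;
        this.vector = vector;
        this.blockOnlyBytes = blockOnlyBytes;
        breaker.adjust(blockOnlyBytes); // the block charges only its overhead
    }
    long ramBytesUsed() { return blockOnlyBytes + vector.bytes; }
    @Override public void close() {
        breaker.adjust(-blockOnlyBytes); // refund only our share...
        vector.close();                  // ...the vector refunds the rest
    }
}
```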
*/ -public final class BooleanArrayVector extends AbstractVector implements BooleanVector { +final class BooleanArrayVector extends AbstractVector implements BooleanVector { static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(BooleanArrayVector.class); private final boolean[] values; - private final BooleanBlock block; - - public BooleanArrayVector(boolean[] values, int positionCount) { - this(values, positionCount, BlockFactory.getNonBreakingInstance()); - } - - public BooleanArrayVector(boolean[] values, int positionCount, BlockFactory blockFactory) { + BooleanArrayVector(boolean[] values, int positionCount, BlockFactory blockFactory) { super(positionCount, blockFactory); this.values = values; - this.block = new BooleanVectorBlock(this); } @Override public BooleanBlock asBlock() { - return block; + return new BooleanVectorBlock(this); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBigArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBigArrayBlock.java new file mode 100644 index 0000000000000..17ed741bd59da --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBigArrayBlock.java @@ -0,0 +1,145 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.data; + +import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.util.BitArray; +import org.elasticsearch.core.Releasables; + +import java.util.BitSet; + +/** + * Block implementation that stores values in a {@link BooleanBigArrayVector}. Does not take ownership of the given + * {@link BitArray} and does not adjust circuit breakers to account for it. + * This class is generated. Do not edit it. + */ +public final class BooleanBigArrayBlock extends AbstractArrayBlock implements BooleanBlock { + + private static final long BASE_RAM_BYTES_USED = 0; // TODO: fix this + private final BooleanBigArrayVector vector; + + public BooleanBigArrayBlock( + BitArray values, + int positionCount, + int[] firstValueIndexes, + BitSet nulls, + MvOrdering mvOrdering, + BlockFactory blockFactory + ) { + super(positionCount, firstValueIndexes, nulls, mvOrdering, blockFactory); + this.vector = new BooleanBigArrayVector(values, (int) values.size(), blockFactory); + } + + @Override + public BooleanVector asVector() { + return null; + } + + @Override + public boolean getBoolean(int valueIndex) { + return vector.getBoolean(valueIndex); + } + + @Override + public BooleanBlock filter(int... 
positions) { + try (var builder = blockFactory().newBooleanBlockBuilder(positions.length)) { + for (int pos : positions) { + if (isNull(pos)) { + builder.appendNull(); + continue; + } + int valueCount = getValueCount(pos); + int first = getFirstValueIndex(pos); + if (valueCount == 1) { + builder.appendBoolean(getBoolean(getFirstValueIndex(pos))); + } else { + builder.beginPositionEntry(); + for (int c = 0; c < valueCount; c++) { + builder.appendBoolean(getBoolean(first + c)); + } + builder.endPositionEntry(); + } + } + return builder.mvOrdering(mvOrdering()).build(); + } + } + + @Override + public ElementType elementType() { + return ElementType.BOOLEAN; + } + + @Override + public BooleanBlock expand() { + if (firstValueIndexes == null) { + incRef(); + return this; + } + // TODO use reference counting to share the vector + try (var builder = blockFactory().newBooleanBlockBuilder(firstValueIndexes[getPositionCount()])) { + for (int pos = 0; pos < getPositionCount(); pos++) { + if (isNull(pos)) { + builder.appendNull(); + continue; + } + int first = getFirstValueIndex(pos); + int end = first + getValueCount(pos); + for (int i = first; i < end; i++) { + builder.appendBoolean(getBoolean(i)); + } + } + return builder.mvOrdering(MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING).build(); + } + } + + private long ramBytesUsedOnlyBlock() { + return BASE_RAM_BYTES_USED + BlockRamUsageEstimator.sizeOf(firstValueIndexes) + BlockRamUsageEstimator.sizeOfBitSet(nullsMask); + } + + @Override + public long ramBytesUsed() { + return ramBytesUsedOnlyBlock() + RamUsageEstimator.sizeOf(vector); + } + + @Override + public boolean equals(Object obj) { + if (obj instanceof BooleanBlock that) { + return BooleanBlock.equals(this, that); + } + return false; + } + + @Override + public int hashCode() { + return BooleanBlock.hash(this); + } + + @Override + public String toString() { + return getClass().getSimpleName() + + "[positions=" + + getPositionCount() + + ", mvOrdering=" + + mvOrdering() + + ", ramBytesUsed=" + + vector.ramBytesUsed() + + ']'; + } + + @Override + public void allowPassingToDifferentDriver() { + super.allowPassingToDifferentDriver(); + vector.allowPassingToDifferentDriver(); + } + + @Override + public void closeInternal() { + blockFactory().adjustBreaker(-ramBytesUsedOnlyBlock(), true); + Releasables.closeExpectNoException(vector); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBigArrayVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBigArrayVector.java index 2621ec612944e..9618edb1fa77a 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBigArrayVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBigArrayVector.java @@ -12,30 +12,24 @@ import org.elasticsearch.core.Releasable; /** - * Vector implementation that defers to an enclosed BooleanArray. + * Vector implementation that defers to an enclosed {@link BitArray}. + * Does not take ownership of the array and does not adjust circuit breakers to account for it. * This class is generated. Do not edit it. 
*/ public final class BooleanBigArrayVector extends AbstractVector implements BooleanVector, Releasable { - private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(BooleanBigArrayVector.class); + private static final long BASE_RAM_BYTES_USED = 0; // FIXME private final BitArray values; - private final BooleanBlock block; - - public BooleanBigArrayVector(BitArray values, int positionCount) { - this(values, positionCount, BlockFactory.getNonBreakingInstance()); - } - public BooleanBigArrayVector(BitArray values, int positionCount, BlockFactory blockFactory) { super(positionCount, blockFactory); this.values = values; - this.block = new BooleanVectorBlock(this); } @Override public BooleanBlock asBlock() { - return block; + return new BooleanVectorBlock(this); } @Override @@ -71,11 +65,9 @@ public BooleanVector filter(int... positions) { } @Override - public void close() { - if (released) { - throw new IllegalStateException("can't release already released vector [" + this + "]"); - } - released = true; + public void closeInternal() { + // The circuit breaker that tracks the values {@link BitArray} is adjusted outside + // of this class. values.close(); } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBlock.java index 352ee783d8614..fffa3af137d76 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBlock.java @@ -18,7 +18,7 @@ * Block that stores boolean values. * This class is generated. Do not edit it. */ -public sealed interface BooleanBlock extends Block permits BooleanArrayBlock, BooleanVectorBlock, ConstantNullBlock { +public sealed interface BooleanBlock extends Block permits BooleanArrayBlock, BooleanVectorBlock, ConstantNullBlock, BooleanBigArrayBlock { /** * Retrieves the boolean value stored at the given value index. @@ -166,44 +166,6 @@ static int hash(BooleanBlock block) { return result; } - /** - * Returns a builder using the {@link BlockFactory#getNonBreakingInstance non-breaking block factory}. - * @deprecated use {@link BlockFactory#newBooleanBlockBuilder} - */ - // Eventually, we want to remove this entirely, always passing an explicit BlockFactory - @Deprecated - static Builder newBlockBuilder(int estimatedSize) { - return newBlockBuilder(estimatedSize, BlockFactory.getNonBreakingInstance()); - } - - /** - * Returns a builder. - * @deprecated use {@link BlockFactory#newBooleanBlockBuilder} - */ - @Deprecated - static Builder newBlockBuilder(int estimatedSize, BlockFactory blockFactory) { - return blockFactory.newBooleanBlockBuilder(estimatedSize); - } - - /** - * Returns a constant block built by the {@link BlockFactory#getNonBreakingInstance non-breaking block factory}. - * @deprecated use {@link BlockFactory#newConstantBooleanBlockWith} - */ - // Eventually, we want to remove this entirely, always passing an explicit BlockFactory - @Deprecated - static BooleanBlock newConstantBlockWith(boolean value, int positions) { - return newConstantBlockWith(value, positions, BlockFactory.getNonBreakingInstance()); - } - - /** - * Returns a constant block. 
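The move from overriding `close()` to implementing `closeInternal()` follows the template-method shape of the vector base class: the base handles the released-state bookkeeping once, and subclasses only free what they own. A minimal sketch of that pattern, assuming a simple single-owner lifecycle (the real base class also participates in reference counting):

```java
// Template-method sketch of close()/closeInternal(); single-owner model only.
abstract class ManagedVector implements AutoCloseable {
    private boolean released;

    @Override
    public final void close() {
        if (released) {
            throw new IllegalStateException("can't release already released vector [" + this + "]");
        }
        released = true;
        closeInternal(); // subclass frees only its own resources
    }

    protected abstract void closeInternal();
}
```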
- * @deprecated use {@link BlockFactory#newConstantBooleanBlockWith} - */ - @Deprecated - static BooleanBlock newConstantBlockWith(boolean value, int positions, BlockFactory blockFactory) { - return blockFactory.newConstantBooleanBlockWith(value, positions); - } - /** * Builder for {@link BooleanBlock} */ diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBlockBuilder.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBlockBuilder.java index 1c3549c06ca87..651422f6716fb 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBlockBuilder.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBlockBuilder.java @@ -9,6 +9,7 @@ import org.apache.lucene.util.RamUsageEstimator; import org.elasticsearch.common.breaker.CircuitBreakingException; +import org.elasticsearch.common.util.BitArray; import java.util.Arrays; @@ -179,6 +180,31 @@ public BooleanBlockBuilder mvOrdering(Block.MvOrdering mvOrdering) { return this; } + private BooleanBlock buildBigArraysBlock() { + final BooleanBlock theBlock; + final BitArray array = new BitArray(valueCount, blockFactory.bigArrays()); + for (int i = 0; i < valueCount; i++) { + if (values[i]) { + array.set(i); + } + } + if (isDense() && singleValued()) { + theBlock = new BooleanBigArrayVector(array, positionCount, blockFactory).asBlock(); + } else { + theBlock = new BooleanBigArrayBlock(array, positionCount, firstValueIndexes, nullsMask, mvOrdering, blockFactory); + } + /* + * Update the breaker with the actual bytes used. + * We pass false below even though we've used the bytes. That's weird, + * but if we break here we will throw away the used memory, letting + * it be deallocated. The exception will bubble up and the builder will + * still technically be open, meaning the calling code should close it + * which will return all used memory to the breaker. 
+ */ + blockFactory.adjustBreaker(theBlock.ramBytesUsed() - estimatedBytes - array.ramBytesUsed(), false); + return theBlock; + } + @Override public BooleanBlock build() { try { @@ -187,20 +213,26 @@ public BooleanBlock build() { if (hasNonNullValue && positionCount == 1 && valueCount == 1) { theBlock = blockFactory.newConstantBooleanBlockWith(values[0], 1, estimatedBytes); } else { - if (values.length - valueCount > 1024 || valueCount < (values.length / 2)) { - values = Arrays.copyOf(values, valueCount); - } - if (isDense() && singleValued()) { - theBlock = blockFactory.newBooleanArrayVector(values, positionCount, estimatedBytes).asBlock(); + if (estimatedBytes > blockFactory.maxPrimitiveArrayBytes()) { + theBlock = buildBigArraysBlock(); } else { - theBlock = blockFactory.newBooleanArrayBlock( - values, - positionCount, - firstValueIndexes, - nullsMask, - mvOrdering, - estimatedBytes - ); + if (values.length - valueCount > 1024 || valueCount < (values.length / 2)) { + adjustBreaker(valueCount * elementSize()); + values = Arrays.copyOf(values, valueCount); + adjustBreaker(-values.length * elementSize()); + } + if (isDense() && singleValued()) { + theBlock = blockFactory.newBooleanArrayVector(values, positionCount, estimatedBytes).asBlock(); + } else { + theBlock = blockFactory.newBooleanArrayBlock( + values, + positionCount, + firstValueIndexes, + nullsMask, + mvOrdering, + estimatedBytes + ); + } } } built(); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanVector.java index ec4ab8f7def1c..7c86f40981ec7 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanVector.java @@ -101,40 +101,10 @@ default void writeTo(StreamOutput out) throws IOException { } } - /** - * Returns a builder using the {@link BlockFactory#getNonBreakingInstance nonbreaking block factory}. - * @deprecated use {@link BlockFactory#newBooleanVectorBuilder} - */ - // Eventually, we want to remove this entirely, always passing an explicit BlockFactory - @Deprecated - static Builder newVectorBuilder(int estimatedSize) { - return newVectorBuilder(estimatedSize, BlockFactory.getNonBreakingInstance()); - } - - /** - * Creates a builder that grows as needed. Prefer {@link #newVectorFixedBuilder} - * if you know the size up front because it's faster. - * @deprecated use {@link BlockFactory#newBooleanVectorBuilder} - */ - @Deprecated - static Builder newVectorBuilder(int estimatedSize, BlockFactory blockFactory) { - return blockFactory.newBooleanVectorBuilder(estimatedSize); - } - - /** - * Creates a builder that never grows. Prefer this over {@link #newVectorBuilder} - * if you know the size up front because it's faster. - * @deprecated use {@link BlockFactory#newBooleanVectorFixedBuilder} - */ - @Deprecated - static FixedBuilder newVectorFixedBuilder(int size, BlockFactory blockFactory) { - return blockFactory.newBooleanVectorFixedBuilder(size); - } - /** * A builder that grows as needed. */ - sealed interface Builder extends Vector.Builder permits BooleanVectorBuilder { + sealed interface Builder extends Vector.Builder permits BooleanVectorBuilder, FixedBuilder { /** * Appends a boolean to the current entry. 
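The breaker arithmetic in `buildBigArraysBlock` is easy to misread: by the time it runs, the builder has already charged `estimatedBytes` and the `BitArray` has charged its own bytes through `BigArrays`, so the final `adjustBreaker` call only settles the difference to the block's true footprint (and, per the comment above, passes `false` so a break at this point lets the still-open builder's close return everything). A worked settle-up with invented figures:

```java
// Worked settle-up mirroring buildBigArraysBlock; figures invented.
static long settleUp() {
    long estimatedBytes = 4096; // charged by the builder while appending
    long arrayBytes = 3200;     // charged separately by the BitArray via BigArrays
    long actualBytes = 7424;    // theBlock.ramBytesUsed(): block overhead + array

    // Already on the breaker: 4096 + 3200 = 7296, so only the remainder
    // (possibly negative, i.e. a refund) is adjusted now.
    return actualBytes - estimatedBytes - arrayBytes; // = 128
}
```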
*/ @@ -147,13 +117,11 @@ sealed interface Builder extends Vector.Builder permits BooleanVectorBuilder { /** * A builder that never grows. */ - sealed interface FixedBuilder extends Vector.Builder permits BooleanVectorFixedBuilder { + sealed interface FixedBuilder extends Builder permits BooleanVectorFixedBuilder { /** * Appends a boolean to the current entry. */ - FixedBuilder appendBoolean(boolean value); - @Override - BooleanVector build(); + FixedBuilder appendBoolean(boolean value); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanVectorBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanVectorBlock.java index 3fa4a90a6e734..d707e3cf901c1 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanVectorBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanVectorBlock.java @@ -10,7 +10,7 @@ import org.elasticsearch.core.Releasables; /** - * Block view of a BooleanVector. + * Block view of a {@link BooleanVector}. Cannot represent multi-values or nulls. * This class is generated. Do not edit it. */ public final class BooleanVectorBlock extends AbstractVectorBlock implements BooleanBlock { @@ -73,11 +73,6 @@ public String toString() { return getClass().getSimpleName() + "[vector=" + vector + "]"; } - @Override - public boolean isReleased() { - return super.isReleased() || vector.isReleased(); - } - @Override public void closeInternal() { assert (vector.isReleased() == false) : "can't release block [" + this + "] containing already released vector"; diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayBlock.java index 34d4e5aaa43e2..e2598d3d86b8f 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayBlock.java @@ -15,20 +15,17 @@ import java.util.BitSet; /** - * Block implementation that stores an array of BytesRef. + * Block implementation that stores values in a {@link BytesRefArrayVector}. + * Does not take ownership of the given {@link BytesRefArray} and does not adjust circuit breakers to account for it. * This class is generated. Do not edit it. 
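The sealed-interface reshuffle at the end of the `BooleanVector` hunk (`FixedBuilder` now extends `Builder` instead of sitting beside it) means call sites can hold a `Builder` and receive either flavor, while fluent chains on a fixed builder stay typed via the covariant override. The same change in miniature, with stand-in names:

```java
// Miniature of the new hierarchy; stand-in names, boolean-only.
sealed interface Builder permits GrowingBuilder, FixedBuilder {
    Builder appendBoolean(boolean value);
}

sealed interface FixedBuilder extends Builder permits FixedBuilderImpl {
    @Override
    FixedBuilder appendBoolean(boolean value); // covariant return keeps chaining typed
}

final class GrowingBuilder implements Builder {
    @Override
    public Builder appendBoolean(boolean value) { return this; }
}

final class FixedBuilderImpl implements FixedBuilder {
    @Override
    public FixedBuilder appendBoolean(boolean value) { return this; }
}
```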
*/ -public final class BytesRefArrayBlock extends AbstractArrayBlock implements BytesRefBlock { +final class BytesRefArrayBlock extends AbstractArrayBlock implements BytesRefBlock { private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(BytesRefArrayBlock.class); - private final BytesRefArray values; + private final BytesRefArrayVector vector; - public BytesRefArrayBlock(BytesRefArray values, int positionCount, int[] firstValueIndexes, BitSet nulls, MvOrdering mvOrdering) { - this(values, positionCount, firstValueIndexes, nulls, mvOrdering, BlockFactory.getNonBreakingInstance()); - } - - public BytesRefArrayBlock( + BytesRefArrayBlock( BytesRefArray values, int positionCount, int[] firstValueIndexes, @@ -37,7 +34,7 @@ public BytesRefArrayBlock( BlockFactory blockFactory ) { super(positionCount, firstValueIndexes, nulls, mvOrdering, blockFactory); - this.values = values; + this.vector = new BytesRefArrayVector(values, (int) values.size(), blockFactory); } @Override @@ -47,7 +44,7 @@ public BytesRefVector asVector() { @Override public BytesRef getBytesRef(int valueIndex, BytesRef dest) { - return values.get(valueIndex, dest); + return vector.getBytesRef(valueIndex, dest); } @Override @@ -86,7 +83,7 @@ public BytesRefBlock expand() { incRef(); return this; } - // TODO use reference counting to share the values + // TODO use reference counting to share the vector final BytesRef scratch = new BytesRef(); try (var builder = blockFactory().newBytesRefBlockBuilder(firstValueIndexes[getPositionCount()])) { for (int pos = 0; pos < getPositionCount(); pos++) { @@ -104,14 +101,13 @@ public BytesRefBlock expand() { } } - public static long ramBytesEstimated(BytesRefArray values, int[] firstValueIndexes, BitSet nullsMask) { - return BASE_RAM_BYTES_USED + RamUsageEstimator.sizeOf(values) + BlockRamUsageEstimator.sizeOf(firstValueIndexes) - + BlockRamUsageEstimator.sizeOfBitSet(nullsMask); + private long ramBytesUsedOnlyBlock() { + return BASE_RAM_BYTES_USED + BlockRamUsageEstimator.sizeOf(firstValueIndexes) + BlockRamUsageEstimator.sizeOfBitSet(nullsMask); } @Override public long ramBytesUsed() { - return ramBytesEstimated(values, firstValueIndexes, nullsMask); + return ramBytesUsedOnlyBlock() + vector.ramBytesUsed(); } @Override @@ -134,14 +130,20 @@ public String toString() { + getPositionCount() + ", mvOrdering=" + mvOrdering() - + ", values=" - + values.size() + + ", vector=" + + vector + ']'; } + @Override + public void allowPassingToDifferentDriver() { + super.allowPassingToDifferentDriver(); + vector.allowPassingToDifferentDriver(); + } + @Override public void closeInternal() { - blockFactory().adjustBreaker(-ramBytesUsed() + values.bigArraysRamBytesUsed(), true); - Releasables.closeExpectNoException(values); + blockFactory().adjustBreaker(-ramBytesUsedOnlyBlock(), true); + Releasables.closeExpectNoException(vector); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayVector.java index a8bb60f9f20fa..53e5ee61787c6 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayVector.java @@ -14,29 +14,23 @@ /** * Vector implementation that stores an array of BytesRef values. 
+ * Does not take ownership of the given {@link BytesRefArray} and does not adjust circuit breakers to account for it. * This class is generated. Do not edit it. */ -public final class BytesRefArrayVector extends AbstractVector implements BytesRefVector { +final class BytesRefArrayVector extends AbstractVector implements BytesRefVector { static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(BytesRefArrayVector.class); private final BytesRefArray values; - private final BytesRefBlock block; - - public BytesRefArrayVector(BytesRefArray values, int positionCount) { - this(values, positionCount, BlockFactory.getNonBreakingInstance()); - } - - public BytesRefArrayVector(BytesRefArray values, int positionCount, BlockFactory blockFactory) { + BytesRefArrayVector(BytesRefArray values, int positionCount, BlockFactory blockFactory) { super(positionCount, blockFactory); this.values = values; - this.block = new BytesRefVectorBlock(this); } @Override public BytesRefBlock asBlock() { - return block; + return new BytesRefVectorBlock(this); } @Override @@ -93,11 +87,9 @@ public String toString() { } @Override - public void close() { - if (released) { - throw new IllegalStateException("can't release already released vector [" + this + "]"); - } - released = true; + public void closeInternal() { + // The circuit breaker that tracks the values {@link BytesRefArray} is adjusted outside + // of this class. blockFactory().adjustBreaker(-ramBytesUsed() + values.bigArraysRamBytesUsed(), true); Releasables.closeExpectNoException(values); } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefBlock.java index 50611f3e15130..8ed17a1435302 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefBlock.java @@ -171,44 +171,6 @@ static int hash(BytesRefBlock block) { return result; } - /** - * Returns a builder using the {@link BlockFactory#getNonBreakingInstance non-breaking block factory}. - * @deprecated use {@link BlockFactory#newBytesRefBlockBuilder} - */ - // Eventually, we want to remove this entirely, always passing an explicit BlockFactory - @Deprecated - static Builder newBlockBuilder(int estimatedSize) { - return newBlockBuilder(estimatedSize, BlockFactory.getNonBreakingInstance()); - } - - /** - * Returns a builder. - * @deprecated use {@link BlockFactory#newBytesRefBlockBuilder} - */ - @Deprecated - static Builder newBlockBuilder(int estimatedSize, BlockFactory blockFactory) { - return blockFactory.newBytesRefBlockBuilder(estimatedSize); - } - - /** - * Returns a constant block built by the {@link BlockFactory#getNonBreakingInstance non-breaking block factory}. - * @deprecated use {@link BlockFactory#newConstantBytesRefBlockWith} - */ - // Eventually, we want to remove this entirely, always passing an explicit BlockFactory - @Deprecated - static BytesRefBlock newConstantBlockWith(BytesRef value, int positions) { - return newConstantBlockWith(value, positions, BlockFactory.getNonBreakingInstance()); - } - - /** - * Returns a constant block. 
- * @deprecated use {@link BlockFactory#newConstantBytesRefBlockWith} - */ - @Deprecated - static BytesRefBlock newConstantBlockWith(BytesRef value, int positions, BlockFactory blockFactory) { - return blockFactory.newConstantBytesRefBlockWith(value, positions); - } - /** * Builder for {@link BytesRefBlock} */ diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefBlockBuilder.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefBlockBuilder.java index 70e20ac9f1d00..f2bed6e42a039 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefBlockBuilder.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefBlockBuilder.java @@ -190,40 +190,46 @@ public BytesRefBlockBuilder mvOrdering(Block.MvOrdering mvOrdering) { return this; } + private BytesRefBlock buildFromBytesArray() { + assert estimatedBytes == 0 || firstValueIndexes != null; + final BytesRefBlock theBlock; + if (hasNonNullValue && positionCount == 1 && valueCount == 1) { + theBlock = new ConstantBytesRefVector(BytesRef.deepCopyOf(values.get(0, new BytesRef())), 1, blockFactory).asBlock(); + /* + * Update the breaker with the actual bytes used. + * We pass false below even though we've used the bytes. That's weird, + * but if we break here we will throw away the used memory, letting + * it be deallocated. The exception will bubble up and the builder will + * still technically be open, meaning the calling code should close it + * which will return all used memory to the breaker. + */ + blockFactory.adjustBreaker(theBlock.ramBytesUsed() - estimatedBytes, false); + Releasables.closeExpectNoException(values); + } else { + if (isDense() && singleValued()) { + theBlock = new BytesRefArrayVector(values, positionCount, blockFactory).asBlock(); + } else { + theBlock = new BytesRefArrayBlock(values, positionCount, firstValueIndexes, nullsMask, mvOrdering, blockFactory); + } + /* + * Update the breaker with the actual bytes used. + * We pass false below even though we've used the bytes. That's weird, + * but if we break here we will throw away the used memory, letting + * it be deallocated. The exception will bubble up and the builder will + * still technically be open, meaning the calling code should close it + * which will return all used memory to the breaker. + */ + blockFactory.adjustBreaker(theBlock.ramBytesUsed() - estimatedBytes - values.bigArraysRamBytesUsed(), false); + } + return theBlock; + } + @Override public BytesRefBlock build() { try { finish(); BytesRefBlock theBlock; - assert estimatedBytes == 0 || firstValueIndexes != null; - if (hasNonNullValue && positionCount == 1 && valueCount == 1) { - theBlock = new ConstantBytesRefVector(BytesRef.deepCopyOf(values.get(0, new BytesRef())), 1, blockFactory).asBlock(); - /* - * Update the breaker with the actual bytes used. - * We pass false below even though we've used the bytes. That's weird, - * but if we break here we will throw away the used memory, letting - * it be deallocated. The exception will bubble up and the builder will - * still technically be open, meaning the calling code should close it - * which will return all used memory to the breaker. 
- */ - blockFactory.adjustBreaker(theBlock.ramBytesUsed() - estimatedBytes, false); - Releasables.closeExpectNoException(values); - } else { - if (isDense() && singleValued()) { - theBlock = new BytesRefArrayVector(values, positionCount, blockFactory).asBlock(); - } else { - theBlock = new BytesRefArrayBlock(values, positionCount, firstValueIndexes, nullsMask, mvOrdering, blockFactory); - } - /* - * Update the breaker with the actual bytes used. - * We pass false below even though we've used the bytes. That's weird, - * but if we break here we will throw away the used memory, letting - * it be deallocated. The exception will bubble up and the builder will - * still technically be open, meaning the calling code should close it - * which will return all used memory to the breaker. - */ - blockFactory.adjustBreaker(theBlock.ramBytesUsed() - estimatedBytes - values.bigArraysRamBytesUsed(), false); - } + theBlock = buildFromBytesArray(); values = null; built(); return theBlock; diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefVector.java index b7011666b981d..5c56ece72c298 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefVector.java @@ -101,25 +101,6 @@ default void writeTo(StreamOutput out) throws IOException { } } - /** - * Returns a builder using the {@link BlockFactory#getNonBreakingInstance nonbreaking block factory}. - * @deprecated use {@link BlockFactory#newBytesRefVectorBuilder} - */ - // Eventually, we want to remove this entirely, always passing an explicit BlockFactory - @Deprecated - static Builder newVectorBuilder(int estimatedSize) { - return newVectorBuilder(estimatedSize, BlockFactory.getNonBreakingInstance()); - } - - /** - * Creates a builder that grows as needed. - * @deprecated use {@link BlockFactory#newBytesRefVectorBuilder} - */ - @Deprecated - static Builder newVectorBuilder(int estimatedSize, BlockFactory blockFactory) { - return blockFactory.newBytesRefVectorBuilder(estimatedSize); - } - /** * A builder that grows as needed. */ diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefVectorBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefVectorBlock.java index 9d3f69bfaa981..92f93d5d23a49 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefVectorBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefVectorBlock.java @@ -11,7 +11,7 @@ import org.elasticsearch.core.Releasables; /** - * Block view of a BytesRefVector. + * Block view of a {@link BytesRefVector}. Cannot represent multi-values or nulls. * This class is generated. Do not edit it. 
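One subtlety preserved by the `buildFromBytesArray` extraction: the single-value constant path must deep-copy, because the `BytesRef` returned by `values.get(0, scratch)` aliases pages inside the `BytesRefArray` that is closed immediately afterwards. The hazard in isolation (given some populated `BytesRefArray values`):

```java
// Why the constant path deep-copies before closing the BytesRefArray.
BytesRef scratch = new BytesRef();
BytesRef view = values.get(0, scratch);     // aliases the array's backing pages
BytesRef safe = BytesRef.deepCopyOf(view);  // independent copy, survives close
Releasables.closeExpectNoException(values); // frees the backing pages
// 'view' is now dangling and must not be read; 'safe' remains valid.
```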
*/ public final class BytesRefVectorBlock extends AbstractVectorBlock implements BytesRefBlock { @@ -74,11 +74,6 @@ public String toString() { return getClass().getSimpleName() + "[vector=" + vector + "]"; } - @Override - public boolean isReleased() { - return super.isReleased() || vector.isReleased(); - } - @Override public void closeInternal() { assert (vector.isReleased() == false) : "can't release block [" + this + "] containing already released vector"; diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantBooleanVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantBooleanVector.java index b636d89a206e0..16d70d1a0e800 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantBooleanVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantBooleanVector.java @@ -13,22 +13,15 @@ * Vector implementation that stores a constant boolean value. * This class is generated. Do not edit it. */ -public final class ConstantBooleanVector extends AbstractVector implements BooleanVector { +final class ConstantBooleanVector extends AbstractVector implements BooleanVector { static final long RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(ConstantBooleanVector.class); private final boolean value; - private final BooleanBlock block; - - public ConstantBooleanVector(boolean value, int positionCount) { - this(value, positionCount, BlockFactory.getNonBreakingInstance()); - } - - public ConstantBooleanVector(boolean value, int positionCount, BlockFactory blockFactory) { + ConstantBooleanVector(boolean value, int positionCount, BlockFactory blockFactory) { super(positionCount, blockFactory); this.value = value; - this.block = new BooleanVectorBlock(this); } @Override @@ -38,12 +31,12 @@ public boolean getBoolean(int position) { @Override public BooleanBlock asBlock() { - return block; + return new BooleanVectorBlock(this); } @Override public BooleanVector filter(int... positions) { - return new ConstantBooleanVector(value, positions.length); + return blockFactory().newConstantBooleanVector(value, positions.length); } @Override @@ -77,13 +70,4 @@ public int hashCode() { public String toString() { return getClass().getSimpleName() + "[positions=" + getPositionCount() + ", value=" + value + ']'; } - - @Override - public void close() { - if (released) { - throw new IllegalStateException("can't release already released vector [" + this + "]"); - } - released = true; - blockFactory().adjustBreaker(-ramBytesUsed(), true); - } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantBytesRefVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantBytesRefVector.java index be34db592b228..57ec1c945ade5 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantBytesRefVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantBytesRefVector.java @@ -14,22 +14,15 @@ * Vector implementation that stores a constant BytesRef value. * This class is generated. Do not edit it. 
*/ -public final class ConstantBytesRefVector extends AbstractVector implements BytesRefVector { +final class ConstantBytesRefVector extends AbstractVector implements BytesRefVector { static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(ConstantBytesRefVector.class) + RamUsageEstimator .shallowSizeOfInstance(BytesRef.class); private final BytesRef value; - private final BytesRefBlock block; - - public ConstantBytesRefVector(BytesRef value, int positionCount) { - this(value, positionCount, BlockFactory.getNonBreakingInstance()); - } - - public ConstantBytesRefVector(BytesRef value, int positionCount, BlockFactory blockFactory) { + ConstantBytesRefVector(BytesRef value, int positionCount, BlockFactory blockFactory) { super(positionCount, blockFactory); this.value = value; - this.block = new BytesRefVectorBlock(this); } @Override @@ -39,12 +32,12 @@ public BytesRef getBytesRef(int position, BytesRef ignore) { @Override public BytesRefBlock asBlock() { - return block; + return new BytesRefVectorBlock(this); } @Override public BytesRefVector filter(int... positions) { - return new ConstantBytesRefVector(value, positions.length); + return blockFactory().newConstantBytesRefVector(value, positions.length); } @Override @@ -82,13 +75,4 @@ public int hashCode() { public String toString() { return getClass().getSimpleName() + "[positions=" + getPositionCount() + ", value=" + value + ']'; } - - @Override - public void close() { - if (released) { - throw new IllegalStateException("can't release already released vector [" + this + "]"); - } - released = true; - blockFactory().adjustBreaker(-ramBytesUsed(), true); - } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantDoubleVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantDoubleVector.java index f6cce49aa3d42..a783f0243313e 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantDoubleVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantDoubleVector.java @@ -13,22 +13,15 @@ * Vector implementation that stores a constant double value. * This class is generated. Do not edit it. */ -public final class ConstantDoubleVector extends AbstractVector implements DoubleVector { +final class ConstantDoubleVector extends AbstractVector implements DoubleVector { static final long RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(ConstantDoubleVector.class); private final double value; - private final DoubleBlock block; - - public ConstantDoubleVector(double value, int positionCount) { - this(value, positionCount, BlockFactory.getNonBreakingInstance()); - } - - public ConstantDoubleVector(double value, int positionCount, BlockFactory blockFactory) { + ConstantDoubleVector(double value, int positionCount, BlockFactory blockFactory) { super(positionCount, blockFactory); this.value = value; - this.block = new DoubleVectorBlock(this); } @Override @@ -38,12 +31,12 @@ public double getDouble(int position) { @Override public DoubleBlock asBlock() { - return block; + return new DoubleVectorBlock(this); } @Override public DoubleVector filter(int... 
positions) { - return new ConstantDoubleVector(value, positions.length); + return blockFactory().newConstantDoubleVector(value, positions.length); } @Override @@ -77,13 +70,4 @@ public int hashCode() { public String toString() { return getClass().getSimpleName() + "[positions=" + getPositionCount() + ", value=" + value + ']'; } - - @Override - public void close() { - if (released) { - throw new IllegalStateException("can't release already released vector [" + this + "]"); - } - released = true; - blockFactory().adjustBreaker(-ramBytesUsed(), true); - } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantIntVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantIntVector.java index fa7b9223d5107..56573e985c387 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantIntVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantIntVector.java @@ -13,22 +13,15 @@ * Vector implementation that stores a constant int value. * This class is generated. Do not edit it. */ -public final class ConstantIntVector extends AbstractVector implements IntVector { +final class ConstantIntVector extends AbstractVector implements IntVector { static final long RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(ConstantIntVector.class); private final int value; - private final IntBlock block; - - public ConstantIntVector(int value, int positionCount) { - this(value, positionCount, BlockFactory.getNonBreakingInstance()); - } - - public ConstantIntVector(int value, int positionCount, BlockFactory blockFactory) { + ConstantIntVector(int value, int positionCount, BlockFactory blockFactory) { super(positionCount, blockFactory); this.value = value; - this.block = new IntVectorBlock(this); } @Override @@ -38,12 +31,12 @@ public int getInt(int position) { @Override public IntBlock asBlock() { - return block; + return new IntVectorBlock(this); } @Override public IntVector filter(int... positions) { - return new ConstantIntVector(value, positions.length); + return blockFactory().newConstantIntVector(value, positions.length); } @Override @@ -77,13 +70,4 @@ public int hashCode() { public String toString() { return getClass().getSimpleName() + "[positions=" + getPositionCount() + ", value=" + value + ']'; } - - @Override - public void close() { - if (released) { - throw new IllegalStateException("can't release already released vector [" + this + "]"); - } - released = true; - blockFactory().adjustBreaker(-ramBytesUsed(), true); - } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantLongVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantLongVector.java index 21d4d81dfd193..0173f1c1d4d7a 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantLongVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantLongVector.java @@ -13,22 +13,15 @@ * Vector implementation that stores a constant long value. * This class is generated. Do not edit it. 
*/ -public final class ConstantLongVector extends AbstractVector implements LongVector { +final class ConstantLongVector extends AbstractVector implements LongVector { static final long RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(ConstantLongVector.class); private final long value; - private final LongBlock block; - - public ConstantLongVector(long value, int positionCount) { - this(value, positionCount, BlockFactory.getNonBreakingInstance()); - } - - public ConstantLongVector(long value, int positionCount, BlockFactory blockFactory) { + ConstantLongVector(long value, int positionCount, BlockFactory blockFactory) { super(positionCount, blockFactory); this.value = value; - this.block = new LongVectorBlock(this); } @Override @@ -38,12 +31,12 @@ public long getLong(int position) { @Override public LongBlock asBlock() { - return block; + return new LongVectorBlock(this); } @Override public LongVector filter(int... positions) { - return new ConstantLongVector(value, positions.length); + return blockFactory().newConstantLongVector(value, positions.length); } @Override @@ -77,13 +70,4 @@ public int hashCode() { public String toString() { return getClass().getSimpleName() + "[positions=" + getPositionCount() + ", value=" + value + ']'; } - - @Override - public void close() { - if (released) { - throw new IllegalStateException("can't release already released vector [" + this + "]"); - } - released = true; - blockFactory().adjustBreaker(-ramBytesUsed(), true); - } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleArrayBlock.java index db3546c73c054..96e96ac459a50 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleArrayBlock.java @@ -8,25 +8,21 @@ package org.elasticsearch.compute.data; import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.core.Releasables; -import java.util.Arrays; import java.util.BitSet; /** - * Block implementation that stores an array of double. + * Block implementation that stores values in a {@link DoubleArrayVector}. * This class is generated. Do not edit it. 
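The constant-vector hunks above all make the same two moves: drop the cached block wrapper (`asBlock()` now allocates a fresh view, keeping lifecycle per-object) and route `filter()` through `blockFactory()` so the resulting constant vector is breaker-tracked. A small usage sketch under those assumptions:

```java
// Usage sketch: factory-created constant vectors, filter stays constant-typed.
try (LongVector fives = blockFactory.newConstantLongVector(5L, 1024)) {
    LongBlock view = fives.asBlock(); // fresh wrapper, not a cached field
    assert view.getPositionCount() == 1024;
    try (LongVector firstTen = fives.filter(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)) {
        assert firstTen.getPositionCount() == 10;
        assert firstTen.getLong(3) == 5L; // still the constant value
    }
}
```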
*/ -public final class DoubleArrayBlock extends AbstractArrayBlock implements DoubleBlock { +final class DoubleArrayBlock extends AbstractArrayBlock implements DoubleBlock { private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(DoubleArrayBlock.class); - private final double[] values; + private final DoubleArrayVector vector; - public DoubleArrayBlock(double[] values, int positionCount, int[] firstValueIndexes, BitSet nulls, MvOrdering mvOrdering) { - this(values, positionCount, firstValueIndexes, nulls, mvOrdering, BlockFactory.getNonBreakingInstance()); - } - - public DoubleArrayBlock( + DoubleArrayBlock( double[] values, int positionCount, int[] firstValueIndexes, @@ -35,7 +31,7 @@ public DoubleArrayBlock( BlockFactory blockFactory ) { super(positionCount, firstValueIndexes, nulls, mvOrdering, blockFactory); - this.values = values; + this.vector = new DoubleArrayVector(values, values.length, blockFactory); } @Override @@ -45,7 +41,7 @@ public DoubleVector asVector() { @Override public double getDouble(int valueIndex) { - return values[valueIndex]; + return vector.getDouble(valueIndex); } @Override @@ -83,7 +79,7 @@ public DoubleBlock expand() { incRef(); return this; } - // TODO use reference counting to share the values + // TODO use reference counting to share the vector try (var builder = blockFactory().newDoubleBlockBuilder(firstValueIndexes[getPositionCount()])) { for (int pos = 0; pos < getPositionCount(); pos++) { if (isNull(pos)) { @@ -100,14 +96,13 @@ public DoubleBlock expand() { } } - public static long ramBytesEstimated(double[] values, int[] firstValueIndexes, BitSet nullsMask) { - return BASE_RAM_BYTES_USED + RamUsageEstimator.sizeOf(values) + BlockRamUsageEstimator.sizeOf(firstValueIndexes) - + BlockRamUsageEstimator.sizeOfBitSet(nullsMask); + private long ramBytesUsedOnlyBlock() { + return BASE_RAM_BYTES_USED + BlockRamUsageEstimator.sizeOf(firstValueIndexes) + BlockRamUsageEstimator.sizeOfBitSet(nullsMask); } @Override public long ramBytesUsed() { - return ramBytesEstimated(values, firstValueIndexes, nullsMask); + return ramBytesUsedOnlyBlock() + vector.ramBytesUsed(); } @Override @@ -130,13 +125,20 @@ public String toString() { + getPositionCount() + ", mvOrdering=" + mvOrdering() - + ", values=" - + Arrays.toString(values) + + ", vector=" + + vector + ']'; } + @Override + public void allowPassingToDifferentDriver() { + super.allowPassingToDifferentDriver(); + vector.allowPassingToDifferentDriver(); + } + @Override public void closeInternal() { - blockFactory().adjustBreaker(-ramBytesUsed(), true); + blockFactory().adjustBreaker(-ramBytesUsedOnlyBlock(), true); + Releasables.closeExpectNoException(vector); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleArrayVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleArrayVector.java index 08e51b0e313d8..bb6d9c22539a6 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleArrayVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleArrayVector.java @@ -15,27 +15,20 @@ * Vector implementation that stores an array of double values. * This class is generated. Do not edit it. 
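DoubleArrayBlock (and the Int/Long variants later in this diff) now stores its values in a wrapped DoubleArrayVector, so memory accounting is split: ramBytesUsed() is the block-only overhead plus the vector's bytes, and closeInternal() refunds only the block's share before releasing the vector, which refunds its own. A sketch of the invariant this preserves (illustrative wiring):

    try (DoubleBlock.Builder builder = factory.newDoubleBlockBuilder(3)) {
        DoubleBlock block = builder.appendDouble(1).appendNull().appendDouble(2).build();
        long total = block.ramBytesUsed(); // block-only overhead + wrapped vector
        block.close();                     // the breaker ends up lower by exactly `total`
    }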
*/ -public final class DoubleArrayVector extends AbstractVector implements DoubleVector { +final class DoubleArrayVector extends AbstractVector implements DoubleVector { static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(DoubleArrayVector.class); private final double[] values; - private final DoubleBlock block; - - public DoubleArrayVector(double[] values, int positionCount) { - this(values, positionCount, BlockFactory.getNonBreakingInstance()); - } - - public DoubleArrayVector(double[] values, int positionCount, BlockFactory blockFactory) { + DoubleArrayVector(double[] values, int positionCount, BlockFactory blockFactory) { super(positionCount, blockFactory); this.values = values; - this.block = new DoubleVectorBlock(this); } @Override public DoubleBlock asBlock() { - return block; + return new DoubleVectorBlock(this); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBigArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBigArrayBlock.java new file mode 100644 index 0000000000000..5b1dcbfc9d728 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBigArrayBlock.java @@ -0,0 +1,145 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.data; + +import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.util.DoubleArray; +import org.elasticsearch.core.Releasables; + +import java.util.BitSet; + +/** + * Block implementation that stores values in a {@link DoubleBigArrayVector}. Does not take ownership of the given + * {@link DoubleArray} and does not adjust circuit breakers to account for it. + * This class is generated. Do not edit it. + */ +public final class DoubleBigArrayBlock extends AbstractArrayBlock implements DoubleBlock { + + private static final long BASE_RAM_BYTES_USED = 0; // TODO: fix this + private final DoubleBigArrayVector vector; + + public DoubleBigArrayBlock( + DoubleArray values, + int positionCount, + int[] firstValueIndexes, + BitSet nulls, + MvOrdering mvOrdering, + BlockFactory blockFactory + ) { + super(positionCount, firstValueIndexes, nulls, mvOrdering, blockFactory); + this.vector = new DoubleBigArrayVector(values, (int) values.size(), blockFactory); + } + + @Override + public DoubleVector asVector() { + return null; + } + + @Override + public double getDouble(int valueIndex) { + return vector.getDouble(valueIndex); + } + + @Override + public DoubleBlock filter(int... 
positions) { + try (var builder = blockFactory().newDoubleBlockBuilder(positions.length)) { + for (int pos : positions) { + if (isNull(pos)) { + builder.appendNull(); + continue; + } + int valueCount = getValueCount(pos); + int first = getFirstValueIndex(pos); + if (valueCount == 1) { + builder.appendDouble(getDouble(getFirstValueIndex(pos))); + } else { + builder.beginPositionEntry(); + for (int c = 0; c < valueCount; c++) { + builder.appendDouble(getDouble(first + c)); + } + builder.endPositionEntry(); + } + } + return builder.mvOrdering(mvOrdering()).build(); + } + } + + @Override + public ElementType elementType() { + return ElementType.DOUBLE; + } + + @Override + public DoubleBlock expand() { + if (firstValueIndexes == null) { + incRef(); + return this; + } + // TODO use reference counting to share the vector + try (var builder = blockFactory().newDoubleBlockBuilder(firstValueIndexes[getPositionCount()])) { + for (int pos = 0; pos < getPositionCount(); pos++) { + if (isNull(pos)) { + builder.appendNull(); + continue; + } + int first = getFirstValueIndex(pos); + int end = first + getValueCount(pos); + for (int i = first; i < end; i++) { + builder.appendDouble(getDouble(i)); + } + } + return builder.mvOrdering(MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING).build(); + } + } + + private long ramBytesUsedOnlyBlock() { + return BASE_RAM_BYTES_USED + BlockRamUsageEstimator.sizeOf(firstValueIndexes) + BlockRamUsageEstimator.sizeOfBitSet(nullsMask); + } + + @Override + public long ramBytesUsed() { + return ramBytesUsedOnlyBlock() + RamUsageEstimator.sizeOf(vector); + } + + @Override + public boolean equals(Object obj) { + if (obj instanceof DoubleBlock that) { + return DoubleBlock.equals(this, that); + } + return false; + } + + @Override + public int hashCode() { + return DoubleBlock.hash(this); + } + + @Override + public String toString() { + return getClass().getSimpleName() + + "[positions=" + + getPositionCount() + + ", mvOrdering=" + + mvOrdering() + + ", ramBytesUsed=" + + vector.ramBytesUsed() + + ']'; + } + + @Override + public void allowPassingToDifferentDriver() { + super.allowPassingToDifferentDriver(); + vector.allowPassingToDifferentDriver(); + } + + @Override + public void closeInternal() { + blockFactory().adjustBreaker(-ramBytesUsedOnlyBlock(), true); + Releasables.closeExpectNoException(vector); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBigArrayVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBigArrayVector.java index 476b94ad3fa05..d6fab63a6b6ff 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBigArrayVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBigArrayVector.java @@ -12,30 +12,24 @@ import org.elasticsearch.core.Releasable; /** - * Vector implementation that defers to an enclosed DoubleArray. + * Vector implementation that defers to an enclosed {@link DoubleArray}. + * Does not take ownership of the array and does not adjust circuit breakers to account for it. * This class is generated. Do not edit it. 
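DoubleBigArrayBlock is new: together with DoubleBigArrayVector it backs blocks whose values live in a BigArrays DoubleArray rather than a single heap array, and per the new javadoc neither takes ownership of the array's breaker accounting. A hedged construction sketch (the vector's three-argument constructor appears to remain public in this patch; wiring as before):

    DoubleArray array = BigArrays.NON_RECYCLING_INSTANCE.newDoubleArray(3, false);
    array.set(0, 42d);
    try (DoubleVector big = new DoubleBigArrayVector(array, 3, factory)) {
        assert big.getDouble(0) == 42d;
    } // closeInternal() closes the DoubleArray; its bytes stay the creator's concern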
*/ public final class DoubleBigArrayVector extends AbstractVector implements DoubleVector, Releasable { - private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(DoubleBigArrayVector.class); + private static final long BASE_RAM_BYTES_USED = 0; // FIXME private final DoubleArray values; - private final DoubleBlock block; - - public DoubleBigArrayVector(DoubleArray values, int positionCount) { - this(values, positionCount, BlockFactory.getNonBreakingInstance()); - } - public DoubleBigArrayVector(DoubleArray values, int positionCount, BlockFactory blockFactory) { super(positionCount, blockFactory); this.values = values; - this.block = new DoubleVectorBlock(this); } @Override public DoubleBlock asBlock() { - return block; + return new DoubleVectorBlock(this); } @Override @@ -69,11 +63,9 @@ public DoubleVector filter(int... positions) { } @Override - public void close() { - if (released) { - throw new IllegalStateException("can't release already released vector [" + this + "]"); - } - released = true; + public void closeInternal() { + // The circuit breaker that tracks the values {@link DoubleArray} is adjusted outside + // of this class. values.close(); } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBlock.java index 31d0000d28515..890f965c765bb 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBlock.java @@ -18,7 +18,7 @@ * Block that stores double values. * This class is generated. Do not edit it. */ -public sealed interface DoubleBlock extends Block permits DoubleArrayBlock, DoubleVectorBlock, ConstantNullBlock { +public sealed interface DoubleBlock extends Block permits DoubleArrayBlock, DoubleVectorBlock, ConstantNullBlock, DoubleBigArrayBlock { /** * Retrieves the double value stored at the given value index. @@ -167,44 +167,6 @@ static int hash(DoubleBlock block) { return result; } - /** - * Returns a builder using the {@link BlockFactory#getNonBreakingInstance non-breaking block factory}. - * @deprecated use {@link BlockFactory#newDoubleBlockBuilder} - */ - // Eventually, we want to remove this entirely, always passing an explicit BlockFactory - @Deprecated - static Builder newBlockBuilder(int estimatedSize) { - return newBlockBuilder(estimatedSize, BlockFactory.getNonBreakingInstance()); - } - - /** - * Returns a builder. - * @deprecated use {@link BlockFactory#newDoubleBlockBuilder} - */ - @Deprecated - static Builder newBlockBuilder(int estimatedSize, BlockFactory blockFactory) { - return blockFactory.newDoubleBlockBuilder(estimatedSize); - } - - /** - * Returns a constant block built by the {@link BlockFactory#getNonBreakingInstance non-breaking block factory}. - * @deprecated use {@link BlockFactory#newConstantDoubleBlockWith} - */ - // Eventually, we want to remove this entirely, always passing an explicit BlockFactory - @Deprecated - static DoubleBlock newConstantBlockWith(double value, int positions) { - return newConstantBlockWith(value, positions, BlockFactory.getNonBreakingInstance()); - } - - /** - * Returns a constant block. 
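The deprecated non-breaking entry points (newBlockBuilder, newConstantBlockWith, and the vector-builder statics) are deleted here for the Double, Int, and Long types alike; the MultivalueDedupe* hunks at the bottom of this diff show the mechanical caller rewrite. Migration sketch:

    // before (removed): DoubleBlock.newConstantBlockWith(1.5, 10)
    try (DoubleBlock constant = factory.newConstantDoubleBlockWith(1.5, 10)) {
        assert constant.getDouble(0) == 1.5; // same values, now breaker-tracked
    }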
- * @deprecated use {@link BlockFactory#newConstantDoubleBlockWith} - */ - @Deprecated - static DoubleBlock newConstantBlockWith(double value, int positions, BlockFactory blockFactory) { - return blockFactory.newConstantDoubleBlockWith(value, positions); - } - /** * Builder for {@link DoubleBlock} */ diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBlockBuilder.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBlockBuilder.java index 7781e4c353e8e..4e0fa1180a2ff 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBlockBuilder.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBlockBuilder.java @@ -9,6 +9,7 @@ import org.apache.lucene.util.RamUsageEstimator; import org.elasticsearch.common.breaker.CircuitBreakingException; +import org.elasticsearch.common.util.DoubleArray; import java.util.Arrays; @@ -179,6 +180,29 @@ public DoubleBlockBuilder mvOrdering(Block.MvOrdering mvOrdering) { return this; } + private DoubleBlock buildBigArraysBlock() { + final DoubleBlock theBlock; + final DoubleArray array = blockFactory.bigArrays().newDoubleArray(valueCount, false); + for (int i = 0; i < valueCount; i++) { + array.set(i, values[i]); + } + if (isDense() && singleValued()) { + theBlock = new DoubleBigArrayVector(array, positionCount, blockFactory).asBlock(); + } else { + theBlock = new DoubleBigArrayBlock(array, positionCount, firstValueIndexes, nullsMask, mvOrdering, blockFactory); + } + /* + * Update the breaker with the actual bytes used. + * We pass false below even though we've used the bytes. That's weird, + * but if we break here we will throw away the used memory, letting + * it be deallocated. The exception will bubble up and the builder will + * still technically be open, meaning the calling code should close it + * which will return all used memory to the breaker. 
+ */ + blockFactory.adjustBreaker(theBlock.ramBytesUsed() - estimatedBytes - array.ramBytesUsed(), false); + return theBlock; + } + @Override public DoubleBlock build() { try { @@ -187,20 +211,26 @@ public DoubleBlock build() { if (hasNonNullValue && positionCount == 1 && valueCount == 1) { theBlock = blockFactory.newConstantDoubleBlockWith(values[0], 1, estimatedBytes); } else { - if (values.length - valueCount > 1024 || valueCount < (values.length / 2)) { - values = Arrays.copyOf(values, valueCount); - } - if (isDense() && singleValued()) { - theBlock = blockFactory.newDoubleArrayVector(values, positionCount, estimatedBytes).asBlock(); + if (estimatedBytes > blockFactory.maxPrimitiveArrayBytes()) { + theBlock = buildBigArraysBlock(); } else { - theBlock = blockFactory.newDoubleArrayBlock( - values, - positionCount, - firstValueIndexes, - nullsMask, - mvOrdering, - estimatedBytes - ); + if (values.length - valueCount > 1024 || valueCount < (values.length / 2)) { + adjustBreaker(valueCount * elementSize()); + values = Arrays.copyOf(values, valueCount); + adjustBreaker(-values.length * elementSize()); + } + if (isDense() && singleValued()) { + theBlock = blockFactory.newDoubleArrayVector(values, positionCount, estimatedBytes).asBlock(); + } else { + theBlock = blockFactory.newDoubleArrayBlock( + values, + positionCount, + firstValueIndexes, + nullsMask, + mvOrdering, + estimatedBytes + ); + } } } built(); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleVector.java index acabd0deb17f6..f54044874acdd 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleVector.java @@ -102,40 +102,10 @@ default void writeTo(StreamOutput out) throws IOException { } } - /** - * Returns a builder using the {@link BlockFactory#getNonBreakingInstance nonbreaking block factory}. - * @deprecated use {@link BlockFactory#newDoubleVectorBuilder} - */ - // Eventually, we want to remove this entirely, always passing an explicit BlockFactory - @Deprecated - static Builder newVectorBuilder(int estimatedSize) { - return newVectorBuilder(estimatedSize, BlockFactory.getNonBreakingInstance()); - } - - /** - * Creates a builder that grows as needed. Prefer {@link #newVectorFixedBuilder} - * if you know the size up front because it's faster. - * @deprecated use {@link BlockFactory#newDoubleVectorBuilder} - */ - @Deprecated - static Builder newVectorBuilder(int estimatedSize, BlockFactory blockFactory) { - return blockFactory.newDoubleVectorBuilder(estimatedSize); - } - - /** - * Creates a builder that never grows. Prefer this over {@link #newVectorBuilder} - * if you know the size up front because it's faster. - * @deprecated use {@link BlockFactory#newDoubleVectorFixedBuilder} - */ - @Deprecated - static FixedBuilder newVectorFixedBuilder(int size, BlockFactory blockFactory) { - return blockFactory.newDoubleVectorFixedBuilder(size); - } - /** * A builder that grows as needed. */ - sealed interface Builder extends Vector.Builder permits DoubleVectorBuilder { + sealed interface Builder extends Vector.Builder permits DoubleVectorBuilder, FixedBuilder { /** * Appends a double to the current entry. */ @@ -148,13 +118,11 @@ sealed interface Builder extends Vector.Builder permits DoubleVectorBuilder { /** * A builder that never grows. 
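The adjustBreaker(theBlock.ramBytesUsed() - estimatedBytes - array.ramBytesUsed(), false) call above is easiest to read as a true-up: the builder had already charged estimatedBytes for its heap array, the BigArrays allocation tracks its own bytes, and only the delta is reconciled in one step. A worked example with made-up sizes:

    class TrueUpSketch {
        public static void main(String[] args) {
            // Hypothetical figures; only the shape of the reconciliation matters.
            long blockBytes = 1_052_672;   // theBlock.ramBytesUsed()
            long estimatedBytes = 1 << 21; // what the builder charged while growing
            long arrayBytes = 1 << 20;     // array.ramBytesUsed(), tracked by BigArrays
            long delta = blockBytes - estimatedBytes - arrayBytes;
            System.out.println("adjustBreaker(" + delta + ", false)"); // refund if negative
        }
    }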
*/ - sealed interface FixedBuilder extends Vector.Builder permits DoubleVectorFixedBuilder { + sealed interface FixedBuilder extends Builder permits DoubleVectorFixedBuilder { /** * Appends a double to the current entry. */ - FixedBuilder appendDouble(double value); - @Override - DoubleVector build(); + FixedBuilder appendDouble(double value); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleVectorBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleVectorBlock.java index b23a448c58336..2aa8e07c25604 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleVectorBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleVectorBlock.java @@ -10,7 +10,7 @@ import org.elasticsearch.core.Releasables; /** - * Block view of a DoubleVector. + * Block view of a {@link DoubleVector}. Cannot represent multi-values or nulls. * This class is generated. Do not edit it. */ public final class DoubleVectorBlock extends AbstractVectorBlock implements DoubleBlock { @@ -73,11 +73,6 @@ public String toString() { return getClass().getSimpleName() + "[vector=" + vector + "]"; } - @Override - public boolean isReleased() { - return super.isReleased() || vector.isReleased(); - } - @Override public void closeInternal() { assert (vector.isReleased() == false) : "can't release block [" + this + "] containing already released vector"; diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntArrayBlock.java index 111fc0c757af1..e8f10ced11adc 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntArrayBlock.java @@ -8,25 +8,21 @@ package org.elasticsearch.compute.data; import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.core.Releasables; -import java.util.Arrays; import java.util.BitSet; /** - * Block implementation that stores an array of int. + * Block implementation that stores values in a {@link IntArrayVector}. * This class is generated. Do not edit it. 
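DoubleVectorBlock also loses its isReleased() override: the block's released state now tracks its own ref count, and closeInternal() instead asserts that the wrapped vector has not been released out from under it. A sketch of the ownership rule that assertion appears to encode (hedged; wiring as before):

    DoubleVector vector = factory.newDoubleArrayVector(new double[] { 1 }, 1);
    DoubleBlock view = vector.asBlock();
    view.close(); // releases the wrapped vector as well
    // Closing `vector` first and `view` second would trip the new assertion.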
*/ -public final class IntArrayBlock extends AbstractArrayBlock implements IntBlock { +final class IntArrayBlock extends AbstractArrayBlock implements IntBlock { private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(IntArrayBlock.class); - private final int[] values; + private final IntArrayVector vector; - public IntArrayBlock(int[] values, int positionCount, int[] firstValueIndexes, BitSet nulls, MvOrdering mvOrdering) { - this(values, positionCount, firstValueIndexes, nulls, mvOrdering, BlockFactory.getNonBreakingInstance()); - } - - public IntArrayBlock( + IntArrayBlock( int[] values, int positionCount, int[] firstValueIndexes, @@ -35,7 +31,7 @@ public IntArrayBlock( BlockFactory blockFactory ) { super(positionCount, firstValueIndexes, nulls, mvOrdering, blockFactory); - this.values = values; + this.vector = new IntArrayVector(values, values.length, blockFactory); } @Override @@ -45,7 +41,7 @@ public IntVector asVector() { @Override public int getInt(int valueIndex) { - return values[valueIndex]; + return vector.getInt(valueIndex); } @Override @@ -83,7 +79,7 @@ public IntBlock expand() { incRef(); return this; } - // TODO use reference counting to share the values + // TODO use reference counting to share the vector try (var builder = blockFactory().newIntBlockBuilder(firstValueIndexes[getPositionCount()])) { for (int pos = 0; pos < getPositionCount(); pos++) { if (isNull(pos)) { @@ -100,14 +96,13 @@ public IntBlock expand() { } } - public static long ramBytesEstimated(int[] values, int[] firstValueIndexes, BitSet nullsMask) { - return BASE_RAM_BYTES_USED + RamUsageEstimator.sizeOf(values) + BlockRamUsageEstimator.sizeOf(firstValueIndexes) - + BlockRamUsageEstimator.sizeOfBitSet(nullsMask); + private long ramBytesUsedOnlyBlock() { + return BASE_RAM_BYTES_USED + BlockRamUsageEstimator.sizeOf(firstValueIndexes) + BlockRamUsageEstimator.sizeOfBitSet(nullsMask); } @Override public long ramBytesUsed() { - return ramBytesEstimated(values, firstValueIndexes, nullsMask); + return ramBytesUsedOnlyBlock() + vector.ramBytesUsed(); } @Override @@ -130,13 +125,20 @@ public String toString() { + getPositionCount() + ", mvOrdering=" + mvOrdering() - + ", values=" - + Arrays.toString(values) + + ", vector=" + + vector + ']'; } + @Override + public void allowPassingToDifferentDriver() { + super.allowPassingToDifferentDriver(); + vector.allowPassingToDifferentDriver(); + } + @Override public void closeInternal() { - blockFactory().adjustBreaker(-ramBytesUsed(), true); + blockFactory().adjustBreaker(-ramBytesUsedOnlyBlock(), true); + Releasables.closeExpectNoException(vector); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntArrayVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntArrayVector.java index 9c8c27efa0806..0576b77a0d700 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntArrayVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntArrayVector.java @@ -15,27 +15,20 @@ * Vector implementation that stores an array of int values. * This class is generated. Do not edit it. 
*/ -public final class IntArrayVector extends AbstractVector implements IntVector { +final class IntArrayVector extends AbstractVector implements IntVector { static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(IntArrayVector.class); private final int[] values; - private final IntBlock block; - - public IntArrayVector(int[] values, int positionCount) { - this(values, positionCount, BlockFactory.getNonBreakingInstance()); - } - - public IntArrayVector(int[] values, int positionCount, BlockFactory blockFactory) { + IntArrayVector(int[] values, int positionCount, BlockFactory blockFactory) { super(positionCount, blockFactory); this.values = values; - this.block = new IntVectorBlock(this); } @Override public IntBlock asBlock() { - return block; + return new IntVectorBlock(this); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBigArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBigArrayBlock.java new file mode 100644 index 0000000000000..ad6033fb452a0 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBigArrayBlock.java @@ -0,0 +1,145 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.data; + +import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.util.IntArray; +import org.elasticsearch.core.Releasables; + +import java.util.BitSet; + +/** + * Block implementation that stores values in a {@link IntBigArrayVector}. Does not take ownership of the given + * {@link IntArray} and does not adjust circuit breakers to account for it. + * This class is generated. Do not edit it. + */ +public final class IntBigArrayBlock extends AbstractArrayBlock implements IntBlock { + + private static final long BASE_RAM_BYTES_USED = 0; // TODO: fix this + private final IntBigArrayVector vector; + + public IntBigArrayBlock( + IntArray values, + int positionCount, + int[] firstValueIndexes, + BitSet nulls, + MvOrdering mvOrdering, + BlockFactory blockFactory + ) { + super(positionCount, firstValueIndexes, nulls, mvOrdering, blockFactory); + this.vector = new IntBigArrayVector(values, (int) values.size(), blockFactory); + } + + @Override + public IntVector asVector() { + return null; + } + + @Override + public int getInt(int valueIndex) { + return vector.getInt(valueIndex); + } + + @Override + public IntBlock filter(int... 
positions) { + try (var builder = blockFactory().newIntBlockBuilder(positions.length)) { + for (int pos : positions) { + if (isNull(pos)) { + builder.appendNull(); + continue; + } + int valueCount = getValueCount(pos); + int first = getFirstValueIndex(pos); + if (valueCount == 1) { + builder.appendInt(getInt(getFirstValueIndex(pos))); + } else { + builder.beginPositionEntry(); + for (int c = 0; c < valueCount; c++) { + builder.appendInt(getInt(first + c)); + } + builder.endPositionEntry(); + } + } + return builder.mvOrdering(mvOrdering()).build(); + } + } + + @Override + public ElementType elementType() { + return ElementType.INT; + } + + @Override + public IntBlock expand() { + if (firstValueIndexes == null) { + incRef(); + return this; + } + // TODO use reference counting to share the vector + try (var builder = blockFactory().newIntBlockBuilder(firstValueIndexes[getPositionCount()])) { + for (int pos = 0; pos < getPositionCount(); pos++) { + if (isNull(pos)) { + builder.appendNull(); + continue; + } + int first = getFirstValueIndex(pos); + int end = first + getValueCount(pos); + for (int i = first; i < end; i++) { + builder.appendInt(getInt(i)); + } + } + return builder.mvOrdering(MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING).build(); + } + } + + private long ramBytesUsedOnlyBlock() { + return BASE_RAM_BYTES_USED + BlockRamUsageEstimator.sizeOf(firstValueIndexes) + BlockRamUsageEstimator.sizeOfBitSet(nullsMask); + } + + @Override + public long ramBytesUsed() { + return ramBytesUsedOnlyBlock() + RamUsageEstimator.sizeOf(vector); + } + + @Override + public boolean equals(Object obj) { + if (obj instanceof IntBlock that) { + return IntBlock.equals(this, that); + } + return false; + } + + @Override + public int hashCode() { + return IntBlock.hash(this); + } + + @Override + public String toString() { + return getClass().getSimpleName() + + "[positions=" + + getPositionCount() + + ", mvOrdering=" + + mvOrdering() + + ", ramBytesUsed=" + + vector.ramBytesUsed() + + ']'; + } + + @Override + public void allowPassingToDifferentDriver() { + super.allowPassingToDifferentDriver(); + vector.allowPassingToDifferentDriver(); + } + + @Override + public void closeInternal() { + blockFactory().adjustBreaker(-ramBytesUsedOnlyBlock(), true); + Releasables.closeExpectNoException(vector); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBigArrayVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBigArrayVector.java index 76d2797f2a64b..c1799c06713e9 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBigArrayVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBigArrayVector.java @@ -12,30 +12,24 @@ import org.elasticsearch.core.Releasable; /** - * Vector implementation that defers to an enclosed IntArray. + * Vector implementation that defers to an enclosed {@link IntArray}. + * Does not take ownership of the array and does not adjust circuit breakers to account for it. * This class is generated. Do not edit it. 
*/ public final class IntBigArrayVector extends AbstractVector implements IntVector, Releasable { - private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(IntBigArrayVector.class); + private static final long BASE_RAM_BYTES_USED = 0; // FIXME private final IntArray values; - private final IntBlock block; - - public IntBigArrayVector(IntArray values, int positionCount) { - this(values, positionCount, BlockFactory.getNonBreakingInstance()); - } - public IntBigArrayVector(IntArray values, int positionCount, BlockFactory blockFactory) { super(positionCount, blockFactory); this.values = values; - this.block = new IntVectorBlock(this); } @Override public IntBlock asBlock() { - return block; + return new IntVectorBlock(this); } @Override @@ -69,11 +63,9 @@ public IntVector filter(int... positions) { } @Override - public void close() { - if (released) { - throw new IllegalStateException("can't release already released vector [" + this + "]"); - } - released = true; + public void closeInternal() { + // The circuit breaker that tracks the values {@link IntArray} is adjusted outside + // of this class. values.close(); } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBlock.java index 3909d2b6761be..9a66445eb55a2 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBlock.java @@ -18,7 +18,7 @@ * Block that stores int values. * This class is generated. Do not edit it. */ -public sealed interface IntBlock extends Block permits IntArrayBlock, IntVectorBlock, ConstantNullBlock { +public sealed interface IntBlock extends Block permits IntArrayBlock, IntVectorBlock, ConstantNullBlock, IntBigArrayBlock { /** * Retrieves the int value stored at the given value index. @@ -166,44 +166,6 @@ static int hash(IntBlock block) { return result; } - /** - * Returns a builder using the {@link BlockFactory#getNonBreakingInstance non-breaking block factory}. - * @deprecated use {@link BlockFactory#newIntBlockBuilder} - */ - // Eventually, we want to remove this entirely, always passing an explicit BlockFactory - @Deprecated - static Builder newBlockBuilder(int estimatedSize) { - return newBlockBuilder(estimatedSize, BlockFactory.getNonBreakingInstance()); - } - - /** - * Returns a builder. - * @deprecated use {@link BlockFactory#newIntBlockBuilder} - */ - @Deprecated - static Builder newBlockBuilder(int estimatedSize, BlockFactory blockFactory) { - return blockFactory.newIntBlockBuilder(estimatedSize); - } - - /** - * Returns a constant block built by the {@link BlockFactory#getNonBreakingInstance non-breaking block factory}. - * @deprecated use {@link BlockFactory#newConstantIntBlockWith} - */ - // Eventually, we want to remove this entirely, always passing an explicit BlockFactory - @Deprecated - static IntBlock newConstantBlockWith(int value, int positions) { - return newConstantBlockWith(value, positions, BlockFactory.getNonBreakingInstance()); - } - - /** - * Returns a constant block. 
- * @deprecated use {@link BlockFactory#newConstantIntBlockWith} - */ - @Deprecated - static IntBlock newConstantBlockWith(int value, int positions, BlockFactory blockFactory) { - return blockFactory.newConstantIntBlockWith(value, positions); - } - /** * Builder for {@link IntBlock} */ diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBlockBuilder.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBlockBuilder.java index 49c3b156ce44b..5f67c0683a5d7 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBlockBuilder.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBlockBuilder.java @@ -9,6 +9,7 @@ import org.apache.lucene.util.RamUsageEstimator; import org.elasticsearch.common.breaker.CircuitBreakingException; +import org.elasticsearch.common.util.IntArray; import java.util.Arrays; @@ -179,6 +180,29 @@ public IntBlockBuilder mvOrdering(Block.MvOrdering mvOrdering) { return this; } + private IntBlock buildBigArraysBlock() { + final IntBlock theBlock; + final IntArray array = blockFactory.bigArrays().newIntArray(valueCount, false); + for (int i = 0; i < valueCount; i++) { + array.set(i, values[i]); + } + if (isDense() && singleValued()) { + theBlock = new IntBigArrayVector(array, positionCount, blockFactory).asBlock(); + } else { + theBlock = new IntBigArrayBlock(array, positionCount, firstValueIndexes, nullsMask, mvOrdering, blockFactory); + } + /* + * Update the breaker with the actual bytes used. + * We pass false below even though we've used the bytes. That's weird, + * but if we break here we will throw away the used memory, letting + * it be deallocated. The exception will bubble up and the builder will + * still technically be open, meaning the calling code should close it + * which will return all used memory to the breaker. 
+ */ + blockFactory.adjustBreaker(theBlock.ramBytesUsed() - estimatedBytes - array.ramBytesUsed(), false); + return theBlock; + } + @Override public IntBlock build() { try { @@ -187,20 +211,26 @@ public IntBlock build() { if (hasNonNullValue && positionCount == 1 && valueCount == 1) { theBlock = blockFactory.newConstantIntBlockWith(values[0], 1, estimatedBytes); } else { - if (values.length - valueCount > 1024 || valueCount < (values.length / 2)) { - values = Arrays.copyOf(values, valueCount); - } - if (isDense() && singleValued()) { - theBlock = blockFactory.newIntArrayVector(values, positionCount, estimatedBytes).asBlock(); + if (estimatedBytes > blockFactory.maxPrimitiveArrayBytes()) { + theBlock = buildBigArraysBlock(); } else { - theBlock = blockFactory.newIntArrayBlock( - values, - positionCount, - firstValueIndexes, - nullsMask, - mvOrdering, - estimatedBytes - ); + if (values.length - valueCount > 1024 || valueCount < (values.length / 2)) { + adjustBreaker(valueCount * elementSize()); + values = Arrays.copyOf(values, valueCount); + adjustBreaker(-values.length * elementSize()); + } + if (isDense() && singleValued()) { + theBlock = blockFactory.newIntArrayVector(values, positionCount, estimatedBytes).asBlock(); + } else { + theBlock = blockFactory.newIntArrayBlock( + values, + positionCount, + firstValueIndexes, + nullsMask, + mvOrdering, + estimatedBytes + ); + } } } built(); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntVector.java index 645288565c431..bc7e3c87ec33d 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntVector.java @@ -101,36 +101,6 @@ default void writeTo(StreamOutput out) throws IOException { } } - /** - * Returns a builder using the {@link BlockFactory#getNonBreakingInstance nonbreaking block factory}. - * @deprecated use {@link BlockFactory#newIntVectorBuilder} - */ - // Eventually, we want to remove this entirely, always passing an explicit BlockFactory - @Deprecated - static Builder newVectorBuilder(int estimatedSize) { - return newVectorBuilder(estimatedSize, BlockFactory.getNonBreakingInstance()); - } - - /** - * Creates a builder that grows as needed. Prefer {@link #newVectorFixedBuilder} - * if you know the size up front because it's faster. - * @deprecated use {@link BlockFactory#newIntVectorBuilder} - */ - @Deprecated - static Builder newVectorBuilder(int estimatedSize, BlockFactory blockFactory) { - return blockFactory.newIntVectorBuilder(estimatedSize); - } - - /** - * Creates a builder that never grows. Prefer this over {@link #newVectorBuilder} - * if you know the size up front because it's faster. - * @deprecated use {@link BlockFactory#newIntVectorFixedBuilder} - */ - @Deprecated - static FixedBuilder newVectorFixedBuilder(int size, BlockFactory blockFactory) { - return blockFactory.newIntVectorFixedBuilder(size); - } - /** Create a vector for a range of ints. */ static IntVector range(int startInclusive, int endExclusive, BlockFactory blockFactory) { int[] values = new int[endExclusive - startInclusive]; @@ -143,7 +113,7 @@ static IntVector range(int startInclusive, int endExclusive, BlockFactory blockF /** * A builder that grows as needed. 
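One more accounting fix rides along in build(): the trim-to-size path now charges the breaker for the temporary copy before Arrays.copyOf runs and releases that charge right after, so the instant where old and new arrays coexist is visible to the breaker. An illustrative walk-through (plain longs, no real breaker):

    class TrimChargeSketch {
        public static void main(String[] args) {
            final long elementSize = Integer.BYTES;
            long charged = 4_096 * elementSize;      // the builder's grown array
            int valueCount = 1_000;
            charged += valueCount * elementSize;     // charge the trimmed copy up front
            int[] trimmed = new int[valueCount];     // stands in for Arrays.copyOf
            charged -= trimmed.length * elementSize; // release the temporary charge
            System.out.println("steady state: " + charged + " bytes"); // back where we started
        }
    }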
*/ - sealed interface Builder extends Vector.Builder permits IntVectorBuilder { + sealed interface Builder extends Vector.Builder permits IntVectorBuilder, FixedBuilder { /** * Appends a int to the current entry. */ @@ -156,13 +126,11 @@ sealed interface Builder extends Vector.Builder permits IntVectorBuilder { /** * A builder that never grows. */ - sealed interface FixedBuilder extends Vector.Builder permits IntVectorFixedBuilder { + sealed interface FixedBuilder extends Builder permits IntVectorFixedBuilder { /** * Appends a int to the current entry. */ - FixedBuilder appendInt(int value); - @Override - IntVector build(); + FixedBuilder appendInt(int value); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntVectorBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntVectorBlock.java index 028ef35577753..97a4a48533e3a 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntVectorBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntVectorBlock.java @@ -10,7 +10,7 @@ import org.elasticsearch.core.Releasables; /** - * Block view of a IntVector. + * Block view of a {@link IntVector}. Cannot represent multi-values or nulls. * This class is generated. Do not edit it. */ public final class IntVectorBlock extends AbstractVectorBlock implements IntBlock { @@ -73,11 +73,6 @@ public String toString() { return getClass().getSimpleName() + "[vector=" + vector + "]"; } - @Override - public boolean isReleased() { - return super.isReleased() || vector.isReleased(); - } - @Override public void closeInternal() { assert (vector.isReleased() == false) : "can't release block [" + this + "] containing already released vector"; diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongArrayBlock.java index 9e0fa9bcc2993..792f9b267e748 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongArrayBlock.java @@ -8,25 +8,21 @@ package org.elasticsearch.compute.data; import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.core.Releasables; -import java.util.Arrays; import java.util.BitSet; /** - * Block implementation that stores an array of long. + * Block implementation that stores values in a {@link LongArrayVector}. * This class is generated. Do not edit it. 
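The builder-interface change above (repeated for Double and Long) makes FixedBuilder a sub-interface of Builder instead of a sibling, so code can accept a plain Builder and work with either flavor. A hedged helper, assuming IntVector.Builder#build() still returns IntVector as before:

    static IntVector iota(IntVector.Builder builder, int n) {
        for (int i = 0; i < n; i++) {
            builder.appendInt(i);
        }
        return builder.build();
    }
    // Works with factory.newIntVectorBuilder(n) and factory.newIntVectorFixedBuilder(n) alike.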
*/ -public final class LongArrayBlock extends AbstractArrayBlock implements LongBlock { +final class LongArrayBlock extends AbstractArrayBlock implements LongBlock { private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(LongArrayBlock.class); - private final long[] values; + private final LongArrayVector vector; - public LongArrayBlock(long[] values, int positionCount, int[] firstValueIndexes, BitSet nulls, MvOrdering mvOrdering) { - this(values, positionCount, firstValueIndexes, nulls, mvOrdering, BlockFactory.getNonBreakingInstance()); - } - - public LongArrayBlock( + LongArrayBlock( long[] values, int positionCount, int[] firstValueIndexes, @@ -35,7 +31,7 @@ public LongArrayBlock( BlockFactory blockFactory ) { super(positionCount, firstValueIndexes, nulls, mvOrdering, blockFactory); - this.values = values; + this.vector = new LongArrayVector(values, values.length, blockFactory); } @Override @@ -45,7 +41,7 @@ public LongVector asVector() { @Override public long getLong(int valueIndex) { - return values[valueIndex]; + return vector.getLong(valueIndex); } @Override @@ -83,7 +79,7 @@ public LongBlock expand() { incRef(); return this; } - // TODO use reference counting to share the values + // TODO use reference counting to share the vector try (var builder = blockFactory().newLongBlockBuilder(firstValueIndexes[getPositionCount()])) { for (int pos = 0; pos < getPositionCount(); pos++) { if (isNull(pos)) { @@ -100,14 +96,13 @@ public LongBlock expand() { } } - public static long ramBytesEstimated(long[] values, int[] firstValueIndexes, BitSet nullsMask) { - return BASE_RAM_BYTES_USED + RamUsageEstimator.sizeOf(values) + BlockRamUsageEstimator.sizeOf(firstValueIndexes) - + BlockRamUsageEstimator.sizeOfBitSet(nullsMask); + private long ramBytesUsedOnlyBlock() { + return BASE_RAM_BYTES_USED + BlockRamUsageEstimator.sizeOf(firstValueIndexes) + BlockRamUsageEstimator.sizeOfBitSet(nullsMask); } @Override public long ramBytesUsed() { - return ramBytesEstimated(values, firstValueIndexes, nullsMask); + return ramBytesUsedOnlyBlock() + vector.ramBytesUsed(); } @Override @@ -130,13 +125,20 @@ public String toString() { + getPositionCount() + ", mvOrdering=" + mvOrdering() - + ", values=" - + Arrays.toString(values) + + ", vector=" + + vector + ']'; } + @Override + public void allowPassingToDifferentDriver() { + super.allowPassingToDifferentDriver(); + vector.allowPassingToDifferentDriver(); + } + @Override public void closeInternal() { - blockFactory().adjustBreaker(-ramBytesUsed(), true); + blockFactory().adjustBreaker(-ramBytesUsedOnlyBlock(), true); + Releasables.closeExpectNoException(vector); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongArrayVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongArrayVector.java index 0a3ada321d94c..3c5f6b7448321 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongArrayVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongArrayVector.java @@ -15,27 +15,20 @@ * Vector implementation that stores an array of long values. * This class is generated. Do not edit it. 
*/ -public final class LongArrayVector extends AbstractVector implements LongVector { +final class LongArrayVector extends AbstractVector implements LongVector { static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(LongArrayVector.class); private final long[] values; - private final LongBlock block; - - public LongArrayVector(long[] values, int positionCount) { - this(values, positionCount, BlockFactory.getNonBreakingInstance()); - } - - public LongArrayVector(long[] values, int positionCount, BlockFactory blockFactory) { + LongArrayVector(long[] values, int positionCount, BlockFactory blockFactory) { super(positionCount, blockFactory); this.values = values; - this.block = new LongVectorBlock(this); } @Override public LongBlock asBlock() { - return block; + return new LongVectorBlock(this); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBigArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBigArrayBlock.java new file mode 100644 index 0000000000000..dc19a4038a9e9 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBigArrayBlock.java @@ -0,0 +1,145 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.data; + +import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.util.LongArray; +import org.elasticsearch.core.Releasables; + +import java.util.BitSet; + +/** + * Block implementation that stores values in a {@link LongBigArrayVector}. Does not take ownership of the given + * {@link LongArray} and does not adjust circuit breakers to account for it. + * This class is generated. Do not edit it. + */ +public final class LongBigArrayBlock extends AbstractArrayBlock implements LongBlock { + + private static final long BASE_RAM_BYTES_USED = 0; // TODO: fix this + private final LongBigArrayVector vector; + + public LongBigArrayBlock( + LongArray values, + int positionCount, + int[] firstValueIndexes, + BitSet nulls, + MvOrdering mvOrdering, + BlockFactory blockFactory + ) { + super(positionCount, firstValueIndexes, nulls, mvOrdering, blockFactory); + this.vector = new LongBigArrayVector(values, (int) values.size(), blockFactory); + } + + @Override + public LongVector asVector() { + return null; + } + + @Override + public long getLong(int valueIndex) { + return vector.getLong(valueIndex); + } + + @Override + public LongBlock filter(int... 
positions) { + try (var builder = blockFactory().newLongBlockBuilder(positions.length)) { + for (int pos : positions) { + if (isNull(pos)) { + builder.appendNull(); + continue; + } + int valueCount = getValueCount(pos); + int first = getFirstValueIndex(pos); + if (valueCount == 1) { + builder.appendLong(getLong(getFirstValueIndex(pos))); + } else { + builder.beginPositionEntry(); + for (int c = 0; c < valueCount; c++) { + builder.appendLong(getLong(first + c)); + } + builder.endPositionEntry(); + } + } + return builder.mvOrdering(mvOrdering()).build(); + } + } + + @Override + public ElementType elementType() { + return ElementType.LONG; + } + + @Override + public LongBlock expand() { + if (firstValueIndexes == null) { + incRef(); + return this; + } + // TODO use reference counting to share the vector + try (var builder = blockFactory().newLongBlockBuilder(firstValueIndexes[getPositionCount()])) { + for (int pos = 0; pos < getPositionCount(); pos++) { + if (isNull(pos)) { + builder.appendNull(); + continue; + } + int first = getFirstValueIndex(pos); + int end = first + getValueCount(pos); + for (int i = first; i < end; i++) { + builder.appendLong(getLong(i)); + } + } + return builder.mvOrdering(MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING).build(); + } + } + + private long ramBytesUsedOnlyBlock() { + return BASE_RAM_BYTES_USED + BlockRamUsageEstimator.sizeOf(firstValueIndexes) + BlockRamUsageEstimator.sizeOfBitSet(nullsMask); + } + + @Override + public long ramBytesUsed() { + return ramBytesUsedOnlyBlock() + RamUsageEstimator.sizeOf(vector); + } + + @Override + public boolean equals(Object obj) { + if (obj instanceof LongBlock that) { + return LongBlock.equals(this, that); + } + return false; + } + + @Override + public int hashCode() { + return LongBlock.hash(this); + } + + @Override + public String toString() { + return getClass().getSimpleName() + + "[positions=" + + getPositionCount() + + ", mvOrdering=" + + mvOrdering() + + ", ramBytesUsed=" + + vector.ramBytesUsed() + + ']'; + } + + @Override + public void allowPassingToDifferentDriver() { + super.allowPassingToDifferentDriver(); + vector.allowPassingToDifferentDriver(); + } + + @Override + public void closeInternal() { + blockFactory().adjustBreaker(-ramBytesUsedOnlyBlock(), true); + Releasables.closeExpectNoException(vector); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBigArrayVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBigArrayVector.java index 2101b606e9a90..8c9f908e65368 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBigArrayVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBigArrayVector.java @@ -12,30 +12,24 @@ import org.elasticsearch.core.Releasable; /** - * Vector implementation that defers to an enclosed LongArray. + * Vector implementation that defers to an enclosed {@link LongArray}. + * Does not take ownership of the array and does not adjust circuit breakers to account for it. * This class is generated. Do not edit it. 
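As with the other wrapping blocks in this diff, LongBigArrayBlock forwards allowPassingToDifferentDriver() to its vector, so both halves of the pair are detached from the creating driver's local accounting together. Sketch (illustrative wiring):

    try (LongBlock.Builder builder = factory.newLongBlockBuilder(2)) {
        LongBlock block = builder.appendLong(1).appendNull().build();
        block.allowPassingToDifferentDriver(); // now also marks the wrapped vector
        block.close(); // safe from whichever driver ends up owning the page
    }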
*/ public final class LongBigArrayVector extends AbstractVector implements LongVector, Releasable { - private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(LongBigArrayVector.class); + private static final long BASE_RAM_BYTES_USED = 0; // FIXME private final LongArray values; - private final LongBlock block; - - public LongBigArrayVector(LongArray values, int positionCount) { - this(values, positionCount, BlockFactory.getNonBreakingInstance()); - } - public LongBigArrayVector(LongArray values, int positionCount, BlockFactory blockFactory) { super(positionCount, blockFactory); this.values = values; - this.block = new LongVectorBlock(this); } @Override public LongBlock asBlock() { - return block; + return new LongVectorBlock(this); } @Override @@ -69,11 +63,9 @@ public LongVector filter(int... positions) { } @Override - public void close() { - if (released) { - throw new IllegalStateException("can't release already released vector [" + this + "]"); - } - released = true; + public void closeInternal() { + // The circuit breaker that tracks the values {@link LongArray} is adjusted outside + // of this class. values.close(); } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBlock.java index 41ac8f7237f64..5e5dc0606b896 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBlock.java @@ -18,7 +18,7 @@ * Block that stores long values. * This class is generated. Do not edit it. */ -public sealed interface LongBlock extends Block permits LongArrayBlock, LongVectorBlock, ConstantNullBlock { +public sealed interface LongBlock extends Block permits LongArrayBlock, LongVectorBlock, ConstantNullBlock, LongBigArrayBlock { /** * Retrieves the long value stored at the given value index. @@ -167,44 +167,6 @@ static int hash(LongBlock block) { return result; } - /** - * Returns a builder using the {@link BlockFactory#getNonBreakingInstance non-breaking block factory}. - * @deprecated use {@link BlockFactory#newLongBlockBuilder} - */ - // Eventually, we want to remove this entirely, always passing an explicit BlockFactory - @Deprecated - static Builder newBlockBuilder(int estimatedSize) { - return newBlockBuilder(estimatedSize, BlockFactory.getNonBreakingInstance()); - } - - /** - * Returns a builder. - * @deprecated use {@link BlockFactory#newLongBlockBuilder} - */ - @Deprecated - static Builder newBlockBuilder(int estimatedSize, BlockFactory blockFactory) { - return blockFactory.newLongBlockBuilder(estimatedSize); - } - - /** - * Returns a constant block built by the {@link BlockFactory#getNonBreakingInstance non-breaking block factory}. - * @deprecated use {@link BlockFactory#newConstantLongBlockWith} - */ - // Eventually, we want to remove this entirely, always passing an explicit BlockFactory - @Deprecated - static LongBlock newConstantBlockWith(long value, int positions) { - return newConstantBlockWith(value, positions, BlockFactory.getNonBreakingInstance()); - } - - /** - * Returns a constant block. 
- * @deprecated use {@link BlockFactory#newConstantLongBlockWith} - */ - @Deprecated - static LongBlock newConstantBlockWith(long value, int positions, BlockFactory blockFactory) { - return blockFactory.newConstantLongBlockWith(value, positions); - } - /** * Builder for {@link LongBlock} */ diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBlockBuilder.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBlockBuilder.java index 1692c4cff6a57..4f8c1658c0973 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBlockBuilder.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBlockBuilder.java @@ -9,6 +9,7 @@ import org.apache.lucene.util.RamUsageEstimator; import org.elasticsearch.common.breaker.CircuitBreakingException; +import org.elasticsearch.common.util.LongArray; import java.util.Arrays; @@ -179,6 +180,29 @@ public LongBlockBuilder mvOrdering(Block.MvOrdering mvOrdering) { return this; } + private LongBlock buildBigArraysBlock() { + final LongBlock theBlock; + final LongArray array = blockFactory.bigArrays().newLongArray(valueCount, false); + for (int i = 0; i < valueCount; i++) { + array.set(i, values[i]); + } + if (isDense() && singleValued()) { + theBlock = new LongBigArrayVector(array, positionCount, blockFactory).asBlock(); + } else { + theBlock = new LongBigArrayBlock(array, positionCount, firstValueIndexes, nullsMask, mvOrdering, blockFactory); + } + /* + * Update the breaker with the actual bytes used. + * We pass false below even though we've used the bytes. That's weird, + * but if we break here we will throw away the used memory, letting + * it be deallocated. The exception will bubble up and the builder will + * still technically be open, meaning the calling code should close it + * which will return all used memory to the breaker. 
+ */ + blockFactory.adjustBreaker(theBlock.ramBytesUsed() - estimatedBytes - array.ramBytesUsed(), false); + return theBlock; + } + @Override public LongBlock build() { try { @@ -187,20 +211,26 @@ public LongBlock build() { if (hasNonNullValue && positionCount == 1 && valueCount == 1) { theBlock = blockFactory.newConstantLongBlockWith(values[0], 1, estimatedBytes); } else { - if (values.length - valueCount > 1024 || valueCount < (values.length / 2)) { - values = Arrays.copyOf(values, valueCount); - } - if (isDense() && singleValued()) { - theBlock = blockFactory.newLongArrayVector(values, positionCount, estimatedBytes).asBlock(); + if (estimatedBytes > blockFactory.maxPrimitiveArrayBytes()) { + theBlock = buildBigArraysBlock(); } else { - theBlock = blockFactory.newLongArrayBlock( - values, - positionCount, - firstValueIndexes, - nullsMask, - mvOrdering, - estimatedBytes - ); + if (values.length - valueCount > 1024 || valueCount < (values.length / 2)) { + adjustBreaker(valueCount * elementSize()); + values = Arrays.copyOf(values, valueCount); + adjustBreaker(-values.length * elementSize()); + } + if (isDense() && singleValued()) { + theBlock = blockFactory.newLongArrayVector(values, positionCount, estimatedBytes).asBlock(); + } else { + theBlock = blockFactory.newLongArrayBlock( + values, + positionCount, + firstValueIndexes, + nullsMask, + mvOrdering, + estimatedBytes + ); + } } } built(); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongVector.java index a312d7aeab0cc..358f5b32366cb 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongVector.java @@ -102,40 +102,10 @@ default void writeTo(StreamOutput out) throws IOException { } } - /** - * Returns a builder using the {@link BlockFactory#getNonBreakingInstance nonbreaking block factory}. - * @deprecated use {@link BlockFactory#newLongVectorBuilder} - */ - // Eventually, we want to remove this entirely, always passing an explicit BlockFactory - @Deprecated - static Builder newVectorBuilder(int estimatedSize) { - return newVectorBuilder(estimatedSize, BlockFactory.getNonBreakingInstance()); - } - - /** - * Creates a builder that grows as needed. Prefer {@link #newVectorFixedBuilder} - * if you know the size up front because it's faster. - * @deprecated use {@link BlockFactory#newLongVectorBuilder} - */ - @Deprecated - static Builder newVectorBuilder(int estimatedSize, BlockFactory blockFactory) { - return blockFactory.newLongVectorBuilder(estimatedSize); - } - - /** - * Creates a builder that never grows. Prefer this over {@link #newVectorBuilder} - * if you know the size up front because it's faster. - * @deprecated use {@link BlockFactory#newLongVectorFixedBuilder} - */ - @Deprecated - static FixedBuilder newVectorFixedBuilder(int size, BlockFactory blockFactory) { - return blockFactory.newLongVectorFixedBuilder(size); - } - /** * A builder that grows as needed. */ - sealed interface Builder extends Vector.Builder permits LongVectorBuilder { + sealed interface Builder extends Vector.Builder permits LongVectorBuilder, FixedBuilder { /** * Appends a long to the current entry. */ @@ -148,13 +118,11 @@ sealed interface Builder extends Vector.Builder permits LongVectorBuilder { /** * A builder that never grows. 
*/ - sealed interface FixedBuilder extends Vector.Builder permits LongVectorFixedBuilder { + sealed interface FixedBuilder extends Builder permits LongVectorFixedBuilder { /** * Appends a long to the current entry. */ - FixedBuilder appendLong(long value); - @Override - LongVector build(); + FixedBuilder appendLong(long value); } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongVectorBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongVectorBlock.java index 589a9341188fc..1f4565fec5a8d 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongVectorBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongVectorBlock.java @@ -10,7 +10,7 @@ import org.elasticsearch.core.Releasables; /** - * Block view of a LongVector. + * Block view of a {@link LongVector}. Cannot represent multi-values or nulls. * This class is generated. Do not edit it. */ public final class LongVectorBlock extends AbstractVectorBlock implements LongBlock { @@ -73,11 +73,6 @@ public String toString() { return getClass().getSimpleName() + "[vector=" + vector + "]"; } - @Override - public boolean isReleased() { - return super.isReleased() || vector.isReleased(); - } - @Override public void closeInternal() { assert (vector.isReleased() == false) : "can't release block [" + this + "] containing already released vector"; diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/MultivalueDedupeBytesRef.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/MultivalueDedupeBytesRef.java index 1fd4c1ea3562d..89388cd9cc109 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/MultivalueDedupeBytesRef.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/MultivalueDedupeBytesRef.java @@ -49,7 +49,7 @@ public BytesRefBlock dedupeToBlockAdaptive(BlockFactory blockFactory) { block.incRef(); return block; } - try (BytesRefBlock.Builder builder = BytesRefBlock.newBlockBuilder(block.getPositionCount(), blockFactory)) { + try (BytesRefBlock.Builder builder = blockFactory.newBytesRefBlockBuilder(block.getPositionCount())) { for (int p = 0; p < block.getPositionCount(); p++) { int count = block.getValueCount(p); int first = block.getFirstValueIndex(p); @@ -99,7 +99,7 @@ public BytesRefBlock dedupeToBlockUsingCopyAndSort(BlockFactory blockFactory) { block.incRef(); return block; } - try (BytesRefBlock.Builder builder = BytesRefBlock.newBlockBuilder(block.getPositionCount(), blockFactory)) { + try (BytesRefBlock.Builder builder = blockFactory.newBytesRefBlockBuilder(block.getPositionCount())) { for (int p = 0; p < block.getPositionCount(); p++) { int count = block.getValueCount(p); int first = block.getFirstValueIndex(p); @@ -129,7 +129,7 @@ public BytesRefBlock dedupeToBlockUsingCopyMissing(BlockFactory blockFactory) { block.incRef(); return block; } - try (BytesRefBlock.Builder builder = BytesRefBlock.newBlockBuilder(block.getPositionCount(), blockFactory)) { + try (BytesRefBlock.Builder builder = blockFactory.newBytesRefBlockBuilder(block.getPositionCount())) { for (int p = 0; p < block.getPositionCount(); p++) { int count = block.getValueCount(p); int first = block.getFirstValueIndex(p); diff --git 
a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/MultivalueDedupeDouble.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/MultivalueDedupeDouble.java index 157b6670e95af..6066dbe8a74e0 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/MultivalueDedupeDouble.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/MultivalueDedupeDouble.java @@ -46,7 +46,7 @@ public DoubleBlock dedupeToBlockAdaptive(BlockFactory blockFactory) { block.incRef(); return block; } - try (DoubleBlock.Builder builder = DoubleBlock.newBlockBuilder(block.getPositionCount(), blockFactory)) { + try (DoubleBlock.Builder builder = blockFactory.newDoubleBlockBuilder(block.getPositionCount())) { for (int p = 0; p < block.getPositionCount(); p++) { int count = block.getValueCount(p); int first = block.getFirstValueIndex(p); @@ -96,7 +96,7 @@ public DoubleBlock dedupeToBlockUsingCopyAndSort(BlockFactory blockFactory) { block.incRef(); return block; } - try (DoubleBlock.Builder builder = DoubleBlock.newBlockBuilder(block.getPositionCount(), blockFactory)) { + try (DoubleBlock.Builder builder = blockFactory.newDoubleBlockBuilder(block.getPositionCount())) { for (int p = 0; p < block.getPositionCount(); p++) { int count = block.getValueCount(p); int first = block.getFirstValueIndex(p); @@ -126,7 +126,7 @@ public DoubleBlock dedupeToBlockUsingCopyMissing(BlockFactory blockFactory) { block.incRef(); return block; } - try (DoubleBlock.Builder builder = DoubleBlock.newBlockBuilder(block.getPositionCount(), blockFactory)) { + try (DoubleBlock.Builder builder = blockFactory.newDoubleBlockBuilder(block.getPositionCount())) { for (int p = 0; p < block.getPositionCount(); p++) { int count = block.getValueCount(p); int first = block.getFirstValueIndex(p); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/MultivalueDedupeInt.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/MultivalueDedupeInt.java index 7bc9d77d3f877..866c38d5d7277 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/MultivalueDedupeInt.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/MultivalueDedupeInt.java @@ -45,7 +45,7 @@ public IntBlock dedupeToBlockAdaptive(BlockFactory blockFactory) { block.incRef(); return block; } - try (IntBlock.Builder builder = IntBlock.newBlockBuilder(block.getPositionCount(), blockFactory)) { + try (IntBlock.Builder builder = blockFactory.newIntBlockBuilder(block.getPositionCount())) { for (int p = 0; p < block.getPositionCount(); p++) { int count = block.getValueCount(p); int first = block.getFirstValueIndex(p); @@ -95,7 +95,7 @@ public IntBlock dedupeToBlockUsingCopyAndSort(BlockFactory blockFactory) { block.incRef(); return block; } - try (IntBlock.Builder builder = IntBlock.newBlockBuilder(block.getPositionCount(), blockFactory)) { + try (IntBlock.Builder builder = blockFactory.newIntBlockBuilder(block.getPositionCount())) { for (int p = 0; p < block.getPositionCount(); p++) { int count = block.getValueCount(p); int first = block.getFirstValueIndex(p); @@ -125,7 +125,7 @@ public IntBlock dedupeToBlockUsingCopyMissing(BlockFactory blockFactory) { block.incRef(); return block; } - try (IntBlock.Builder builder = IntBlock.newBlockBuilder(block.getPositionCount(), blockFactory)) { + try 
(IntBlock.Builder builder = blockFactory.newIntBlockBuilder(block.getPositionCount())) { for (int p = 0; p < block.getPositionCount(); p++) { int count = block.getValueCount(p); int first = block.getFirstValueIndex(p); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/MultivalueDedupeLong.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/MultivalueDedupeLong.java index acbc9139a75c5..a3012ffa551b2 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/MultivalueDedupeLong.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/MultivalueDedupeLong.java @@ -47,7 +47,7 @@ public LongBlock dedupeToBlockAdaptive(BlockFactory blockFactory) { block.incRef(); return block; } - try (LongBlock.Builder builder = LongBlock.newBlockBuilder(block.getPositionCount(), blockFactory)) { + try (LongBlock.Builder builder = blockFactory.newLongBlockBuilder(block.getPositionCount())) { for (int p = 0; p < block.getPositionCount(); p++) { int count = block.getValueCount(p); int first = block.getFirstValueIndex(p); @@ -97,7 +97,7 @@ public LongBlock dedupeToBlockUsingCopyAndSort(BlockFactory blockFactory) { block.incRef(); return block; } - try (LongBlock.Builder builder = LongBlock.newBlockBuilder(block.getPositionCount(), blockFactory)) { + try (LongBlock.Builder builder = blockFactory.newLongBlockBuilder(block.getPositionCount())) { for (int p = 0; p < block.getPositionCount(); p++) { int count = block.getValueCount(p); int first = block.getFirstValueIndex(p); @@ -127,7 +127,7 @@ public LongBlock dedupeToBlockUsingCopyMissing(BlockFactory blockFactory) { block.incRef(); return block; } - try (LongBlock.Builder builder = LongBlock.newBlockBuilder(block.getPositionCount(), blockFactory)) { + try (LongBlock.Builder builder = blockFactory.newLongBlockBuilder(block.getPositionCount())) { for (int p = 0; p < block.getPositionCount(); p++) { int count = block.getValueCount(p); int first = block.getFirstValueIndex(p); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ResultBuilderForBoolean.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ResultBuilderForBoolean.java index 3d568adc2b5ea..184ef69f00d85 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ResultBuilderForBoolean.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ResultBuilderForBoolean.java @@ -24,7 +24,7 @@ class ResultBuilderForBoolean implements ResultBuilder { ResultBuilderForBoolean(BlockFactory blockFactory, TopNEncoder encoder, boolean inKey, int initialSize) { assert encoder == TopNEncoder.DEFAULT_UNSORTABLE : encoder.toString(); this.inKey = inKey; - this.builder = BooleanBlock.newBlockBuilder(initialSize, blockFactory); + this.builder = blockFactory.newBooleanBlockBuilder(initialSize); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ResultBuilderForBytesRef.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ResultBuilderForBytesRef.java index e37f82f3363a9..4008f7fbd924b 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ResultBuilderForBytesRef.java +++ 
b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ResultBuilderForBytesRef.java @@ -28,7 +28,7 @@ class ResultBuilderForBytesRef implements ResultBuilder { ResultBuilderForBytesRef(BlockFactory blockFactory, TopNEncoder encoder, boolean inKey, int initialSize) { this.encoder = encoder; this.inKey = inKey; - this.builder = BytesRefBlock.newBlockBuilder(initialSize, blockFactory); + this.builder = blockFactory.newBytesRefBlockBuilder(initialSize); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ResultBuilderForDouble.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ResultBuilderForDouble.java index 77c976c6e0085..f06a1e814ef43 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ResultBuilderForDouble.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ResultBuilderForDouble.java @@ -24,7 +24,7 @@ class ResultBuilderForDouble implements ResultBuilder { ResultBuilderForDouble(BlockFactory blockFactory, TopNEncoder encoder, boolean inKey, int initialSize) { assert encoder == TopNEncoder.DEFAULT_UNSORTABLE : encoder.toString(); this.inKey = inKey; - this.builder = DoubleBlock.newBlockBuilder(initialSize, blockFactory); + this.builder = blockFactory.newDoubleBlockBuilder(initialSize); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ResultBuilderForInt.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ResultBuilderForInt.java index 389ed3bc2e3c3..848bbf9ab6a0a 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ResultBuilderForInt.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ResultBuilderForInt.java @@ -24,7 +24,7 @@ class ResultBuilderForInt implements ResultBuilder { ResultBuilderForInt(BlockFactory blockFactory, TopNEncoder encoder, boolean inKey, int initialSize) { assert encoder == TopNEncoder.DEFAULT_UNSORTABLE : encoder.toString(); this.inKey = inKey; - this.builder = IntBlock.newBlockBuilder(initialSize, blockFactory); + this.builder = blockFactory.newIntBlockBuilder(initialSize); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ResultBuilderForLong.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ResultBuilderForLong.java index 63ee9d35c59e5..b4361ad83180a 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ResultBuilderForLong.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/operator/topn/ResultBuilderForLong.java @@ -24,7 +24,7 @@ class ResultBuilderForLong implements ResultBuilder { ResultBuilderForLong(BlockFactory blockFactory, TopNEncoder encoder, boolean inKey, int initialSize) { assert encoder == TopNEncoder.DEFAULT_UNSORTABLE : encoder.toString(); this.inKey = inKey; - this.builder = LongBlock.newBlockBuilder(initialSize, blockFactory); + this.builder = blockFactory.newLongBlockBuilder(initialSize); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctBooleanAggregatorFunction.java 
b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctBooleanAggregatorFunction.java index dd5450d3b460c..e9b4498d50265 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctBooleanAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctBooleanAggregatorFunction.java @@ -86,14 +86,18 @@ private void addRawBlock(BooleanBlock block) { public void addIntermediateInput(Page page) { assert channels.size() == intermediateBlockCount(); assert page.getBlockCount() >= channels.get(0) + intermediateStateDesc().size(); - Block uncastBlock = page.getBlock(channels.get(0)); - if (uncastBlock.areAllValuesNull()) { + Block fbitUncast = page.getBlock(channels.get(0)); + if (fbitUncast.areAllValuesNull()) { return; } - BooleanVector fbit = page.getBlock(channels.get(0)).asVector(); - BooleanVector tbit = page.getBlock(channels.get(1)).asVector(); + BooleanVector fbit = ((BooleanBlock) fbitUncast).asVector(); assert fbit.getPositionCount() == 1; - assert fbit.getPositionCount() == tbit.getPositionCount(); + Block tbitUncast = page.getBlock(channels.get(1)); + if (tbitUncast.areAllValuesNull()) { + return; + } + BooleanVector tbit = ((BooleanBlock) tbitUncast).asVector(); + assert tbit.getPositionCount() == 1; CountDistinctBooleanAggregator.combineIntermediate(state, fbit.getBoolean(0), tbit.getBoolean(0)); } diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctBytesRefAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctBytesRefAggregatorFunction.java index fd770678d5943..3591dbeb41ffa 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctBytesRefAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctBytesRefAggregatorFunction.java @@ -95,11 +95,11 @@ private void addRawBlock(BytesRefBlock block) { public void addIntermediateInput(Page page) { assert channels.size() == intermediateBlockCount(); assert page.getBlockCount() >= channels.get(0) + intermediateStateDesc().size(); - Block uncastBlock = page.getBlock(channels.get(0)); - if (uncastBlock.areAllValuesNull()) { + Block hllUncast = page.getBlock(channels.get(0)); + if (hllUncast.areAllValuesNull()) { return; } - BytesRefVector hll = page.getBlock(channels.get(0)).asVector(); + BytesRefVector hll = ((BytesRefBlock) hllUncast).asVector(); assert hll.getPositionCount() == 1; BytesRef scratch = new BytesRef(); CountDistinctBytesRefAggregator.combineIntermediate(state, hll.getBytesRef(0, scratch)); diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctDoubleAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctDoubleAggregatorFunction.java index a8169b5a901e1..38d4c7250debe 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctDoubleAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctDoubleAggregatorFunction.java @@ -95,11 +95,11 @@ private void addRawBlock(DoubleBlock block) { public void addIntermediateInput(Page page) { assert channels.size() == intermediateBlockCount(); assert 
page.getBlockCount() >= channels.get(0) + intermediateStateDesc().size(); - Block uncastBlock = page.getBlock(channels.get(0)); - if (uncastBlock.areAllValuesNull()) { + Block hllUncast = page.getBlock(channels.get(0)); + if (hllUncast.areAllValuesNull()) { return; } - BytesRefVector hll = page.getBlock(channels.get(0)).asVector(); + BytesRefVector hll = ((BytesRefBlock) hllUncast).asVector(); assert hll.getPositionCount() == 1; BytesRef scratch = new BytesRef(); CountDistinctDoubleAggregator.combineIntermediate(state, hll.getBytesRef(0, scratch)); diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctIntAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctIntAggregatorFunction.java index 9f685f4672939..d4bc68500745e 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctIntAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctIntAggregatorFunction.java @@ -95,11 +95,11 @@ private void addRawBlock(IntBlock block) { public void addIntermediateInput(Page page) { assert channels.size() == intermediateBlockCount(); assert page.getBlockCount() >= channels.get(0) + intermediateStateDesc().size(); - Block uncastBlock = page.getBlock(channels.get(0)); - if (uncastBlock.areAllValuesNull()) { + Block hllUncast = page.getBlock(channels.get(0)); + if (hllUncast.areAllValuesNull()) { return; } - BytesRefVector hll = page.getBlock(channels.get(0)).asVector(); + BytesRefVector hll = ((BytesRefBlock) hllUncast).asVector(); assert hll.getPositionCount() == 1; BytesRef scratch = new BytesRef(); CountDistinctIntAggregator.combineIntermediate(state, hll.getBytesRef(0, scratch)); diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctLongAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctLongAggregatorFunction.java index 55b396aa627d5..06c6f67b356e0 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctLongAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/CountDistinctLongAggregatorFunction.java @@ -95,11 +95,11 @@ private void addRawBlock(LongBlock block) { public void addIntermediateInput(Page page) { assert channels.size() == intermediateBlockCount(); assert page.getBlockCount() >= channels.get(0) + intermediateStateDesc().size(); - Block uncastBlock = page.getBlock(channels.get(0)); - if (uncastBlock.areAllValuesNull()) { + Block hllUncast = page.getBlock(channels.get(0)); + if (hllUncast.areAllValuesNull()) { return; } - BytesRefVector hll = page.getBlock(channels.get(0)).asVector(); + BytesRefVector hll = ((BytesRefBlock) hllUncast).asVector(); assert hll.getPositionCount() == 1; BytesRef scratch = new BytesRef(); CountDistinctLongAggregator.combineIntermediate(state, hll.getBytesRef(0, scratch)); diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxDoubleAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxDoubleAggregatorFunction.java index 6929900c29ea1..f78a8773ccfcd 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxDoubleAggregatorFunction.java +++ 
b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxDoubleAggregatorFunction.java @@ -90,14 +90,18 @@ private void addRawBlock(DoubleBlock block) { public void addIntermediateInput(Page page) { assert channels.size() == intermediateBlockCount(); assert page.getBlockCount() >= channels.get(0) + intermediateStateDesc().size(); - Block uncastBlock = page.getBlock(channels.get(0)); - if (uncastBlock.areAllValuesNull()) { + Block maxUncast = page.getBlock(channels.get(0)); + if (maxUncast.areAllValuesNull()) { return; } - DoubleVector max = page.getBlock(channels.get(0)).asVector(); - BooleanVector seen = page.getBlock(channels.get(1)).asVector(); + DoubleVector max = ((DoubleBlock) maxUncast).asVector(); assert max.getPositionCount() == 1; - assert max.getPositionCount() == seen.getPositionCount(); + Block seenUncast = page.getBlock(channels.get(1)); + if (seenUncast.areAllValuesNull()) { + return; + } + BooleanVector seen = ((BooleanBlock) seenUncast).asVector(); + assert seen.getPositionCount() == 1; if (seen.getBoolean(0)) { state.doubleValue(MaxDoubleAggregator.combine(state.doubleValue(), max.getDouble(0))); state.seen(true); @@ -112,10 +116,10 @@ public void evaluateIntermediate(Block[] blocks, int offset, DriverContext drive @Override public void evaluateFinal(Block[] blocks, int offset, DriverContext driverContext) { if (state.seen() == false) { - blocks[offset] = Block.constantNullBlock(1, driverContext.blockFactory()); + blocks[offset] = driverContext.blockFactory().newConstantNullBlock(1); return; } - blocks[offset] = DoubleBlock.newConstantBlockWith(state.doubleValue(), 1, driverContext.blockFactory()); + blocks[offset] = driverContext.blockFactory().newConstantDoubleBlockWith(state.doubleValue(), 1); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxIntAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxIntAggregatorFunction.java index 1759442fbb12a..6f83ee7224879 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxIntAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxIntAggregatorFunction.java @@ -90,14 +90,18 @@ private void addRawBlock(IntBlock block) { public void addIntermediateInput(Page page) { assert channels.size() == intermediateBlockCount(); assert page.getBlockCount() >= channels.get(0) + intermediateStateDesc().size(); - Block uncastBlock = page.getBlock(channels.get(0)); - if (uncastBlock.areAllValuesNull()) { + Block maxUncast = page.getBlock(channels.get(0)); + if (maxUncast.areAllValuesNull()) { return; } - IntVector max = page.getBlock(channels.get(0)).asVector(); - BooleanVector seen = page.getBlock(channels.get(1)).asVector(); + IntVector max = ((IntBlock) maxUncast).asVector(); assert max.getPositionCount() == 1; - assert max.getPositionCount() == seen.getPositionCount(); + Block seenUncast = page.getBlock(channels.get(1)); + if (seenUncast.areAllValuesNull()) { + return; + } + BooleanVector seen = ((BooleanBlock) seenUncast).asVector(); + assert seen.getPositionCount() == 1; if (seen.getBoolean(0)) { state.intValue(MaxIntAggregator.combine(state.intValue(), max.getInt(0))); state.seen(true); @@ -112,10 +116,10 @@ public void evaluateIntermediate(Block[] blocks, int offset, DriverContext drive @Override public void evaluateFinal(Block[] blocks, int offset, DriverContext driverContext) { if 
(state.seen() == false) { - blocks[offset] = Block.constantNullBlock(1, driverContext.blockFactory()); + blocks[offset] = driverContext.blockFactory().newConstantNullBlock(1); return; } - blocks[offset] = IntBlock.newConstantBlockWith(state.intValue(), 1, driverContext.blockFactory()); + blocks[offset] = driverContext.blockFactory().newConstantIntBlockWith(state.intValue(), 1); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxLongAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxLongAggregatorFunction.java index fe7d797faf10a..8826128a68837 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxLongAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MaxLongAggregatorFunction.java @@ -90,14 +90,18 @@ private void addRawBlock(LongBlock block) { public void addIntermediateInput(Page page) { assert channels.size() == intermediateBlockCount(); assert page.getBlockCount() >= channels.get(0) + intermediateStateDesc().size(); - Block uncastBlock = page.getBlock(channels.get(0)); - if (uncastBlock.areAllValuesNull()) { + Block maxUncast = page.getBlock(channels.get(0)); + if (maxUncast.areAllValuesNull()) { return; } - LongVector max = page.getBlock(channels.get(0)).asVector(); - BooleanVector seen = page.getBlock(channels.get(1)).asVector(); + LongVector max = ((LongBlock) maxUncast).asVector(); assert max.getPositionCount() == 1; - assert max.getPositionCount() == seen.getPositionCount(); + Block seenUncast = page.getBlock(channels.get(1)); + if (seenUncast.areAllValuesNull()) { + return; + } + BooleanVector seen = ((BooleanBlock) seenUncast).asVector(); + assert seen.getPositionCount() == 1; if (seen.getBoolean(0)) { state.longValue(MaxLongAggregator.combine(state.longValue(), max.getLong(0))); state.seen(true); @@ -112,10 +116,10 @@ public void evaluateIntermediate(Block[] blocks, int offset, DriverContext drive @Override public void evaluateFinal(Block[] blocks, int offset, DriverContext driverContext) { if (state.seen() == false) { - blocks[offset] = Block.constantNullBlock(1, driverContext.blockFactory()); + blocks[offset] = driverContext.blockFactory().newConstantNullBlock(1); return; } - blocks[offset] = LongBlock.newConstantBlockWith(state.longValue(), 1, driverContext.blockFactory()); + blocks[offset] = driverContext.blockFactory().newConstantLongBlockWith(state.longValue(), 1); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationDoubleAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationDoubleAggregatorFunction.java index a2e8d8fbf592c..4bcf08ce0fa35 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationDoubleAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationDoubleAggregatorFunction.java @@ -88,11 +88,11 @@ private void addRawBlock(DoubleBlock block) { public void addIntermediateInput(Page page) { assert channels.size() == intermediateBlockCount(); assert page.getBlockCount() >= channels.get(0) + intermediateStateDesc().size(); - Block uncastBlock = page.getBlock(channels.get(0)); - if (uncastBlock.areAllValuesNull()) { + Block quartUncast = 
page.getBlock(channels.get(0)); + if (quartUncast.areAllValuesNull()) { return; } - BytesRefVector quart = page.getBlock(channels.get(0)).asVector(); + BytesRefVector quart = ((BytesRefBlock) quartUncast).asVector(); assert quart.getPositionCount() == 1; BytesRef scratch = new BytesRef(); MedianAbsoluteDeviationDoubleAggregator.combineIntermediate(state, quart.getBytesRef(0, scratch)); diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationIntAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationIntAggregatorFunction.java index 21e99587a5d09..db9dbdab52244 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationIntAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationIntAggregatorFunction.java @@ -88,11 +88,11 @@ private void addRawBlock(IntBlock block) { public void addIntermediateInput(Page page) { assert channels.size() == intermediateBlockCount(); assert page.getBlockCount() >= channels.get(0) + intermediateStateDesc().size(); - Block uncastBlock = page.getBlock(channels.get(0)); - if (uncastBlock.areAllValuesNull()) { + Block quartUncast = page.getBlock(channels.get(0)); + if (quartUncast.areAllValuesNull()) { return; } - BytesRefVector quart = page.getBlock(channels.get(0)).asVector(); + BytesRefVector quart = ((BytesRefBlock) quartUncast).asVector(); assert quart.getPositionCount() == 1; BytesRef scratch = new BytesRef(); MedianAbsoluteDeviationIntAggregator.combineIntermediate(state, quart.getBytesRef(0, scratch)); diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationLongAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationLongAggregatorFunction.java index 8c3aa95864aff..bf5fd51d7ed17 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationLongAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationLongAggregatorFunction.java @@ -88,11 +88,11 @@ private void addRawBlock(LongBlock block) { public void addIntermediateInput(Page page) { assert channels.size() == intermediateBlockCount(); assert page.getBlockCount() >= channels.get(0) + intermediateStateDesc().size(); - Block uncastBlock = page.getBlock(channels.get(0)); - if (uncastBlock.areAllValuesNull()) { + Block quartUncast = page.getBlock(channels.get(0)); + if (quartUncast.areAllValuesNull()) { return; } - BytesRefVector quart = page.getBlock(channels.get(0)).asVector(); + BytesRefVector quart = ((BytesRefBlock) quartUncast).asVector(); assert quart.getPositionCount() == 1; BytesRef scratch = new BytesRef(); MedianAbsoluteDeviationLongAggregator.combineIntermediate(state, quart.getBytesRef(0, scratch)); diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinDoubleAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinDoubleAggregatorFunction.java index 1f9a8fb49fb2d..7d7544e5d8470 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinDoubleAggregatorFunction.java +++ 
b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinDoubleAggregatorFunction.java @@ -90,14 +90,18 @@ private void addRawBlock(DoubleBlock block) { public void addIntermediateInput(Page page) { assert channels.size() == intermediateBlockCount(); assert page.getBlockCount() >= channels.get(0) + intermediateStateDesc().size(); - Block uncastBlock = page.getBlock(channels.get(0)); - if (uncastBlock.areAllValuesNull()) { + Block minUncast = page.getBlock(channels.get(0)); + if (minUncast.areAllValuesNull()) { return; } - DoubleVector min = page.getBlock(channels.get(0)).asVector(); - BooleanVector seen = page.getBlock(channels.get(1)).asVector(); + DoubleVector min = ((DoubleBlock) minUncast).asVector(); assert min.getPositionCount() == 1; - assert min.getPositionCount() == seen.getPositionCount(); + Block seenUncast = page.getBlock(channels.get(1)); + if (seenUncast.areAllValuesNull()) { + return; + } + BooleanVector seen = ((BooleanBlock) seenUncast).asVector(); + assert seen.getPositionCount() == 1; if (seen.getBoolean(0)) { state.doubleValue(MinDoubleAggregator.combine(state.doubleValue(), min.getDouble(0))); state.seen(true); @@ -112,10 +116,10 @@ public void evaluateIntermediate(Block[] blocks, int offset, DriverContext drive @Override public void evaluateFinal(Block[] blocks, int offset, DriverContext driverContext) { if (state.seen() == false) { - blocks[offset] = Block.constantNullBlock(1, driverContext.blockFactory()); + blocks[offset] = driverContext.blockFactory().newConstantNullBlock(1); return; } - blocks[offset] = DoubleBlock.newConstantBlockWith(state.doubleValue(), 1, driverContext.blockFactory()); + blocks[offset] = driverContext.blockFactory().newConstantDoubleBlockWith(state.doubleValue(), 1); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinIntAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinIntAggregatorFunction.java index bbeba4c8374ab..0f2385cc120f9 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinIntAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinIntAggregatorFunction.java @@ -90,14 +90,18 @@ private void addRawBlock(IntBlock block) { public void addIntermediateInput(Page page) { assert channels.size() == intermediateBlockCount(); assert page.getBlockCount() >= channels.get(0) + intermediateStateDesc().size(); - Block uncastBlock = page.getBlock(channels.get(0)); - if (uncastBlock.areAllValuesNull()) { + Block minUncast = page.getBlock(channels.get(0)); + if (minUncast.areAllValuesNull()) { return; } - IntVector min = page.getBlock(channels.get(0)).asVector(); - BooleanVector seen = page.getBlock(channels.get(1)).asVector(); + IntVector min = ((IntBlock) minUncast).asVector(); assert min.getPositionCount() == 1; - assert min.getPositionCount() == seen.getPositionCount(); + Block seenUncast = page.getBlock(channels.get(1)); + if (seenUncast.areAllValuesNull()) { + return; + } + BooleanVector seen = ((BooleanBlock) seenUncast).asVector(); + assert seen.getPositionCount() == 1; if (seen.getBoolean(0)) { state.intValue(MinIntAggregator.combine(state.intValue(), min.getInt(0))); state.seen(true); @@ -112,10 +116,10 @@ public void evaluateIntermediate(Block[] blocks, int offset, DriverContext drive @Override public void evaluateFinal(Block[] blocks, int offset, DriverContext driverContext) { if 
(state.seen() == false) { - blocks[offset] = Block.constantNullBlock(1, driverContext.blockFactory()); + blocks[offset] = driverContext.blockFactory().newConstantNullBlock(1); return; } - blocks[offset] = IntBlock.newConstantBlockWith(state.intValue(), 1, driverContext.blockFactory()); + blocks[offset] = driverContext.blockFactory().newConstantIntBlockWith(state.intValue(), 1); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinLongAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinLongAggregatorFunction.java index 5299b505e124c..805729588158e 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinLongAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/MinLongAggregatorFunction.java @@ -90,14 +90,18 @@ private void addRawBlock(LongBlock block) { public void addIntermediateInput(Page page) { assert channels.size() == intermediateBlockCount(); assert page.getBlockCount() >= channels.get(0) + intermediateStateDesc().size(); - Block uncastBlock = page.getBlock(channels.get(0)); - if (uncastBlock.areAllValuesNull()) { + Block minUncast = page.getBlock(channels.get(0)); + if (minUncast.areAllValuesNull()) { return; } - LongVector min = page.getBlock(channels.get(0)).asVector(); - BooleanVector seen = page.getBlock(channels.get(1)).asVector(); + LongVector min = ((LongBlock) minUncast).asVector(); assert min.getPositionCount() == 1; - assert min.getPositionCount() == seen.getPositionCount(); + Block seenUncast = page.getBlock(channels.get(1)); + if (seenUncast.areAllValuesNull()) { + return; + } + BooleanVector seen = ((BooleanBlock) seenUncast).asVector(); + assert seen.getPositionCount() == 1; if (seen.getBoolean(0)) { state.longValue(MinLongAggregator.combine(state.longValue(), min.getLong(0))); state.seen(true); @@ -112,10 +116,10 @@ public void evaluateIntermediate(Block[] blocks, int offset, DriverContext drive @Override public void evaluateFinal(Block[] blocks, int offset, DriverContext driverContext) { if (state.seen() == false) { - blocks[offset] = Block.constantNullBlock(1, driverContext.blockFactory()); + blocks[offset] = driverContext.blockFactory().newConstantNullBlock(1); return; } - blocks[offset] = LongBlock.newConstantBlockWith(state.longValue(), 1, driverContext.blockFactory()); + blocks[offset] = driverContext.blockFactory().newConstantLongBlockWith(state.longValue(), 1); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileDoubleAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileDoubleAggregatorFunction.java index f7560379e476d..cd7a5b5974442 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileDoubleAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileDoubleAggregatorFunction.java @@ -91,11 +91,11 @@ private void addRawBlock(DoubleBlock block) { public void addIntermediateInput(Page page) { assert channels.size() == intermediateBlockCount(); assert page.getBlockCount() >= channels.get(0) + intermediateStateDesc().size(); - Block uncastBlock = page.getBlock(channels.get(0)); - if (uncastBlock.areAllValuesNull()) { + Block quartUncast = page.getBlock(channels.get(0)); + if 
(quartUncast.areAllValuesNull()) { return; } - BytesRefVector quart = page.getBlock(channels.get(0)).asVector(); + BytesRefVector quart = ((BytesRefBlock) quartUncast).asVector(); assert quart.getPositionCount() == 1; BytesRef scratch = new BytesRef(); PercentileDoubleAggregator.combineIntermediate(state, quart.getBytesRef(0, scratch)); diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileIntAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileIntAggregatorFunction.java index d45ba7a1e350a..b9b1c2e90b768 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileIntAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileIntAggregatorFunction.java @@ -91,11 +91,11 @@ private void addRawBlock(IntBlock block) { public void addIntermediateInput(Page page) { assert channels.size() == intermediateBlockCount(); assert page.getBlockCount() >= channels.get(0) + intermediateStateDesc().size(); - Block uncastBlock = page.getBlock(channels.get(0)); - if (uncastBlock.areAllValuesNull()) { + Block quartUncast = page.getBlock(channels.get(0)); + if (quartUncast.areAllValuesNull()) { return; } - BytesRefVector quart = page.getBlock(channels.get(0)).asVector(); + BytesRefVector quart = ((BytesRefBlock) quartUncast).asVector(); assert quart.getPositionCount() == 1; BytesRef scratch = new BytesRef(); PercentileIntAggregator.combineIntermediate(state, quart.getBytesRef(0, scratch)); diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileLongAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileLongAggregatorFunction.java index dac045d814926..cc785ce55bb55 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileLongAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/PercentileLongAggregatorFunction.java @@ -91,11 +91,11 @@ private void addRawBlock(LongBlock block) { public void addIntermediateInput(Page page) { assert channels.size() == intermediateBlockCount(); assert page.getBlockCount() >= channels.get(0) + intermediateStateDesc().size(); - Block uncastBlock = page.getBlock(channels.get(0)); - if (uncastBlock.areAllValuesNull()) { + Block quartUncast = page.getBlock(channels.get(0)); + if (quartUncast.areAllValuesNull()) { return; } - BytesRefVector quart = page.getBlock(channels.get(0)).asVector(); + BytesRefVector quart = ((BytesRefBlock) quartUncast).asVector(); assert quart.getPositionCount() == 1; BytesRef scratch = new BytesRef(); PercentileLongAggregator.combineIntermediate(state, quart.getBytesRef(0, scratch)); diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumDoubleAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumDoubleAggregatorFunction.java index 5520c587555b3..354726f82b8f3 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumDoubleAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumDoubleAggregatorFunction.java @@ -91,15 +91,24 @@ private void addRawBlock(DoubleBlock block) { public void addIntermediateInput(Page 
page) { assert channels.size() == intermediateBlockCount(); assert page.getBlockCount() >= channels.get(0) + intermediateStateDesc().size(); - Block uncastBlock = page.getBlock(channels.get(0)); - if (uncastBlock.areAllValuesNull()) { + Block valueUncast = page.getBlock(channels.get(0)); + if (valueUncast.areAllValuesNull()) { return; } - DoubleVector value = page.getBlock(channels.get(0)).asVector(); - DoubleVector delta = page.getBlock(channels.get(1)).asVector(); - BooleanVector seen = page.getBlock(channels.get(2)).asVector(); + DoubleVector value = ((DoubleBlock) valueUncast).asVector(); assert value.getPositionCount() == 1; - assert value.getPositionCount() == delta.getPositionCount() && value.getPositionCount() == seen.getPositionCount(); + Block deltaUncast = page.getBlock(channels.get(1)); + if (deltaUncast.areAllValuesNull()) { + return; + } + DoubleVector delta = ((DoubleBlock) deltaUncast).asVector(); + assert delta.getPositionCount() == 1; + Block seenUncast = page.getBlock(channels.get(2)); + if (seenUncast.areAllValuesNull()) { + return; + } + BooleanVector seen = ((BooleanBlock) seenUncast).asVector(); + assert seen.getPositionCount() == 1; SumDoubleAggregator.combineIntermediate(state, value.getDouble(0), delta.getDouble(0), seen.getBoolean(0)); } @@ -111,7 +120,7 @@ public void evaluateIntermediate(Block[] blocks, int offset, DriverContext drive @Override public void evaluateFinal(Block[] blocks, int offset, DriverContext driverContext) { if (state.seen() == false) { - blocks[offset] = Block.constantNullBlock(1, driverContext.blockFactory()); + blocks[offset] = driverContext.blockFactory().newConstantNullBlock(1); return; } blocks[offset] = SumDoubleAggregator.evaluateFinal(state, driverContext); diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumIntAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumIntAggregatorFunction.java index 1225b90bf09f7..e210429991aa6 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumIntAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumIntAggregatorFunction.java @@ -92,14 +92,18 @@ private void addRawBlock(IntBlock block) { public void addIntermediateInput(Page page) { assert channels.size() == intermediateBlockCount(); assert page.getBlockCount() >= channels.get(0) + intermediateStateDesc().size(); - Block uncastBlock = page.getBlock(channels.get(0)); - if (uncastBlock.areAllValuesNull()) { + Block sumUncast = page.getBlock(channels.get(0)); + if (sumUncast.areAllValuesNull()) { return; } - LongVector sum = page.getBlock(channels.get(0)).asVector(); - BooleanVector seen = page.getBlock(channels.get(1)).asVector(); + LongVector sum = ((LongBlock) sumUncast).asVector(); assert sum.getPositionCount() == 1; - assert sum.getPositionCount() == seen.getPositionCount(); + Block seenUncast = page.getBlock(channels.get(1)); + if (seenUncast.areAllValuesNull()) { + return; + } + BooleanVector seen = ((BooleanBlock) seenUncast).asVector(); + assert seen.getPositionCount() == 1; if (seen.getBoolean(0)) { state.longValue(SumIntAggregator.combine(state.longValue(), sum.getLong(0))); state.seen(true); @@ -114,10 +118,10 @@ public void evaluateIntermediate(Block[] blocks, int offset, DriverContext drive @Override public void evaluateFinal(Block[] blocks, int offset, DriverContext driverContext) { if (state.seen() == false) { - 
blocks[offset] = Block.constantNullBlock(1, driverContext.blockFactory()); + blocks[offset] = driverContext.blockFactory().newConstantNullBlock(1); return; } - blocks[offset] = LongBlock.newConstantBlockWith(state.longValue(), 1, driverContext.blockFactory()); + blocks[offset] = driverContext.blockFactory().newConstantLongBlockWith(state.longValue(), 1); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumLongAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumLongAggregatorFunction.java index 720e7ca9f3bbf..38d1b3de78265 100644 --- a/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumLongAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/generated/org/elasticsearch/compute/aggregation/SumLongAggregatorFunction.java @@ -90,14 +90,18 @@ private void addRawBlock(LongBlock block) { public void addIntermediateInput(Page page) { assert channels.size() == intermediateBlockCount(); assert page.getBlockCount() >= channels.get(0) + intermediateStateDesc().size(); - Block uncastBlock = page.getBlock(channels.get(0)); - if (uncastBlock.areAllValuesNull()) { + Block sumUncast = page.getBlock(channels.get(0)); + if (sumUncast.areAllValuesNull()) { return; } - LongVector sum = page.getBlock(channels.get(0)).asVector(); - BooleanVector seen = page.getBlock(channels.get(1)).asVector(); + LongVector sum = ((LongBlock) sumUncast).asVector(); assert sum.getPositionCount() == 1; - assert sum.getPositionCount() == seen.getPositionCount(); + Block seenUncast = page.getBlock(channels.get(1)); + if (seenUncast.areAllValuesNull()) { + return; + } + BooleanVector seen = ((BooleanBlock) seenUncast).asVector(); + assert seen.getPositionCount() == 1; if (seen.getBoolean(0)) { state.longValue(SumLongAggregator.combine(state.longValue(), sum.getLong(0))); state.seen(true); @@ -112,10 +116,10 @@ public void evaluateIntermediate(Block[] blocks, int offset, DriverContext drive @Override public void evaluateFinal(Block[] blocks, int offset, DriverContext driverContext) { if (state.seen() == false) { - blocks[offset] = Block.constantNullBlock(1, driverContext.blockFactory()); + blocks[offset] = driverContext.blockFactory().newConstantNullBlock(1); return; } - blocks[offset] = LongBlock.newConstantBlockWith(state.longValue(), 1, driverContext.blockFactory()); + blocks[offset] = driverContext.blockFactory().newConstantLongBlockWith(state.longValue(), 1); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/OwningChannelActionListener.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/OwningChannelActionListener.java deleted file mode 100644 index 50a20ee6ee73d..0000000000000 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/OwningChannelActionListener.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
- */ - -package org.elasticsearch.compute; - -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.support.ChannelActionListener; -import org.elasticsearch.transport.TransportChannel; -import org.elasticsearch.transport.TransportResponse; - -/** - * Wraps a {@link ChannelActionListener} and takes ownership of responses passed to - * {@link org.elasticsearch.action.ActionListener#onResponse(Object)}; the reference count will be decreased once sending is done. - * - * Deprecated: use {@link ChannelActionListener} instead and ensure responses sent to it are properly closed after. - */ -@Deprecated(forRemoval = true) -public final class OwningChannelActionListener<Response extends TransportResponse> implements ActionListener<Response> { - private final ChannelActionListener<Response> listener; - - public OwningChannelActionListener(TransportChannel channel) { - this.listener = new ChannelActionListener<>(channel); - } - - @Override - public void onResponse(Response response) { - ActionListener.respondAndRelease(listener, response); - } - - @Override - public void onFailure(Exception e) { - listener.onFailure(e); - } - - @Override - public String toString() { - return "OwningChannelActionListener{" + listener + "}"; - } - -} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/CountDistinctBooleanAggregator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/CountDistinctBooleanAggregator.java index d083a48fffb7a..218af8fcb705e 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/CountDistinctBooleanAggregator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/CountDistinctBooleanAggregator.java @@ -13,7 +13,6 @@ import org.elasticsearch.compute.ann.GroupingAggregator; import org.elasticsearch.compute.ann.IntermediateState; import org.elasticsearch.compute.data.Block; -import org.elasticsearch.compute.data.BooleanBlock; import org.elasticsearch.compute.data.IntVector; import org.elasticsearch.compute.data.LongBlock; import org.elasticsearch.compute.operator.DriverContext; @@ -33,10 +32,6 @@ public static void combine(SingleState current, boolean v) { current.bits |= v ?
BIT_TRUE : BIT_FALSE; } - public static void combineStates(SingleState current, SingleState state) { - current.bits |= state.bits; - } - public static void combineIntermediate(SingleState current, boolean fbit, boolean tbit) { if (fbit) current.bits |= BIT_FALSE; if (tbit) current.bits |= BIT_TRUE; @@ -44,7 +39,7 @@ public static void combineIntermediate(SingleState current, boolean fbit, boolea public static Block evaluateFinal(SingleState state, DriverContext driverContext) { long result = ((state.bits & BIT_TRUE) >> 1) + (state.bits & BIT_FALSE); - return LongBlock.newConstantBlockWith(result, 1, driverContext.blockFactory()); + return driverContext.blockFactory().newConstantLongBlockWith(result, 1); } public static GroupingState initGrouping(BigArrays bigArrays) { @@ -65,7 +60,7 @@ public static void combineIntermediate(GroupingState current, int groupId, boole } public static Block evaluateFinal(GroupingState state, IntVector selected, DriverContext driverContext) { - LongBlock.Builder builder = LongBlock.newBlockBuilder(selected.getPositionCount(), driverContext.blockFactory()); + LongBlock.Builder builder = driverContext.blockFactory().newLongBlockBuilder(selected.getPositionCount()); for (int i = 0; i < selected.getPositionCount(); i++) { int group = selected.getInt(i); long count = (state.bits.get(2 * group) ? 1 : 0) + (state.bits.get(2 * group + 1) ? 1 : 0); @@ -135,8 +130,8 @@ void combineStates(int currentGroupId, GroupingState state) { public void toIntermediate(Block[] blocks, int offset, IntVector selected, DriverContext driverContext) { assert blocks.length >= offset + 2; try ( - var fbitBuilder = BooleanBlock.newBlockBuilder(selected.getPositionCount(), driverContext.blockFactory()); - var tbitBuilder = BooleanBlock.newBlockBuilder(selected.getPositionCount(), driverContext.blockFactory()) + var fbitBuilder = driverContext.blockFactory().newBooleanBlockBuilder(selected.getPositionCount()); + var tbitBuilder = driverContext.blockFactory().newBooleanBlockBuilder(selected.getPositionCount()) ) { for (int i = 0; i < selected.getPositionCount(); i++) { int group = selected.getInt(i); diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/CountDistinctBytesRefAggregator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/CountDistinctBytesRefAggregator.java index 89ad27f1fef28..13a9e00bb28ab 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/CountDistinctBytesRefAggregator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/CountDistinctBytesRefAggregator.java @@ -29,17 +29,13 @@ public static void combine(HllStates.SingleState current, BytesRef v) { current.collect(v); } - public static void combineStates(HllStates.SingleState current, HllStates.SingleState state) { - current.merge(0, state.hll, 0); - } - public static void combineIntermediate(HllStates.SingleState current, BytesRef inValue) { current.merge(0, inValue, 0); } public static Block evaluateFinal(HllStates.SingleState state, DriverContext driverContext) { long result = state.cardinality(); - return LongBlock.newConstantBlockWith(result, 1, driverContext.blockFactory()); + return driverContext.blockFactory().newConstantLongBlockWith(result, 1); } public static HllStates.GroupingState initGrouping(BigArrays bigArrays, int precision) { @@ -64,7 +60,7 @@ public static void combineStates( } public static Block evaluateFinal(HllStates.GroupingState state, IntVector selected, 
DriverContext driverContext) { - try (LongBlock.Builder builder = LongBlock.newBlockBuilder(selected.getPositionCount(), driverContext.blockFactory())) { + try (LongBlock.Builder builder = driverContext.blockFactory().newLongBlockBuilder(selected.getPositionCount())) { for (int i = 0; i < selected.getPositionCount(); i++) { int group = selected.getInt(i); long count = state.cardinality(group); diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/CountDistinctDoubleAggregator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/CountDistinctDoubleAggregator.java index 86b3f9997246e..46a0d24cec8c4 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/CountDistinctDoubleAggregator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/CountDistinctDoubleAggregator.java @@ -29,17 +29,13 @@ public static void combine(HllStates.SingleState current, double v) { current.collect(v); } - public static void combineStates(HllStates.SingleState current, HllStates.SingleState state) { - current.merge(0, state.hll, 0); - } - public static void combineIntermediate(HllStates.SingleState current, BytesRef inValue) { current.merge(0, inValue, 0); } public static Block evaluateFinal(HllStates.SingleState state, DriverContext driverContext) { long result = state.cardinality(); - return LongBlock.newConstantBlockWith(result, 1, driverContext.blockFactory()); + return driverContext.blockFactory().newConstantLongBlockWith(result, 1); } public static HllStates.GroupingState initGrouping(BigArrays bigArrays, int precision) { @@ -64,7 +60,7 @@ public static void combineStates( } public static Block evaluateFinal(HllStates.GroupingState state, IntVector selected, DriverContext driverContext) { - try (LongBlock.Builder builder = LongBlock.newBlockBuilder(selected.getPositionCount(), driverContext.blockFactory())) { + try (LongBlock.Builder builder = driverContext.blockFactory().newLongBlockBuilder(selected.getPositionCount())) { for (int i = 0; i < selected.getPositionCount(); i++) { int group = selected.getInt(i); long count = state.cardinality(group); diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/CountDistinctIntAggregator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/CountDistinctIntAggregator.java index 993284b0c57c3..9c29eb98f2987 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/CountDistinctIntAggregator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/CountDistinctIntAggregator.java @@ -29,17 +29,13 @@ public static void combine(HllStates.SingleState current, int v) { current.collect(v); } - public static void combineStates(HllStates.SingleState current, HllStates.SingleState state) { - current.merge(0, state.hll, 0); - } - public static void combineIntermediate(HllStates.SingleState current, BytesRef inValue) { current.merge(0, inValue, 0); } public static Block evaluateFinal(HllStates.SingleState state, DriverContext driverContext) { long result = state.cardinality(); - return LongBlock.newConstantBlockWith(result, 1, driverContext.blockFactory()); + return driverContext.blockFactory().newConstantLongBlockWith(result, 1); } public static HllStates.GroupingState initGrouping(BigArrays bigArrays, int precision) { @@ -64,7 +60,7 @@ public static void combineStates( } public static Block 
evaluateFinal(HllStates.GroupingState state, IntVector selected, DriverContext driverContext) { - try (LongBlock.Builder builder = LongBlock.newBlockBuilder(selected.getPositionCount(), driverContext.blockFactory())) { + try (LongBlock.Builder builder = driverContext.blockFactory().newLongBlockBuilder(selected.getPositionCount())) { for (int i = 0; i < selected.getPositionCount(); i++) { int group = selected.getInt(i); long count = state.cardinality(group); diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/CountDistinctLongAggregator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/CountDistinctLongAggregator.java index a09c8df3b0fc3..59570e2f5a7ef 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/CountDistinctLongAggregator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/CountDistinctLongAggregator.java @@ -29,17 +29,13 @@ public static void combine(HllStates.SingleState current, long v) { current.collect(v); } - public static void combineStates(HllStates.SingleState current, HllStates.SingleState state) { - current.merge(0, state.hll, 0); - } - public static void combineIntermediate(HllStates.SingleState current, BytesRef inValue) { current.merge(0, inValue, 0); } public static Block evaluateFinal(HllStates.SingleState state, DriverContext driverContext) { long result = state.cardinality(); - return LongBlock.newConstantBlockWith(result, 1, driverContext.blockFactory()); + return driverContext.blockFactory().newConstantLongBlockWith(result, 1); } public static HllStates.GroupingState initGrouping(BigArrays bigArrays, int precision) { @@ -64,7 +60,7 @@ public static void combineStates( } public static Block evaluateFinal(HllStates.GroupingState state, IntVector selected, DriverContext driverContext) { - try (LongBlock.Builder builder = LongBlock.newBlockBuilder(selected.getPositionCount(), driverContext.blockFactory())) { + try (LongBlock.Builder builder = driverContext.blockFactory().newLongBlockBuilder(selected.getPositionCount())) { for (int i = 0; i < selected.getPositionCount(); i++) { int group = selected.getInt(i); long count = state.cardinality(group); diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/CountGroupingAggregatorFunction.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/CountGroupingAggregatorFunction.java index 995dc5e15740f..5dba070172ae9 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/CountGroupingAggregatorFunction.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/CountGroupingAggregatorFunction.java @@ -181,7 +181,7 @@ public void evaluateIntermediate(Block[] blocks, int offset, IntVector selected) @Override public void evaluateFinal(Block[] blocks, int offset, IntVector selected, DriverContext driverContext) { - try (LongVector.Builder builder = LongVector.newVectorBuilder(selected.getPositionCount(), driverContext.blockFactory())) { + try (LongVector.Builder builder = driverContext.blockFactory().newLongVectorFixedBuilder(selected.getPositionCount())) { for (int i = 0; i < selected.getPositionCount(); i++) { int si = selected.getInt(i); builder.appendLong(state.hasValue(si) ? 
state.get(si) : 0); diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/HllStates.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/HllStates.java index 66844f002111e..a8102efa61746 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/HllStates.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/HllStates.java @@ -16,7 +16,6 @@ import org.elasticsearch.common.io.stream.OutputStreamStreamOutput; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.compute.data.Block; -import org.elasticsearch.compute.data.BytesRefBlock; import org.elasticsearch.compute.data.IntVector; import org.elasticsearch.compute.operator.DriverContext; import org.elasticsearch.core.Releasables; @@ -179,7 +178,7 @@ void merge(int groupId, BytesRef other, int otherGroup) { @Override public void toIntermediate(Block[] blocks, int offset, IntVector selected, DriverContext driverContext) { assert blocks.length >= offset + 1; - try (var builder = BytesRefBlock.newBlockBuilder(selected.getPositionCount(), driverContext.blockFactory())) { + try (var builder = driverContext.blockFactory().newBytesRefBlockBuilder(selected.getPositionCount())) { for (int i = 0; i < selected.getPositionCount(); i++) { int group = selected.getInt(i); builder.appendBytesRef(serializeHLL(group, hll)); diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationDoubleAggregator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationDoubleAggregator.java index 2d73c323e9556..db0d57b887008 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationDoubleAggregator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationDoubleAggregator.java @@ -28,10 +28,6 @@ public static void combine(QuantileStates.SingleState current, double v) { current.add(v); } - public static void combineStates(QuantileStates.SingleState current, QuantileStates.SingleState state) { - current.add(state); - } - public static void combineIntermediate(QuantileStates.SingleState state, BytesRef inValue) { state.add(inValue); } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationIntAggregator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationIntAggregator.java index b4696f0ab1934..a57e28aebd437 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationIntAggregator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationIntAggregator.java @@ -28,10 +28,6 @@ public static void combine(QuantileStates.SingleState current, int v) { current.add(v); } - public static void combineStates(QuantileStates.SingleState current, QuantileStates.SingleState state) { - current.add(state); - } - public static void combineIntermediate(QuantileStates.SingleState state, BytesRef inValue) { state.add(inValue); } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationLongAggregator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationLongAggregator.java index 
bbd9f1821b681..54340f809e4cd 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationLongAggregator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/MedianAbsoluteDeviationLongAggregator.java @@ -32,10 +32,6 @@ public static void combineIntermediate(QuantileStates.SingleState state, BytesRe state.add(inValue); } - public static void combineStates(QuantileStates.SingleState current, QuantileStates.SingleState state) { - current.add(state); - } - public static Block evaluateFinal(QuantileStates.SingleState state, DriverContext driverContext) { return state.evaluateMedianAbsoluteDeviation(driverContext); } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/PercentileDoubleAggregator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/PercentileDoubleAggregator.java index 3020a920ebddb..1cff8d89b7541 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/PercentileDoubleAggregator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/PercentileDoubleAggregator.java @@ -28,10 +28,6 @@ public static void combine(QuantileStates.SingleState current, double v) { current.add(v); } - public static void combineStates(QuantileStates.SingleState current, QuantileStates.SingleState state) { - current.add(state); - } - public static void combineIntermediate(QuantileStates.SingleState state, BytesRef inValue) { state.add(inValue); } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/PercentileIntAggregator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/PercentileIntAggregator.java index 4ccd409cc8ccf..d93dc7099fffe 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/PercentileIntAggregator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/PercentileIntAggregator.java @@ -28,10 +28,6 @@ public static void combine(QuantileStates.SingleState current, int v) { current.add(v); } - public static void combineStates(QuantileStates.SingleState current, QuantileStates.SingleState state) { - current.add(state); - } - public static void combineIntermediate(QuantileStates.SingleState state, BytesRef inValue) { state.add(inValue); } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/PercentileLongAggregator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/PercentileLongAggregator.java index 2a0eb3a060930..9d900069d15ae 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/PercentileLongAggregator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/PercentileLongAggregator.java @@ -28,10 +28,6 @@ public static void combine(QuantileStates.SingleState current, long v) { current.add(v); } - public static void combineStates(QuantileStates.SingleState current, QuantileStates.SingleState state) { - current.add(state); - } - public static void combineIntermediate(QuantileStates.SingleState state, BytesRef inValue) { state.add(inValue); } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/QuantileStates.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/QuantileStates.java index 
0b5b89425ed46..0ba7afb0d5e68 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/QuantileStates.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/QuantileStates.java @@ -72,10 +72,6 @@ void add(double v) { digest.add(v); } - void add(SingleState other) { - digest.add(other.digest); - } - void add(BytesRef other) { digest.add(deserializeDigest(other)); } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/SumDoubleAggregator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/SumDoubleAggregator.java index 4c2c38da28b75..5e46225a873f8 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/SumDoubleAggregator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/SumDoubleAggregator.java @@ -14,7 +14,6 @@ import org.elasticsearch.compute.ann.IntermediateState; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; -import org.elasticsearch.compute.data.BooleanBlock; import org.elasticsearch.compute.data.DoubleBlock; import org.elasticsearch.compute.data.IntVector; import org.elasticsearch.compute.operator.DriverContext; @@ -42,10 +41,6 @@ public static void combine(SumState current, double value, double delta) { current.add(value, delta); } - public static void combineStates(SumState current, SumState state) { - current.add(state.value(), state.delta()); - } - public static void combineIntermediate(SumState state, double inValue, double inDelta, boolean seen) { if (seen) { combine(state, inValue, inDelta); @@ -63,7 +58,7 @@ public static void evaluateIntermediate(SumState state, DriverContext driverCont public static Block evaluateFinal(SumState state, DriverContext driverContext) { double result = state.value(); - return DoubleBlock.newConstantBlockWith(result, 1, driverContext.blockFactory()); + return driverContext.blockFactory().newConstantDoubleBlockWith(result, 1); } public static GroupingSumState initGrouping(BigArrays bigArrays) { @@ -95,9 +90,9 @@ public static void evaluateIntermediate( ) { assert blocks.length >= offset + 3; try ( - var valuesBuilder = DoubleBlock.newBlockBuilder(selected.getPositionCount(), driverContext.blockFactory()); - var deltaBuilder = DoubleBlock.newBlockBuilder(selected.getPositionCount(), driverContext.blockFactory()); - var seenBuilder = BooleanBlock.newBlockBuilder(selected.getPositionCount(), driverContext.blockFactory()) + var valuesBuilder = driverContext.blockFactory().newDoubleBlockBuilder(selected.getPositionCount()); + var deltaBuilder = driverContext.blockFactory().newDoubleBlockBuilder(selected.getPositionCount()); + var seenBuilder = driverContext.blockFactory().newBooleanBlockBuilder(selected.getPositionCount()) ) { for (int i = 0; i < selected.getPositionCount(); i++) { int group = selected.getInt(i); @@ -117,7 +112,7 @@ public static void evaluateIntermediate( } public static Block evaluateFinal(GroupingSumState state, IntVector selected, DriverContext driverContext) { - try (DoubleBlock.Builder builder = DoubleBlock.newBlockBuilder(selected.getPositionCount(), driverContext.blockFactory())) { + try (DoubleBlock.Builder builder = driverContext.blockFactory().newDoubleBlockBuilder(selected.getPositionCount())) { for (int i = 0; i < selected.getPositionCount(); i++) { int si = selected.getInt(i); if (state.hasValue(si) && si < state.values.size()) { diff --git 
a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-ArrayState.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-ArrayState.java.st index 42f86580a228d..e81af4841d1a4 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-ArrayState.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/X-ArrayState.java.st @@ -10,7 +10,6 @@ package org.elasticsearch.compute.aggregation; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.$Type$Array; import org.elasticsearch.compute.data.Block; -import org.elasticsearch.compute.data.BooleanBlock; $if(long)$ import org.elasticsearch.compute.data.IntVector; $endif$ @@ -73,14 +72,14 @@ $endif$ Block toValuesBlock(org.elasticsearch.compute.data.IntVector selected, DriverContext driverContext) { if (false == trackingGroupIds()) { - try ($Type$Vector.Builder builder = $Type$Vector.newVectorBuilder(selected.getPositionCount(), driverContext.blockFactory())) { + try ($Type$Vector.Builder builder = driverContext.blockFactory().new$Type$VectorFixedBuilder(selected.getPositionCount())) { for (int i = 0; i < selected.getPositionCount(); i++) { builder.append$Type$(values.get(selected.getInt(i))); } return builder.build().asBlock(); } } - try ($Type$Block.Builder builder = $Type$Block.newBlockBuilder(selected.getPositionCount(), driverContext.blockFactory())) { + try ($Type$Block.Builder builder = driverContext.blockFactory().new$Type$BlockBuilder(selected.getPositionCount())) { for (int i = 0; i < selected.getPositionCount(); i++) { int group = selected.getInt(i); if (hasValue(group)) { @@ -111,8 +110,8 @@ $endif$ ) { assert blocks.length >= offset + 2; try ( - var valuesBuilder = $Type$Block.newBlockBuilder(selected.getPositionCount(), driverContext.blockFactory()); - var hasValueBuilder = BooleanBlock.newBlockBuilder(selected.getPositionCount(), driverContext.blockFactory()) + var valuesBuilder = driverContext.blockFactory().new$Type$BlockBuilder(selected.getPositionCount()); + var hasValueBuilder = driverContext.blockFactory().newBooleanVectorFixedBuilder(selected.getPositionCount()) ) { for (int i = 0; i < selected.getPositionCount(); i++) { int group = selected.getInt(i); @@ -124,7 +123,7 @@ $endif$ hasValueBuilder.appendBoolean(hasValue(group)); } blocks[offset + 0] = valuesBuilder.build(); - blocks[offset + 1] = hasValueBuilder.build(); + blocks[offset + 1] = hasValueBuilder.build().asBlock(); } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/BooleanBlockHash.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/BooleanBlockHash.java index 684e6aec60b9e..aa7c737e331c7 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/BooleanBlockHash.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/BooleanBlockHash.java @@ -51,8 +51,8 @@ public void add(Page page, GroupingAggregatorFunction.AddInput addInput) { addInput.add(0, groupIds); } } else { - try (IntBlock groupIds = add(booleanVector).asBlock()) { - addInput.add(0, groupIds.asVector()); + try (IntVector groupIds = add(booleanVector)) { + addInput.add(0, groupIds); } } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/BytesRefLongBlockHash.java 
b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/BytesRefLongBlockHash.java index da2c85e532016..7ee8a7165aa17 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/BytesRefLongBlockHash.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/BytesRefLongBlockHash.java @@ -18,7 +18,6 @@ import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BytesRefBlock; import org.elasticsearch.compute.data.BytesRefVector; -import org.elasticsearch.compute.data.IntArrayVector; import org.elasticsearch.compute.data.IntVector; import org.elasticsearch.compute.data.LongBlock; import org.elasticsearch.compute.data.LongVector; @@ -72,7 +71,9 @@ public void add(Page page, GroupingAggregatorFunction.AddInput addInput) { BytesRefVector vector1 = block1.asVector(); LongVector vector2 = block2.asVector(); if (vector1 != null && vector2 != null) { - addInput.add(0, add(vector1, vector2)); + try (IntVector ords = add(vector1, vector2)) { + addInput.add(0, ords); + } } else { try (AddWork work = new AddWork(block1, block2, addInput)) { work.add(); @@ -88,7 +89,7 @@ public IntVector add(BytesRefVector vector1, LongVector vector2) { long hash1 = hashOrdToGroup(bytesHash.add(vector1.getBytesRef(i, scratch))); ords[i] = Math.toIntExact(hashOrdToGroup(finalHash.add(hash1, vector2.getLong(i)))); } - return new IntArrayVector(ords, positions); + return blockFactory.newIntArrayVector(ords, positions); } private static final long[] EMPTY = new long[0]; diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/LongLongBlockHash.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/LongLongBlockHash.java index ce53f0bb8e7f4..49b16198a5d77 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/LongLongBlockHash.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/LongLongBlockHash.java @@ -65,7 +65,7 @@ public void add(Page page, GroupingAggregatorFunction.AddInput addInput) { private IntVector add(LongVector vector1, LongVector vector2) { int positions = vector1.getPositionCount(); - try (var builder = IntVector.newVectorFixedBuilder(positions, blockFactory)) { + try (var builder = blockFactory.newIntVectorFixedBuilder(positions)) { for (int i = 0; i < positions; i++) { builder.appendInt(Math.toIntExact(hashOrdToGroup(hash.add(vector1.getLong(i), vector2.getLong(i))))); } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractBlock.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractBlock.java index 177e3fb6798d1..8ce6ef9ab78ab 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractBlock.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractBlock.java @@ -11,8 +11,7 @@ import java.util.BitSet; -abstract class AbstractBlock implements Block { - private int references = 1; +abstract class AbstractBlock extends AbstractNonThreadSafeRefCounted implements Block { private final int positionCount; @Nullable @@ -101,55 +100,7 @@ public void allowPassingToDifferentDriver() { } @Override - public boolean isReleased() { + public final boolean isReleased() { return hasReferences() == false; } - - @Override - public final void incRef() { - if (isReleased()) { 
- throw new IllegalStateException("can't increase refCount on already released block [" + this + "]"); - } - references++; - } - - @Override - public final boolean tryIncRef() { - if (isReleased()) { - return false; - } - references++; - return true; - } - - @Override - public final boolean decRef() { - if (isReleased()) { - throw new IllegalStateException("can't release already released block [" + this + "]"); - } - - references--; - - if (references <= 0) { - closeInternal(); - return true; - } - return false; - } - - @Override - public final boolean hasReferences() { - return references >= 1; - } - - @Override - public final void close() { - decRef(); - } - - /** - * This is called when the number of references reaches zero. - * It must release any resources held by the block (adjusting circuit breakers if needed). - */ - protected abstract void closeInternal(); } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractNonThreadSafeRefCounted.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractNonThreadSafeRefCounted.java new file mode 100644 index 0000000000000..2dfd8c3eca5ac --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractNonThreadSafeRefCounted.java @@ -0,0 +1,67 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.data; + +import org.elasticsearch.core.RefCounted; +import org.elasticsearch.core.Releasable; + +/** + * Releasable, non-threadsafe version of {@link org.elasticsearch.core.AbstractRefCounted}. + * Calls to {@link AbstractNonThreadSafeRefCounted#decRef()} and {@link AbstractNonThreadSafeRefCounted#close()} are equivalent. + */ +abstract class AbstractNonThreadSafeRefCounted implements RefCounted, Releasable { + private int references = 1; + + @Override + public final void incRef() { + if (hasReferences() == false) { + throw new IllegalStateException("can't increase refCount on already released object [" + this + "]"); + } + references++; + } + + @Override + public final boolean tryIncRef() { + if (hasReferences() == false) { + return false; + } + references++; + return true; + } + + @Override + public final boolean decRef() { + if (hasReferences() == false) { + throw new IllegalStateException("can't release already released object [" + this + "]"); + } + + references--; + + if (references <= 0) { + closeInternal(); + return true; + } + return false; + } + + @Override + public final boolean hasReferences() { + return references >= 1; + } + + @Override + public final void close() { + decRef(); + } + + /** + * This is called when the number of references reaches zero. + * This is where resources should be released (adjusting circuit breakers if needed). + */ + protected abstract void closeInternal(); +} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractVector.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractVector.java index 33ef14cfb4ad8..cc9727b751411 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractVector.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractVector.java @@ -10,11 +10,10 @@ /** * A dense Vector of single values. 
*/ -abstract class AbstractVector implements Vector { +abstract class AbstractVector extends AbstractNonThreadSafeRefCounted implements Vector { private final int positionCount; private BlockFactory blockFactory; - protected boolean released; protected AbstractVector(int positionCount, BlockFactory blockFactory) { this.positionCount = positionCount; @@ -41,16 +40,12 @@ public void allowPassingToDifferentDriver() { } @Override - public void close() { - if (released) { - throw new IllegalStateException("can't release already released vector [" + this + "]"); - } - released = true; + protected void closeInternal() { blockFactory.adjustBreaker(-ramBytesUsed(), true); } @Override public final boolean isReleased() { - return released; + return hasReferences() == false; } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Block.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Block.java index 964e510de9a20..c89a0ce260c67 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Block.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Block.java @@ -159,25 +159,6 @@ default boolean mvSortedAscending() { */ Block expand(); - /** - * {@return a constant null block with the given number of positions, using the non-breaking block factory}. - * @deprecated use {@link BlockFactory#newConstantNullBlock} - */ - // Eventually, this should use the GLOBAL breaking instance - @Deprecated - static Block constantNullBlock(int positions) { - return constantNullBlock(positions, BlockFactory.getNonBreakingInstance()); - } - - /** - * {@return a constant null block with the given number of positions}. - * @deprecated use {@link BlockFactory#newConstantNullBlock} - */ - @Deprecated - static Block constantNullBlock(int positions, BlockFactory blockFactory) { - return blockFactory.newConstantNullBlock(positions); - } - /** * Builds {@link Block}s. Typically, you use one of its direct subinterfaces like {@link IntBlock.Builder}. * This is {@link Releasable} and should be released after building the block or if building the block fails. 
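With the deprecated statics gone, every allocation now flows through an explicit, breaker-aware BlockFactory. Below is a minimal sketch of the resulting calling convention, using only API that appears in this diff; the NoopCircuitBreaker and non-recycling BigArrays are stand-ins for a real per-driver breaker, and production code would take its factory from DriverContext#blockFactory() instead:

```java
import org.elasticsearch.common.breaker.NoopCircuitBreaker;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.compute.data.Block;
import org.elasticsearch.compute.data.BlockFactory;
import org.elasticsearch.compute.data.LongBlock;

class BlockFactoryUsageSketch {
    public static void main(String[] args) {
        // Every factory is wired to a circuit breaker; a no-op breaker keeps
        // this sketch self-contained.
        BlockFactory factory = BlockFactory.getInstance(new NoopCircuitBreaker("noop"), BigArrays.NON_RECYCLING_INSTANCE);

        // Builders are Releasable: try-with-resources hands their reserved
        // bytes back to the breaker even if building fails part way through.
        try (LongBlock.Builder builder = factory.newLongBlockBuilder(3)) {
            builder.appendLong(1);
            builder.appendLong(2);
            builder.appendLong(3);
            try (LongBlock block = builder.build()) {
                // Use the block. close() decrements its reference count and,
                // once it reaches zero, closeInternal() adjusts the breaker.
            }
        }

        // Constant-null blocks also require an explicit factory now that the
        // deprecated Block.constantNullBlock(int) statics are removed.
        try (Block nulls = factory.newConstantNullBlock(10)) {
            // ...
        }
    }
}
```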
diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/BlockFactory.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/BlockFactory.java index 092f66a7d4427..cf84b902250e7 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/BlockFactory.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/BlockFactory.java @@ -10,7 +10,6 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.breaker.CircuitBreakingException; -import org.elasticsearch.common.breaker.NoopCircuitBreaker; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.BytesRefArray; @@ -25,35 +24,35 @@ public class BlockFactory { public static final String LOCAL_BREAKER_OVER_RESERVED_MAX_SIZE_SETTING = "esql.block_factory.local_breaker.max_over_reserved"; public static final ByteSizeValue LOCAL_BREAKER_OVER_RESERVED_DEFAULT_MAX_SIZE = ByteSizeValue.ofKb(16); - private static final BlockFactory NON_BREAKING = BlockFactory.getInstance( - new NoopCircuitBreaker("noop-esql-breaker"), - BigArrays.NON_RECYCLING_INSTANCE - ); + public static final String MAX_BLOCK_PRIMITIVE_ARRAY_SIZE_SETTING = "esql.block_factory.max_block_primitive_array_size"; + public static final ByteSizeValue DEFAULT_MAX_BLOCK_PRIMITIVE_ARRAY_SIZE = ByteSizeValue.ofKb(512); private final CircuitBreaker breaker; private final BigArrays bigArrays; + private final long maxPrimitiveArrayBytes; private final BlockFactory parent; public BlockFactory(CircuitBreaker breaker, BigArrays bigArrays) { - this(breaker, bigArrays, null); + this(breaker, bigArrays, DEFAULT_MAX_BLOCK_PRIMITIVE_ARRAY_SIZE); } - protected BlockFactory(CircuitBreaker breaker, BigArrays bigArrays, BlockFactory parent) { + public BlockFactory(CircuitBreaker breaker, BigArrays bigArrays, ByteSizeValue maxPrimitiveArraySize) { + this(breaker, bigArrays, maxPrimitiveArraySize, null); + } + + protected BlockFactory(CircuitBreaker breaker, BigArrays bigArrays, ByteSizeValue maxPrimitiveArraySize, BlockFactory parent) { + assert breaker instanceof LocalCircuitBreaker == false + || (parent != null && ((LocalCircuitBreaker) breaker).parentBreaker() == parent.breaker) + : "use local breaker without parent block factory"; this.breaker = breaker; this.bigArrays = bigArrays; this.parent = parent; - } - - /** - * Returns the Non-Breaking block factory. 
- */ - public static BlockFactory getNonBreakingInstance() { - return NON_BREAKING; + this.maxPrimitiveArrayBytes = maxPrimitiveArraySize.getBytes(); } public static BlockFactory getInstance(CircuitBreaker breaker, BigArrays bigArrays) { - return new BlockFactory(breaker, bigArrays); + return new BlockFactory(breaker, bigArrays, DEFAULT_MAX_BLOCK_PRIMITIVE_ARRAY_SIZE, null); } // For testing @@ -74,7 +73,7 @@ public BlockFactory newChildFactory(LocalCircuitBreaker childBreaker) { if (childBreaker.parentBreaker() != breaker) { throw new IllegalStateException("Different parent breaker"); } - return new BlockFactory(childBreaker, bigArrays, this); + return new BlockFactory(childBreaker, bigArrays, ByteSizeValue.ofBytes(maxPrimitiveArrayBytes), this); } /** @@ -391,4 +390,11 @@ public Block newConstantNullBlock(int positions) { adjustBreaker(b.ramBytesUsed(), true); return b; } + + /** + * Returns the maximum number of bytes for which a Block should be backed by a primitive array before switching to BigArrays. + */ + public long maxPrimitiveArrayBytes() { + return maxPrimitiveArrayBytes; + } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/BlockUtils.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/BlockUtils.java index 405dd088bf3a5..03c1ff05ae99e 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/BlockUtils.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/BlockUtils.java @@ -216,7 +216,7 @@ public static void appendValue(Block.Builder builder, Object val, ElementType ty public static Block constantBlock(BlockFactory blockFactory, Object val, int size) { if (val == null) { - return Block.constantNullBlock(size); + return blockFactory.newConstantNullBlock(size); } return constantBlock(blockFactory, fromJava(val.getClass()), val, size); } @@ -224,12 +224,12 @@ public static Block constantBlock(BlockFactory blockFactory, ElementType type, Object val, int siz // TODO: allow null values private static Block constantBlock(BlockFactory blockFactory, ElementType type, Object val, int size) { return switch (type) { - case NULL -> Block.constantNullBlock(size); - case LONG -> LongBlock.newConstantBlockWith((long) val, size, blockFactory); - case INT -> IntBlock.newConstantBlockWith((int) val, size, blockFactory); - case BYTES_REF -> BytesRefBlock.newConstantBlockWith(toBytesRef(val), size, blockFactory); - case DOUBLE -> DoubleBlock.newConstantBlockWith((double) val, size, blockFactory); - case BOOLEAN -> BooleanBlock.newConstantBlockWith((boolean) val, size, blockFactory); + case NULL -> blockFactory.newConstantNullBlock(size); + case LONG -> blockFactory.newConstantLongBlockWith((long) val, size); + case INT -> blockFactory.newConstantIntBlockWith((int) val, size); + case BYTES_REF -> blockFactory.newConstantBytesRefBlockWith(toBytesRef(val), size); + case DOUBLE -> blockFactory.newConstantDoubleBlockWith((double) val, size); + case BOOLEAN -> blockFactory.newConstantBooleanBlockWith((boolean) val, size); default -> throw new UnsupportedOperationException("unsupported element type [" + type + "]"); }; } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/ConstantNullBlock.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/ConstantNullBlock.java index 29e39f43cddc2..639e1c298291f 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/ConstantNullBlock.java +++ 
b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/ConstantNullBlock.java @@ -19,15 +19,10 @@ /** * Block implementation representing a constant null value. */ -public final class ConstantNullBlock extends AbstractBlock implements BooleanBlock, IntBlock, LongBlock, DoubleBlock, BytesRefBlock { +final class ConstantNullBlock extends AbstractBlock implements BooleanBlock, IntBlock, LongBlock, DoubleBlock, BytesRefBlock { private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(ConstantNullBlock.class); - // Eventually, this should use the GLOBAL breaking instance - ConstantNullBlock(int positionCount) { - this(positionCount, BlockFactory.getNonBreakingInstance()); - } - ConstantNullBlock(int positionCount, BlockFactory blockFactory) { super(positionCount, blockFactory); } @@ -83,8 +78,9 @@ public String getWriteableName() { return "ConstantNullBlock"; } - static ConstantNullBlock of(StreamInput in) throws IOException { - return new ConstantNullBlock(in.readVInt()); + static Block of(StreamInput in) throws IOException { + BlockFactory blockFactory = ((BlockStreamInput) in).blockFactory(); + return blockFactory.newConstantNullBlock(in.readVInt()); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/DocBlock.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/DocBlock.java index d45314f5c8a78..8c75c8216c59e 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/DocBlock.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/DocBlock.java @@ -7,7 +7,6 @@ package org.elasticsearch.compute.data; -import org.apache.lucene.util.RamUsageEstimator; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Releasables; @@ -18,8 +17,6 @@ */ public class DocBlock extends AbstractVectorBlock implements Block { - private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(DocBlock.class); - private final DocVector vector; DocBlock(DocVector vector) { @@ -67,12 +64,7 @@ public boolean equals(Object obj) { @Override public long ramBytesUsed() { - return BASE_RAM_BYTES_USED + RamUsageEstimator.sizeOf(vector); - } - - @Override - public boolean isReleased() { - return super.isReleased() || vector.isReleased(); + return vector.ramBytesUsed(); } @Override @@ -84,8 +76,8 @@ public void closeInternal() { /** * A builder for the {@link DocBlock}. 
*/ - public static Builder newBlockBuilder(int estimatedSize, BlockFactory blockFactory) { - return new Builder(estimatedSize, blockFactory); + public static Builder newBlockBuilder(BlockFactory blockFactory, int estimatedSize) { + return new Builder(blockFactory, estimatedSize); } public static class Builder implements Block.Builder { @@ -93,10 +85,22 @@ public static class Builder implements Block.Builder { private final IntVector.Builder segments; private final IntVector.Builder docs; - private Builder(int estimatedSize, BlockFactory blockFactory) { - shards = IntVector.newVectorBuilder(estimatedSize, blockFactory); - segments = IntVector.newVectorBuilder(estimatedSize, blockFactory); - docs = IntVector.newVectorBuilder(estimatedSize, blockFactory); + private Builder(BlockFactory blockFactory, int estimatedSize) { + IntVector.Builder shards = null; + IntVector.Builder segments = null; + IntVector.Builder docs = null; + try { + shards = blockFactory.newIntVectorBuilder(estimatedSize); + segments = blockFactory.newIntVectorBuilder(estimatedSize); + docs = blockFactory.newIntVectorBuilder(estimatedSize); + } finally { + if (docs == null) { + Releasables.closeExpectNoException(shards, segments, docs); + } + } + this.shards = shards; + this.segments = segments; + this.docs = docs; } public Builder appendShard(int shard) { @@ -159,7 +163,21 @@ public Block.Builder mvOrdering(MvOrdering mvOrdering) { @Override public DocBlock build() { // Pass null for singleSegmentNonDecreasing so we calculate it when we first need it. - return new DocVector(shards.build(), segments.build(), docs.build(), null).asBlock(); + IntVector shards = null; + IntVector segments = null; + IntVector docs = null; + DocVector result = null; + try { + shards = this.shards.build(); + segments = this.segments.build(); + docs = this.docs.build(); + result = new DocVector(shards, segments, docs, null); + return result.asBlock(); + } finally { + if (result == null) { + Releasables.closeExpectNoException(shards, segments, docs); + } + } } @Override diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/DocVector.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/DocVector.java index 3097dc73fb814..32eae7a972bcf 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/DocVector.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/DocVector.java @@ -46,10 +46,8 @@ public final class DocVector extends AbstractVector implements Vector { */ private int[] shardSegmentDocMapBackwards; - final DocBlock block; - public DocVector(IntVector shards, IntVector segments, IntVector docs, Boolean singleSegmentNonDecreasing) { - super(shards.getPositionCount(), null); + super(shards.getPositionCount(), shards.blockFactory()); this.shards = shards; this.segments = segments; this.docs = docs; @@ -64,7 +62,7 @@ public DocVector(IntVector shards, IntVector segments, IntVector docs, Boolean s "invalid position count [" + shards.getPositionCount() + " != " + docs.getPositionCount() + "]" ); } - block = new DocBlock(this); + blockFactory().adjustBreaker(BASE_RAM_BYTES_USED, true); } public IntVector shards() { @@ -130,53 +128,85 @@ private void buildShardSegmentDocMapIfMissing() { return; } - int[] forwards = shardSegmentDocMapForwards = new int[shards.getPositionCount()]; - for (int p = 0; p < forwards.length; p++) { - forwards[p] = p; - } - new IntroSorter() { - int pivot; - - @Override - protected void setPivot(int i) { - pivot = 
forwards[i]; + boolean success = false; + long estimatedSize = sizeOfSegmentDocMap(); + blockFactory().adjustBreaker(estimatedSize, true); + int[] forwards = null; + int[] backwards = null; + try { + int[] finalForwards = forwards = new int[shards.getPositionCount()]; + for (int p = 0; p < forwards.length; p++) { + forwards[p] = p; } + new IntroSorter() { + int pivot; - @Override - protected int comparePivot(int j) { - int cmp = Integer.compare(shards.getInt(pivot), shards.getInt(forwards[j])); - if (cmp != 0) { - return cmp; + @Override + protected void setPivot(int i) { + pivot = finalForwards[i]; } - cmp = Integer.compare(segments.getInt(pivot), segments.getInt(forwards[j])); - if (cmp != 0) { - return cmp; + + @Override + protected int comparePivot(int j) { + int cmp = Integer.compare(shards.getInt(pivot), shards.getInt(finalForwards[j])); + if (cmp != 0) { + return cmp; + } + cmp = Integer.compare(segments.getInt(pivot), segments.getInt(finalForwards[j])); + if (cmp != 0) { + return cmp; + } + return Integer.compare(docs.getInt(pivot), docs.getInt(finalForwards[j])); } - return Integer.compare(docs.getInt(pivot), docs.getInt(forwards[j])); - } - @Override - protected void swap(int i, int j) { - int tmp = forwards[i]; - forwards[i] = forwards[j]; - forwards[j] = tmp; - } - }.sort(0, forwards.length); + @Override + protected void swap(int i, int j) { + int tmp = finalForwards[i]; + finalForwards[i] = finalForwards[j]; + finalForwards[j] = tmp; + } + }.sort(0, forwards.length); - int[] backwards = shardSegmentDocMapBackwards = new int[forwards.length]; - for (int p = 0; p < forwards.length; p++) { - backwards[forwards[p]] = p; + backwards = new int[forwards.length]; + for (int p = 0; p < forwards.length; p++) { + backwards[forwards[p]] = p; + } + success = true; + shardSegmentDocMapForwards = forwards; + shardSegmentDocMapBackwards = backwards; + } finally { + if (success == false) { + blockFactory().adjustBreaker(-estimatedSize, true); + } } } + private long sizeOfSegmentDocMap() { + return 2 * (((long) RamUsageEstimator.NUM_BYTES_ARRAY_HEADER) + ((long) Integer.BYTES) * shards.getPositionCount()); + } + @Override public DocBlock asBlock() { - return block; + return new DocBlock(this); } @Override public DocVector filter(int... positions) { - return new DocVector(shards.filter(positions), segments.filter(positions), docs.filter(positions), null); + IntVector filteredShards = null; + IntVector filteredSegments = null; + IntVector filteredDocs = null; + DocVector result = null; + try { + filteredShards = shards.filter(positions); + filteredSegments = segments.filter(positions); + filteredDocs = docs.filter(positions); + result = new DocVector(filteredShards, filteredSegments, filteredDocs, null); + return result; + } finally { + if (result == null) { + Releasables.closeExpectNoException(filteredShards, filteredSegments, filteredDocs); + } + } } @Override @@ -225,14 +255,22 @@ public long ramBytesUsed() { @Override public void allowPassingToDifferentDriver() { + super.allowPassingToDifferentDriver(); shards.allowPassingToDifferentDriver(); segments.allowPassingToDifferentDriver(); docs.allowPassingToDifferentDriver(); } @Override - public void close() { - released = true; - Releasables.closeExpectNoException(shards.asBlock(), segments.asBlock(), docs.asBlock()); // Ugh! we always close blocks + public void closeInternal() { + Releasables.closeExpectNoException( + () -> blockFactory().adjustBreaker( + -BASE_RAM_BYTES_USED - (shardSegmentDocMapForwards == null ? 
0 : sizeOfSegmentDocMap()), + true + ), + shards, + segments, + docs + ); } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/ElementType.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/ElementType.java index 324b6ee963596..2f7d65c8719e6 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/ElementType.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/ElementType.java @@ -13,16 +13,16 @@ * The type of elements in {@link Block} and {@link Vector} */ public enum ElementType { - BOOLEAN(BooleanBlock::newBlockBuilder), - INT(IntBlock::newBlockBuilder), - LONG(LongBlock::newBlockBuilder), - DOUBLE(DoubleBlock::newBlockBuilder), + BOOLEAN(BlockFactory::newBooleanBlockBuilder), + INT(BlockFactory::newIntBlockBuilder), + LONG(BlockFactory::newLongBlockBuilder), + DOUBLE(BlockFactory::newDoubleBlockBuilder), /** * Blocks containing only null values. */ - NULL((estimatedSize, blockFactory) -> new ConstantNullBlock.Builder(blockFactory)), + NULL((blockFactory, estimatedSize) -> new ConstantNullBlock.Builder(blockFactory)), - BYTES_REF(BytesRefBlock::newBlockBuilder), + BYTES_REF(BlockFactory::newBytesRefBlockBuilder), /** * Blocks that reference individual lucene documents. @@ -32,10 +32,10 @@ public enum ElementType { /** * Intermediate blocks which don't support retrieving elements. */ - UNKNOWN((estimatedSize, blockFactory) -> { throw new UnsupportedOperationException("can't build null blocks"); }); + UNKNOWN((blockFactory, estimatedSize) -> { throw new UnsupportedOperationException("can't build null blocks"); }); - interface BuilderSupplier { - Block.Builder newBlockBuilder(int estimatedSize, BlockFactory blockFactory); + private interface BuilderSupplier { + Block.Builder newBlockBuilder(BlockFactory blockFactory, int estimatedSize); } private final BuilderSupplier builder; @@ -44,20 +44,11 @@ interface BuilderSupplier { this.builder = builder; } - /** - * Create a new {@link Block.Builder} for blocks of this type. - * @deprecated use {@link #newBlockBuilder(int, BlockFactory)} - */ - @Deprecated - public Block.Builder newBlockBuilder(int estimatedSize) { - return builder.newBlockBuilder(estimatedSize, BlockFactory.getNonBreakingInstance()); - } - /** * Create a new {@link Block.Builder} for blocks of this type. */ public Block.Builder newBlockBuilder(int estimatedSize, BlockFactory blockFactory) { - return builder.newBlockBuilder(estimatedSize, blockFactory); + return builder.newBlockBuilder(blockFactory, estimatedSize); } public static ElementType fromJava(Class type) { diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Vector.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Vector.java index 0ca06498f7129..fc09f636ac700 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Vector.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Vector.java @@ -8,15 +8,16 @@ package org.elasticsearch.compute.data; import org.apache.lucene.util.Accountable; +import org.elasticsearch.core.RefCounted; import org.elasticsearch.core.Releasable; /** * A dense Vector of single values. 
*/ -public interface Vector extends Accountable, Releasable { +public interface Vector extends Accountable, RefCounted, Releasable { /** - * {@return Returns a Block view over this vector.} + * {@return a new Block containing this vector.} */ Block asBlock(); diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayBlock.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayBlock.java.st index 03397e1a2e5ad..01a6d70d63795 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayBlock.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayBlock.java.st @@ -15,31 +15,25 @@ import org.elasticsearch.core.Releasables; $else$ import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.core.Releasables; -import java.util.Arrays; $endif$ import java.util.BitSet; /** - * Block implementation that stores an array of $type$. + * Block implementation that stores values in a {@link $Type$ArrayVector}. +$if(BytesRef)$ + * Does not take ownership of the given {@link BytesRefArray} and does not adjust circuit breakers to account for it. +$endif$ * This class is generated. Do not edit it. */ -public final class $Type$ArrayBlock extends AbstractArrayBlock implements $Type$Block { +final class $Type$ArrayBlock extends AbstractArrayBlock implements $Type$Block { private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance($Type$ArrayBlock.class); -$if(BytesRef)$ - private final BytesRefArray values; - -$else$ - private final $type$[] values; -$endif$ + private final $Type$ArrayVector vector; - public $Type$ArrayBlock($if(BytesRef)$BytesRefArray$else$$type$[]$endif$ values, int positionCount, int[] firstValueIndexes, BitSet nulls, MvOrdering mvOrdering) { - this(values, positionCount, firstValueIndexes, nulls, mvOrdering, BlockFactory.getNonBreakingInstance()); - } - - public $Type$ArrayBlock( + $Type$ArrayBlock( $if(BytesRef)$BytesRefArray$else$$type$[]$endif$ values, int positionCount, int[] firstValueIndexes, @@ -48,7 +42,11 @@ $endif$ BlockFactory blockFactory ) { super(positionCount, firstValueIndexes, nulls, mvOrdering, blockFactory); - this.values = values; + $if(BytesRef)$ + this.vector = new BytesRefArrayVector(values, (int) values.size(), blockFactory); + $else$ + this.vector = new $Type$ArrayVector(values, values.length, blockFactory); + $endif$ } @Override @@ -59,10 +57,10 @@ $endif$ @Override $if(BytesRef)$ public BytesRef getBytesRef(int valueIndex, BytesRef dest) { - return values.get(valueIndex, dest); + return vector.getBytesRef(valueIndex, dest); $else$ public $type$ get$Type$(int valueIndex) { - return values[valueIndex]; + return vector.get$Type$(valueIndex); $endif$ } @@ -104,7 +102,7 @@ $endif$ incRef(); return this; } - // TODO use reference counting to share the values + // TODO use reference counting to share the vector $if(BytesRef)$ final BytesRef scratch = new BytesRef(); $endif$ @@ -128,14 +126,13 @@ $endif$ } } - public static long ramBytesEstimated($if(BytesRef)$BytesRefArray$else$$type$[]$endif$ values, int[] firstValueIndexes, BitSet nullsMask) { - return BASE_RAM_BYTES_USED + RamUsageEstimator.sizeOf(values) + BlockRamUsageEstimator.sizeOf(firstValueIndexes) - + BlockRamUsageEstimator.sizeOfBitSet(nullsMask); + private long ramBytesUsedOnlyBlock() { + return BASE_RAM_BYTES_USED + BlockRamUsageEstimator.sizeOf(firstValueIndexes) + BlockRamUsageEstimator.sizeOfBitSet(nullsMask); } 
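Vector now participates in the same single-threaded reference counting as blocks, and asBlock() hands out a fresh wrapper over the vector rather than a cached view. The sketch below exercises the contract enforced by the new AbstractNonThreadSafeRefCounted base class shown earlier in this patch; the TrackedResource subclass is purely illustrative and sits in the compute.data package only because the base class is package-private:

```java
package org.elasticsearch.compute.data;

// Illustrative subclass, not part of this change: objects start with one
// reference, close() is equivalent to decRef(), and closeInternal() runs
// exactly once, when the count reaches zero.
final class TrackedResource extends AbstractNonThreadSafeRefCounted {
    boolean closed;

    @Override
    protected void closeInternal() {
        closed = true; // real subclasses release buffers and adjust breakers here
    }

    public static void main(String[] args) {
        TrackedResource r = new TrackedResource();
        r.incRef();                // count 1 -> 2
        r.close();                 // 2 -> 1; closeInternal() has not run yet
        assert r.closed == false && r.hasReferences();
        boolean last = r.decRef(); // 1 -> 0; closeInternal() runs
        assert last && r.closed && r.hasReferences() == false;
        assert r.tryIncRef() == false; // already released; incRef() would throw
    }
}
```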
@Override public long ramBytesUsed() { - return ramBytesEstimated(values, firstValueIndexes, nullsMask); + return ramBytesUsedOnlyBlock() + vector.ramBytesUsed(); } @Override @@ -158,23 +155,20 @@ $endif$ + getPositionCount() + ", mvOrdering=" + mvOrdering() -$if(BytesRef)$ - + ", values=" - + values.size() -$else$ - + ", values=" - + Arrays.toString(values) -$endif$ + + ", vector=" + + vector + ']'; } + @Override + public void allowPassingToDifferentDriver() { + super.allowPassingToDifferentDriver(); + vector.allowPassingToDifferentDriver(); + } + @Override public void closeInternal() { - $if(BytesRef)$ - blockFactory().adjustBreaker(-ramBytesUsed() + values.bigArraysRamBytesUsed(), true); - Releasables.closeExpectNoException(values); - $else$ - blockFactory().adjustBreaker(-ramBytesUsed(), true); - $endif$ + blockFactory().adjustBreaker(-ramBytesUsedOnlyBlock(), true); + Releasables.closeExpectNoException(vector); } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayVector.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayVector.java.st index 4dd903945d04f..2608816f91f19 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayVector.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayVector.java.st @@ -21,9 +21,12 @@ $endif$ /** * Vector implementation that stores an array of $type$ values. +$if(BytesRef)$ + * Does not take ownership of the given {@link BytesRefArray} and does not adjust circuit breakers to account for it. +$endif$ * This class is generated. Do not edit it. */ -public final class $Type$ArrayVector extends AbstractVector implements $Type$Vector { +final class $Type$ArrayVector extends AbstractVector implements $Type$Vector { static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance($Type$ArrayVector.class); @@ -34,21 +37,14 @@ $else$ private final $type$[] values; $endif$ - private final $Type$Block block; - - public $Type$ArrayVector($if(BytesRef)$BytesRefArray$else$$type$[]$endif$ values, int positionCount) { - this(values, positionCount, BlockFactory.getNonBreakingInstance()); - } - - public $Type$ArrayVector($if(BytesRef)$BytesRefArray$else$$type$[]$endif$ values, int positionCount, BlockFactory blockFactory) { + $Type$ArrayVector($if(BytesRef)$BytesRefArray$else$$type$[]$endif$ values, int positionCount, BlockFactory blockFactory) { super(positionCount, blockFactory); this.values = values; - this.block = new $Type$VectorBlock(this); } @Override public $Type$Block asBlock() { - return block; + return new $Type$VectorBlock(this); } $if(BytesRef)$ @@ -124,11 +120,9 @@ $endif$ $if(BytesRef)$ @Override - public void close() { - if (released) { - throw new IllegalStateException("can't release already released vector [" + this + "]"); - } - released = true; + public void closeInternal() { + // The circuit breaker that tracks the values {@link BytesRefArray} is adjusted outside + // of this class. 
blockFactory().adjustBreaker(-ramBytesUsed() + values.bigArraysRamBytesUsed(), true); Releasables.closeExpectNoException(values); } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-BigArrayBlock.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-BigArrayBlock.java.st new file mode 100644 index 0000000000000..989f119bca062 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-BigArrayBlock.java.st @@ -0,0 +1,145 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.data; + +import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.util.$if(boolean)$Bit$else$$Type$$endif$Array; +import org.elasticsearch.core.Releasables; + +import java.util.BitSet; + +/** + * Block implementation that stores values in a {@link $Type$BigArrayVector}. Does not take ownership of the given + * {@link $if(boolean)$Bit$else$$Type$$endif$Array} and does not adjust circuit breakers to account for it. + * This class is generated. Do not edit it. + */ +public final class $Type$BigArrayBlock extends AbstractArrayBlock implements $Type$Block { + + private static final long BASE_RAM_BYTES_USED = 0; // TODO: fix this + private final $Type$BigArrayVector vector; + + public $Type$BigArrayBlock( + $if(boolean)$Bit$else$$Type$$endif$Array values, + int positionCount, + int[] firstValueIndexes, + BitSet nulls, + MvOrdering mvOrdering, + BlockFactory blockFactory + ) { + super(positionCount, firstValueIndexes, nulls, mvOrdering, blockFactory); + this.vector = new $Type$BigArrayVector(values, (int) values.size(), blockFactory); + } + + @Override + public $Type$Vector asVector() { + return null; + } + + @Override + public $type$ get$Type$(int valueIndex) { + return vector.get$Type$(valueIndex); + } + + @Override + public $Type$Block filter(int... 
positions) { + try (var builder = blockFactory().new$Type$BlockBuilder(positions.length)) { + for (int pos : positions) { + if (isNull(pos)) { + builder.appendNull(); + continue; + } + int valueCount = getValueCount(pos); + int first = getFirstValueIndex(pos); + if (valueCount == 1) { + builder.append$Type$(get$Type$(getFirstValueIndex(pos)$if(BytesRef)$, scratch$endif$)); + } else { + builder.beginPositionEntry(); + for (int c = 0; c < valueCount; c++) { + builder.append$Type$(get$Type$(first + c$if(BytesRef)$, scratch$endif$)); + } + builder.endPositionEntry(); + } + } + return builder.mvOrdering(mvOrdering()).build(); + } + } + + @Override + public ElementType elementType() { + return ElementType.$TYPE$; + } + + @Override + public $Type$Block expand() { + if (firstValueIndexes == null) { + incRef(); + return this; + } + // TODO use reference counting to share the vector + try (var builder = blockFactory().new$Type$BlockBuilder(firstValueIndexes[getPositionCount()])) { + for (int pos = 0; pos < getPositionCount(); pos++) { + if (isNull(pos)) { + builder.appendNull(); + continue; + } + int first = getFirstValueIndex(pos); + int end = first + getValueCount(pos); + for (int i = first; i < end; i++) { + builder.append$Type$(get$Type$(i)); + } + } + return builder.mvOrdering(MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING).build(); + } + } + + private long ramBytesUsedOnlyBlock() { + return BASE_RAM_BYTES_USED + BlockRamUsageEstimator.sizeOf(firstValueIndexes) + BlockRamUsageEstimator.sizeOfBitSet(nullsMask); + } + + @Override + public long ramBytesUsed() { + return ramBytesUsedOnlyBlock() + RamUsageEstimator.sizeOf(vector); + } + + @Override + public boolean equals(Object obj) { + if (obj instanceof $Type$Block that) { + return $Type$Block.equals(this, that); + } + return false; + } + + @Override + public int hashCode() { + return $Type$Block.hash(this); + } + + @Override + public String toString() { + return getClass().getSimpleName() + + "[positions=" + + getPositionCount() + + ", mvOrdering=" + + mvOrdering() + + ", ramBytesUsed=" + + vector.ramBytesUsed() + + ']'; + } + + @Override + public void allowPassingToDifferentDriver() { + super.allowPassingToDifferentDriver(); + vector.allowPassingToDifferentDriver(); + } + + @Override + public void closeInternal() { + blockFactory().adjustBreaker(-ramBytesUsedOnlyBlock(), true); + Releasables.closeExpectNoException(vector); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-BigArrayVector.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-BigArrayVector.java.st index 6a231d9ff6bf3..3664471b91e90 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-BigArrayVector.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-BigArrayVector.java.st @@ -12,30 +12,24 @@ import org.elasticsearch.common.util.$if(boolean)$Bit$else$$Type$$endif$Array; import org.elasticsearch.core.Releasable; /** - * Vector implementation that defers to an enclosed $Type$Array. + * Vector implementation that defers to an enclosed {@link $if(boolean)$Bit$else$$Type$$endif$Array}. + * Does not take ownership of the array and does not adjust circuit breakers to account for it. * This class is generated. Do not edit it. 
*/ public final class $Type$BigArrayVector extends AbstractVector implements $Type$Vector, Releasable { - private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance($Type$BigArrayVector.class); + private static final long BASE_RAM_BYTES_USED = 0; // FIXME private final $if(boolean)$Bit$else$$Type$$endif$Array values; - private final $Type$Block block; - - public $Type$BigArrayVector($if(boolean)$Bit$else$$Type$$endif$Array values, int positionCount) { - this(values, positionCount, BlockFactory.getNonBreakingInstance()); - } - public $Type$BigArrayVector($if(boolean)$Bit$else$$Type$$endif$Array values, int positionCount, BlockFactory blockFactory) { super(positionCount, blockFactory); this.values = values; - this.block = new $Type$VectorBlock(this); } @Override public $Type$Block asBlock() { - return block; + return new $Type$VectorBlock(this); } @Override @@ -78,11 +72,9 @@ public final class $Type$BigArrayVector extends AbstractVector implements $Type$ } @Override - public void close() { - if (released) { - throw new IllegalStateException("can't release already released vector [" + this + "]"); - } - released = true; + public void closeInternal() { + // The circuit breaker that tracks the values {@link $if(boolean)$Bit$else$$Type$$endif$Array} is adjusted outside + // of this class. values.close(); } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Block.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Block.java.st index 2ff537016459c..d724374539e88 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Block.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Block.java.st @@ -22,7 +22,7 @@ import java.io.IOException; * Block that stores $type$ values. * This class is generated. Do not edit it. */ -public sealed interface $Type$Block extends Block permits $Type$ArrayBlock, $Type$VectorBlock, ConstantNullBlock { +public sealed interface $Type$Block extends Block permits $Type$ArrayBlock, $Type$VectorBlock, ConstantNullBlock$if(BytesRef)$$else$, $Type$BigArrayBlock$endif$ { $if(BytesRef)$ BytesRef NULL_VALUE = new BytesRef(); @@ -203,44 +203,6 @@ $endif$ return result; } - /** - * Returns a builder using the {@link BlockFactory#getNonBreakingInstance non-breaking block factory}. - * @deprecated use {@link BlockFactory#new$Type$BlockBuilder} - */ - // Eventually, we want to remove this entirely, always passing an explicit BlockFactory - @Deprecated - static Builder newBlockBuilder(int estimatedSize) { - return newBlockBuilder(estimatedSize, BlockFactory.getNonBreakingInstance()); - } - - /** - * Returns a builder. - * @deprecated use {@link BlockFactory#new$Type$BlockBuilder} - */ - @Deprecated - static Builder newBlockBuilder(int estimatedSize, BlockFactory blockFactory) { - return blockFactory.new$Type$BlockBuilder(estimatedSize); - } - - /** - * Returns a constant block built by the {@link BlockFactory#getNonBreakingInstance non-breaking block factory}. - * @deprecated use {@link BlockFactory#newConstant$Type$BlockWith} - */ - // Eventually, we want to remove this entirely, always passing an explicit BlockFactory - @Deprecated - static $Type$Block newConstantBlockWith($type$ value, int positions) { - return newConstantBlockWith(value, positions, BlockFactory.getNonBreakingInstance()); - } - - /** - * Returns a constant block. 
- * @deprecated use {@link BlockFactory#newConstant$Type$BlockWith} - */ - @Deprecated - static $Type$Block newConstantBlockWith($type$ value, int positions, BlockFactory blockFactory) { - return blockFactory.newConstant$Type$BlockWith(value, positions); - } - /** * Builder for {@link $Type$Block} */ diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-BlockBuilder.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-BlockBuilder.java.st index 61527f166cfa9..63d16c09253e5 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-BlockBuilder.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-BlockBuilder.java.st @@ -17,6 +17,7 @@ import org.elasticsearch.core.Releasables; $else$ import org.apache.lucene.util.RamUsageEstimator; import org.elasticsearch.common.breaker.CircuitBreakingException; +import org.elasticsearch.common.util.$if(boolean)$Bit$else$$Type$$endif$Array; import java.util.Arrays; $endif$ @@ -246,60 +247,107 @@ $endif$ return this; } +$if(BytesRef)$ + private $Type$Block buildFromBytesArray() { + assert estimatedBytes == 0 || firstValueIndexes != null; + final $Type$Block theBlock; + if (hasNonNullValue && positionCount == 1 && valueCount == 1) { + theBlock = new ConstantBytesRefVector(BytesRef.deepCopyOf(values.get(0, new BytesRef())), 1, blockFactory).asBlock(); + /* + * Update the breaker with the actual bytes used. + * We pass false below even though we've used the bytes. That's weird, + * but if we break here we will throw away the used memory, letting + * it be deallocated. The exception will bubble up and the builder will + * still technically be open, meaning the calling code should close it + * which will return all used memory to the breaker. + */ + blockFactory.adjustBreaker(theBlock.ramBytesUsed() - estimatedBytes, false); + Releasables.closeExpectNoException(values); + } else { + if (isDense() && singleValued()) { + theBlock = new $Type$ArrayVector(values, positionCount, blockFactory).asBlock(); + } else { + theBlock = new $Type$ArrayBlock(values, positionCount, firstValueIndexes, nullsMask, mvOrdering, blockFactory); + } + /* + * Update the breaker with the actual bytes used. + * We pass false below even though we've used the bytes. That's weird, + * but if we break here we will throw away the used memory, letting + * it be deallocated. The exception will bubble up and the builder will + * still technically be open, meaning the calling code should close it + * which will return all used memory to the breaker. + */ + blockFactory.adjustBreaker(theBlock.ramBytesUsed() - estimatedBytes - values.bigArraysRamBytesUsed(), false); + } + return theBlock; + } + +$else$ + private $Type$Block buildBigArraysBlock() { + final $Type$Block theBlock; + $if(boolean)$ + final BitArray array = new BitArray(valueCount, blockFactory.bigArrays()); + for (int i = 0; i < valueCount; i++) { + if (values[i]) { + array.set(i); + } + } + $else$ + final $Type$Array array = blockFactory.bigArrays().new$Type$Array(valueCount, false); + for (int i = 0; i < valueCount; i++) { + array.set(i, values[i]); + } + $endif$ + if (isDense() && singleValued()) { + theBlock = new $Type$BigArrayVector(array, positionCount, blockFactory).asBlock(); + } else { + theBlock = new $Type$BigArrayBlock(array, positionCount, firstValueIndexes, nullsMask, mvOrdering, blockFactory); + } + /* + * Update the breaker with the actual bytes used. 
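+ * As a worked example (numbers are hypothetical): if the builder reserved estimatedBytes = 4096
+ * for its temporary primitive array and the finished block reports ramBytesUsed() = 512, the
+ * adjustment below is 512 - 4096 - array.ramBytesUsed(); the big array itself stays accounted
+ * for by the factory's BigArrays rather than by this block.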
+ * We pass false below even though we've used the bytes. That's weird, + * but if we break here we will throw away the used memory, letting + * it be deallocated. The exception will bubble up and the builder will + * still technically be open, meaning the calling code should close it + * which will return all used memory to the breaker. + */ + blockFactory.adjustBreaker(theBlock.ramBytesUsed() - estimatedBytes - array.ramBytesUsed(), false); + return theBlock; + } +$endif$ + @Override public $Type$Block build() { try { finish(); $Type$Block theBlock; $if(BytesRef)$ - assert estimatedBytes == 0 || firstValueIndexes != null; - if (hasNonNullValue && positionCount == 1 && valueCount == 1) { - theBlock = new ConstantBytesRefVector(BytesRef.deepCopyOf(values.get(0, new BytesRef())), 1, blockFactory).asBlock(); - /* - * Update the breaker with the actual bytes used. - * We pass false below even though we've used the bytes. That's weird, - * but if we break here we will throw away the used memory, letting - * it be deallocated. The exception will bubble up and the builder will - * still technically be open, meaning the calling code should close it - * which will return all used memory to the breaker. - */ - blockFactory.adjustBreaker(theBlock.ramBytesUsed() - estimatedBytes, false); - Releasables.closeExpectNoException(values); - } else { - if (isDense() && singleValued()) { - theBlock = new $Type$ArrayVector(values, positionCount, blockFactory).asBlock(); - } else { - theBlock = new $Type$ArrayBlock(values, positionCount, firstValueIndexes, nullsMask, mvOrdering, blockFactory); - } - /* - * Update the breaker with the actual bytes used. - * We pass false below even though we've used the bytes. That's weird, - * but if we break here we will throw away the used memory, letting - * it be deallocated. The exception will bubble up and the builder will - * still technically be open, meaning the calling code should close it - * which will return all used memory to the breaker. 
- */ - blockFactory.adjustBreaker(theBlock.ramBytesUsed() - estimatedBytes - values.bigArraysRamBytesUsed(), false); - } + theBlock = buildFromBytesArray(); values = null; $else$ if (hasNonNullValue && positionCount == 1 && valueCount == 1) { theBlock = blockFactory.newConstant$Type$BlockWith(values[0], 1, estimatedBytes); } else { - if (values.length - valueCount > 1024 || valueCount < (values.length / 2)) { - values = Arrays.copyOf(values, valueCount); - } - if (isDense() && singleValued()) { - theBlock = blockFactory.new$Type$ArrayVector(values, positionCount, estimatedBytes).asBlock(); + if (estimatedBytes > blockFactory.maxPrimitiveArrayBytes()) { + theBlock = buildBigArraysBlock(); } else { - theBlock = blockFactory.new$Type$ArrayBlock( - values, - positionCount, - firstValueIndexes, - nullsMask, - mvOrdering, - estimatedBytes - ); + if (values.length - valueCount > 1024 || valueCount < (values.length / 2)) { + adjustBreaker(valueCount * elementSize()); + values = Arrays.copyOf(values, valueCount); + adjustBreaker(-values.length * elementSize()); + } + if (isDense() && singleValued()) { + theBlock = blockFactory.new$Type$ArrayVector(values, positionCount, estimatedBytes).asBlock(); + } else { + theBlock = blockFactory.new$Type$ArrayBlock( + values, + positionCount, + firstValueIndexes, + nullsMask, + mvOrdering, + estimatedBytes + ); + } } } $endif$ diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ConstantVector.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ConstantVector.java.st index f685d38d6459b..625f014a20ffc 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ConstantVector.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ConstantVector.java.st @@ -16,7 +16,7 @@ import org.apache.lucene.util.RamUsageEstimator; * Vector implementation that stores a constant $type$ value. * This class is generated. Do not edit it. */ -public final class Constant$Type$Vector extends AbstractVector implements $Type$Vector { +final class Constant$Type$Vector extends AbstractVector implements $Type$Vector { $if(BytesRef)$ static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(ConstantBytesRefVector.class) + RamUsageEstimator @@ -27,16 +27,9 @@ $endif$ private final $type$ value; - private final $Type$Block block; - - public Constant$Type$Vector($type$ value, int positionCount) { - this(value, positionCount, BlockFactory.getNonBreakingInstance()); - } - - public Constant$Type$Vector($type$ value, int positionCount, BlockFactory blockFactory) { + Constant$Type$Vector($type$ value, int positionCount, BlockFactory blockFactory) { super(positionCount, blockFactory); this.value = value; - this.block = new $Type$VectorBlock(this); } @Override @@ -50,12 +43,12 @@ $endif$ @Override public $Type$Block asBlock() { - return block; + return new $Type$VectorBlock(this); } @Override public $Type$Vector filter(int... 
positions) { - return new Constant$Type$Vector(value, positions.length); + return blockFactory().newConstant$Type$Vector(value, positions.length); } @Override @@ -101,13 +94,4 @@ $endif$ public String toString() { return getClass().getSimpleName() + "[positions=" + getPositionCount() + ", value=" + value + ']'; } - - @Override - public void close() { - if (released) { - throw new IllegalStateException("can't release already released vector [" + this + "]"); - } - released = true; - blockFactory().adjustBreaker(-ramBytesUsed(), true); - } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Vector.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Vector.java.st index 6ec41ccdc6ab9..c303a8391ad18 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Vector.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Vector.java.st @@ -143,46 +143,6 @@ $endif$ } } - /** - * Returns a builder using the {@link BlockFactory#getNonBreakingInstance nonbreaking block factory}. - * @deprecated use {@link BlockFactory#new$Type$VectorBuilder} - */ - // Eventually, we want to remove this entirely, always passing an explicit BlockFactory - @Deprecated - static Builder newVectorBuilder(int estimatedSize) { - return newVectorBuilder(estimatedSize, BlockFactory.getNonBreakingInstance()); - } - -$if(BytesRef)$ - /** - * Creates a builder that grows as needed. - * @deprecated use {@link BlockFactory#new$Type$VectorBuilder} - */ -$else$ - /** - * Creates a builder that grows as needed. Prefer {@link #newVectorFixedBuilder} - * if you know the size up front because it's faster. - * @deprecated use {@link BlockFactory#new$Type$VectorBuilder} - */ -$endif$ - @Deprecated - static Builder newVectorBuilder(int estimatedSize, BlockFactory blockFactory) { - return blockFactory.new$Type$VectorBuilder(estimatedSize); - } - -$if(BytesRef)$ -$else$ - /** - * Creates a builder that never grows. Prefer this over {@link #newVectorBuilder} - * if you know the size up front because it's faster. - * @deprecated use {@link BlockFactory#new$Type$VectorFixedBuilder} - */ - @Deprecated - static FixedBuilder newVectorFixedBuilder(int size, BlockFactory blockFactory) { - return blockFactory.new$Type$VectorFixedBuilder(size); - } -$endif$ - $if(int)$ /** Create a vector for a range of ints. */ static IntVector range(int startInclusive, int endExclusive, BlockFactory blockFactory) { @@ -197,7 +157,11 @@ $endif$ /** * A builder that grows as needed. */ +$if(BytesRef)$ sealed interface Builder extends Vector.Builder permits $Type$VectorBuilder { +$else$ + sealed interface Builder extends Vector.Builder permits $Type$VectorBuilder, FixedBuilder { +$endif$ /** * Appends a $type$ to the current entry. */ @@ -212,14 +176,12 @@ $else$ /** * A builder that never grows. */ - sealed interface FixedBuilder extends Vector.Builder permits $Type$VectorFixedBuilder { + sealed interface FixedBuilder extends Builder permits $Type$VectorFixedBuilder { /** * Appends a $type$ to the current entry. 
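+ * As an illustrative sketch for the generated {@code long} specialization:
+ * {@code blockFactory.newLongVectorFixedBuilder(2).appendLong(1).appendLong(2).build()}
+ * appends exactly two values; unlike {@link Builder}, a fixed builder never grows.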
*/ - FixedBuilder append$Type$($type$ value); - @Override - $Type$Vector build(); + FixedBuilder append$Type$($type$ value); } $endif$ } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-VectorBlock.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-VectorBlock.java.st index 8772e633ff14b..4bc3c66b65743 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-VectorBlock.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-VectorBlock.java.st @@ -13,7 +13,7 @@ $endif$ import org.elasticsearch.core.Releasables; /** - * Block view of a $Type$Vector. + * Block view of a {@link $Type$Vector}. Cannot represent multi-values or nulls. * This class is generated. Do not edit it. */ public final class $Type$VectorBlock extends AbstractVectorBlock implements $Type$Block { @@ -81,11 +81,6 @@ $endif$ return getClass().getSimpleName() + "[vector=" + vector + "]"; } - @Override - public boolean isReleased() { - return super.isReleased() || vector.isReleased(); - } - @Override public void closeInternal() { assert (vector.isReleased() == false) : "can't release block [" + this + "] containing already released vector"; diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/IdFieldIndexFieldData.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/IdFieldIndexFieldData.java deleted file mode 100644 index d91c758ab3bd9..0000000000000 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/IdFieldIndexFieldData.java +++ /dev/null @@ -1,129 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
- */ - -package org.elasticsearch.compute.lucene; - -import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.search.SortField; -import org.apache.lucene.util.BytesRef; -import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.common.util.BigArrays; -import org.elasticsearch.index.fielddata.IndexFieldData; -import org.elasticsearch.index.fielddata.LeafFieldData; -import org.elasticsearch.index.fielddata.SortedBinaryDocValues; -import org.elasticsearch.index.fieldvisitor.LeafStoredFieldLoader; -import org.elasticsearch.index.fieldvisitor.StoredFieldLoader; -import org.elasticsearch.index.mapper.IdFieldMapper; -import org.elasticsearch.script.field.DocValuesScriptFieldFactory; -import org.elasticsearch.search.DocValueFormat; -import org.elasticsearch.search.MultiValueMode; -import org.elasticsearch.search.aggregations.support.ValuesSourceType; -import org.elasticsearch.search.sort.BucketedSort; -import org.elasticsearch.search.sort.SortOrder; - -import java.io.IOException; -import java.util.Set; - -public class IdFieldIndexFieldData implements IndexFieldData<IdFieldIndexFieldData.IdFieldLeafFieldData> { - - private static final String FIELD_NAME = IdFieldMapper.NAME; - private final ValuesSourceType valuesSourceType; - private final StoredFieldLoader loader; - - protected IdFieldIndexFieldData(ValuesSourceType valuesSourceType) { - this.valuesSourceType = valuesSourceType; - this.loader = StoredFieldLoader.create(false, Set.of(FIELD_NAME)); - } - - @Override - public String getFieldName() { - return FIELD_NAME; - } - - @Override - public ValuesSourceType getValuesSourceType() { - return valuesSourceType; - } - - @Override - public final IdFieldLeafFieldData load(LeafReaderContext context) { - try { - return loadDirect(context); - } catch (Exception e) { - throw ExceptionsHelper.convertToElastic(e); - } - } - - @Override - public final IdFieldLeafFieldData loadDirect(LeafReaderContext context) throws Exception { - return new IdFieldLeafFieldData(loader.getLoader(context, null)); - } - - @Override - public SortField sortField(Object missingValue, MultiValueMode sortMode, XFieldComparatorSource.Nested nested, boolean reverse) { - throw new IllegalArgumentException("not supported for stored field fallback"); - } - - @Override - public BucketedSort newBucketedSort( - BigArrays bigArrays, - Object missingValue, - MultiValueMode sortMode, - XFieldComparatorSource.Nested nested, - SortOrder sortOrder, - DocValueFormat format, - int bucketSize, - BucketedSort.ExtraData extra - ) { - throw new IllegalArgumentException("not supported for stored field fallback"); - } - - class IdFieldLeafFieldData implements LeafFieldData { - private final LeafStoredFieldLoader loader; - - protected IdFieldLeafFieldData(LeafStoredFieldLoader loader) { - this.loader = loader; - } - - @Override - public DocValuesScriptFieldFactory getScriptFieldFactory(String name) { - throw new IllegalArgumentException("not supported for _id field"); - } - - @Override - public long ramBytesUsed() { - return 0L; - } - - @Override - public void close() {} - - @Override - public SortedBinaryDocValues getBytesValues() { - return new SortedBinaryDocValues() { - private String id; - - @Override - public boolean advanceExact(int doc) throws IOException { - loader.advanceTo(doc); - id = loader.id(); - return id != null; - } - - @Override - public int docValueCount() { - return 1; - } - - @Override - public BytesRef nextValue() throws IOException { - return new BytesRef(id); - } - }; - } - } -} diff --git 
a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneCountOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneCountOperator.java index 75bd230638928..4ed32d6552497 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneCountOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneCountOperator.java @@ -160,8 +160,8 @@ public Page getOutput() { LongBlock count = null; BooleanBlock seen = null; try { - count = LongBlock.newConstantBlockWith(totalHits, PAGE_SIZE, blockFactory); - seen = BooleanBlock.newConstantBlockWith(true, PAGE_SIZE, blockFactory); + count = blockFactory.newConstantLongBlockWith(totalHits, PAGE_SIZE); + seen = blockFactory.newConstantBooleanBlockWith(true, PAGE_SIZE); page = new Page(PAGE_SIZE, count, seen); } finally { if (page == null) { diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneOperator.java index 6536b08cd2419..21b2a4cfaeb0b 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneOperator.java @@ -16,6 +16,7 @@ import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Weight; import org.apache.lucene.util.Bits; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; @@ -30,8 +31,13 @@ import java.io.IOException; import java.io.UncheckedIOException; +import java.util.Collections; +import java.util.HashSet; import java.util.Objects; +import java.util.Set; +import java.util.TreeSet; import java.util.function.Function; +import java.util.stream.Collectors; public abstract class LuceneOperator extends SourceOperator { private static final Logger logger = LogManager.getLogger(LuceneOperator.class); @@ -40,10 +46,16 @@ public abstract class LuceneOperator extends SourceOperator { protected final BlockFactory blockFactory; - private int processSlices; + /** + * Count of the number of slices processed. 
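+ * Slices that contain no leaves are skipped and do not count toward this total.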
+ */ + private int processedSlices; final int maxPageSize; private final LuceneSliceQueue sliceQueue; + private final Set<Query> processedQueries = new HashSet<>(); + private final Set<String> processedShards = new HashSet<>(); + private LuceneSlice currentSlice; private int sliceIndex; @@ -52,7 +64,7 @@ public abstract class LuceneOperator extends SourceOperator { int pagesEmitted; boolean doneCollecting; - public LuceneOperator(BlockFactory blockFactory, int maxPageSize, LuceneSliceQueue sliceQueue) { + protected LuceneOperator(BlockFactory blockFactory, int maxPageSize, LuceneSliceQueue sliceQueue) { this.blockFactory = blockFactory; this.maxPageSize = maxPageSize; this.sliceQueue = sliceQueue; @@ -73,18 +85,23 @@ LuceneScorer getCurrentOrLoadNextScorer() { if (currentSlice == null) { doneCollecting = true; return null; - } else { - processSlices++; } if (currentSlice.numLeaves() == 0) { continue; } + processedSlices++; + processedShards.add( + currentSlice.searchContext().getSearchExecutionContext().getFullyQualifiedIndex().getName() + + ":" + + currentSlice.searchContext().getSearchExecutionContext().getShardId() + ); } final PartialLeafReaderContext partialLeaf = currentSlice.getLeaf(sliceIndex++); logger.trace("Starting {}", partialLeaf); final LeafReaderContext leaf = partialLeaf.leafReaderContext(); if (currentScorer == null || currentScorer.leafReaderContext() != leaf) { final Weight weight = currentSlice.weight().get(); + processedQueries.add(weight.getQuery()); currentScorer = new LuceneScorer(currentSlice.shardIndex(), currentSlice.searchContext(), weight, leaf); } assert currentScorer.maxPosition <= partialLeaf.maxDoc() : currentScorer.maxPosition + ">" + partialLeaf.maxDoc(); @@ -190,6 +207,8 @@ public static class Status implements Operator.Status { ); private final int processedSlices; + private final Set<String> processedQueries; + private final Set<String> processedShards; private final int totalSlices; private final int pagesEmitted; private final int sliceIndex; @@ -198,7 +217,9 @@ public static class Status implements Operator.Status { private final int current; private Status(LuceneOperator operator) { - processedSlices = operator.processSlices; + processedSlices = operator.processedSlices; + processedQueries = operator.processedQueries.stream().map(Query::toString).collect(Collectors.toCollection(TreeSet::new)); + processedShards = new TreeSet<>(operator.processedShards); sliceIndex = operator.sliceIndex; totalSlices = operator.sliceQueue.totalSlices(); LuceneSlice slice = operator.currentSlice; @@ -219,8 +240,20 @@ private Status(LuceneOperator operator) { pagesEmitted = operator.pagesEmitted; } - Status(int processedSlices, int sliceIndex, int totalSlices, int pagesEmitted, int sliceMin, int sliceMax, int current) { + Status( + int processedSlices, + Set<String> processedQueries, + Set<String> processedShards, + int sliceIndex, + int totalSlices, + int pagesEmitted, + int sliceMin, + int sliceMax, + int current + ) { this.processedSlices = processedSlices; + this.processedQueries = processedQueries; + this.processedShards = processedShards; this.sliceIndex = sliceIndex; this.totalSlices = totalSlices; this.pagesEmitted = pagesEmitted; @@ -231,6 +264,13 @@ private Status(LuceneOperator operator) { Status(StreamInput in) throws IOException { processedSlices = in.readVInt(); + if (in.getTransportVersion().onOrAfter(TransportVersions.ESQL_STATUS_INCLUDE_LUCENE_QUERIES)) { + processedQueries = in.readCollectionAsSet(StreamInput::readString); + processedShards = in.readCollectionAsSet(StreamInput::readString); + }
else { + processedQueries = Collections.emptySet(); + processedShards = Collections.emptySet(); + } sliceIndex = in.readVInt(); totalSlices = in.readVInt(); pagesEmitted = in.readVInt(); @@ -242,6 +282,10 @@ private Status(LuceneOperator operator) { @Override public void writeTo(StreamOutput out) throws IOException { out.writeVInt(processedSlices); + if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_STATUS_INCLUDE_LUCENE_QUERIES)) { + out.writeCollection(processedQueries, StreamOutput::writeString); + out.writeCollection(processedShards, StreamOutput::writeString); + } out.writeVInt(sliceIndex); out.writeVInt(totalSlices); out.writeVInt(pagesEmitted); @@ -259,6 +303,14 @@ public int processedSlices() { return processedSlices; } + public Set<String> processedQueries() { + return processedQueries; + } + + public Set<String> processedShards() { + return processedShards; + } + public int sliceIndex() { return sliceIndex; } @@ -287,6 +339,8 @@ public int current() { public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); builder.field("processed_slices", processedSlices); + builder.field("processed_queries", processedQueries); + builder.field("processed_shards", processedShards); builder.field("slice_index", sliceIndex); builder.field("total_slices", totalSlices); builder.field("pages_emitted", pagesEmitted); @@ -302,6 +356,8 @@ public boolean equals(Object o) { if (o == null || getClass() != o.getClass()) return false; Status status = (Status) o; return processedSlices == status.processedSlices + && processedQueries.equals(status.processedQueries) + && processedShards.equals(status.processedShards) && sliceIndex == status.sliceIndex && totalSlices == status.totalSlices && pagesEmitted == status.pagesEmitted diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneSourceOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneSourceOperator.java index 7b2b276a619c6..b636e4aba8a5e 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneSourceOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneSourceOperator.java @@ -95,7 +95,7 @@ public LuceneSourceOperator(BlockFactory blockFactory, int maxPageSize, LuceneSl super(blockFactory, maxPageSize, sliceQueue); this.minPageSize = Math.max(1, maxPageSize / 2); this.remainingDocs = limit; - this.docsBuilder = IntVector.newVectorBuilder(Math.min(limit, maxPageSize), blockFactory); + this.docsBuilder = blockFactory.newIntVectorBuilder(Math.min(limit, maxPageSize)); this.leafCollector = new LeafCollector() { @Override public void setScorer(Scorable scorer) { @@ -149,10 +149,10 @@ public Page getOutput() { IntBlock leaf = null; IntVector docs = null; try { - shard = IntBlock.newConstantBlockWith(scorer.shardIndex(), currentPagePos, blockFactory); - leaf = IntBlock.newConstantBlockWith(scorer.leafReaderContext().ord, currentPagePos, blockFactory); + shard = blockFactory.newConstantIntBlockWith(scorer.shardIndex(), currentPagePos); + leaf = blockFactory.newConstantIntBlockWith(scorer.leafReaderContext().ord, currentPagePos); docs = docsBuilder.build(); - docsBuilder = IntVector.newVectorBuilder(Math.min(remainingDocs, maxPageSize), blockFactory); + docsBuilder = blockFactory.newIntVectorBuilder(Math.min(remainingDocs, maxPageSize)); page = new Page(currentPagePos, new DocVector(shard.asVector(), leaf.asVector(), docs, true).asBlock()); } finally 
{ if (page == null) { diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneTopNSourceOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneTopNSourceOperator.java index 9624fa48ef20d..7f08c8ca66821 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneTopNSourceOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneTopNSourceOperator.java @@ -200,8 +200,8 @@ private Page emit(boolean startEmitting) { IntVector docs = null; Page page = null; try ( - IntVector.Builder currentSegmentBuilder = IntVector.newVectorBuilder(size, blockFactory); - IntVector.Builder currentDocsBuilder = IntVector.newVectorBuilder(size, blockFactory) + IntVector.Builder currentSegmentBuilder = blockFactory.newIntVectorFixedBuilder(size); + IntVector.Builder currentDocsBuilder = blockFactory.newIntVectorFixedBuilder(size) ) { int start = offset; offset += size; @@ -213,7 +213,7 @@ private Page emit(boolean startEmitting) { currentDocsBuilder.appendInt(doc - leafContexts.get(segment).docBase); // the offset inside the segment } - shard = IntBlock.newConstantBlockWith(perShardCollector.shardIndex, size, blockFactory); + shard = blockFactory.newConstantIntBlockWith(perShardCollector.shardIndex, size); segments = currentSegmentBuilder.build(); docs = currentDocsBuilder.build(); page = new Page(size, new DocVector(shard.asVector(), segments, docs, null).asBlock()); diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/TextValueSource.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/TextValueSource.java deleted file mode 100644 index 04dbcd91c18c8..0000000000000 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/TextValueSource.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -package org.elasticsearch.compute.lucene; - -import org.apache.lucene.index.LeafReaderContext; -import org.elasticsearch.index.fielddata.IndexFieldData; -import org.elasticsearch.index.fielddata.LeafFieldData; -import org.elasticsearch.index.fielddata.SortedBinaryDocValues; -import org.elasticsearch.script.field.TextDocValuesField; -import org.elasticsearch.search.aggregations.support.ValuesSource; - -public class TextValueSource extends ValuesSource.Bytes { - - private final IndexFieldData<?> indexFieldData; - - public TextValueSource(IndexFieldData<?> indexFieldData) { - this.indexFieldData = indexFieldData; - } - - @Override - public SortedBinaryDocValues bytesValues(LeafReaderContext leafReaderContext) { - String fieldName = indexFieldData.getFieldName(); - LeafFieldData fieldData = indexFieldData.load(leafReaderContext); - return ((TextDocValuesFieldWrapper) fieldData.getScriptFieldFactory(fieldName)).bytesValues(); - } - - /** Wrapper around TextDocValuesField that provides access to the SortedBinaryDocValues. 
*/ - static final class TextDocValuesFieldWrapper extends TextDocValuesField { - TextDocValuesFieldWrapper(SortedBinaryDocValues input, String name) { - super(input, name); - } - - SortedBinaryDocValues bytesValues() { - return input; - } - } -} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverTaskRunner.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverTaskRunner.java index 1293118680824..38d879f8f7ad4 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverTaskRunner.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverTaskRunner.java @@ -11,9 +11,9 @@ import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.CompositeIndicesRequest; +import org.elasticsearch.action.support.ChannelActionListener; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.compute.OwningChannelActionListener; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; @@ -117,7 +117,7 @@ public Status getStatus() { private record DriverRequestHandler(TransportService transportService) implements TransportRequestHandler<DriverRequest> { @Override public void messageReceived(DriverRequest request, TransportChannel channel, Task task) { - var listener = new OwningChannelActionListener(channel); + var listener = new ChannelActionListener(channel); Driver.start( transportService.getThreadPool().getThreadContext(), request.executor, diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/EvalOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/EvalOperator.java index 2a6a3c9b6210b..10f23ed29094f 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/EvalOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/EvalOperator.java @@ -75,27 +75,22 @@ interface Factory { public static final ExpressionEvaluator.Factory CONSTANT_NULL_FACTORY = new ExpressionEvaluator.Factory() { @Override public ExpressionEvaluator get(DriverContext driverContext) { - return CONSTANT_NULL; - } + return new ExpressionEvaluator() { + @Override + public Block eval(Page page) { + return driverContext.blockFactory().newConstantNullBlock(page.getPositionCount()); + } - @Override - public String toString() { - return CONSTANT_NULL.toString(); - } - }; + @Override + public void close() { - public static final ExpressionEvaluator CONSTANT_NULL = new ExpressionEvaluator() { - @Override - public Block eval(Page page) { - return Block.constantNullBlock(page.getPositionCount()); + } + }; } @Override public String toString() { return "ConstantNull"; } - - @Override - public void close() {} }; } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/MultivalueDedupeBoolean.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/MultivalueDedupeBoolean.java index f3570bf7b853b..d6a908306e2f4 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/MultivalueDedupeBoolean.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/MultivalueDedupeBoolean.java @@ -46,7 +46,7 @@ public BooleanBlock 
dedupeToBlock(BlockFactory blockFactory) { block.incRef(); return block; } - try (BooleanBlock.Builder builder = BooleanBlock.newBlockBuilder(block.getPositionCount(), blockFactory)) { + try (BooleanBlock.Builder builder = blockFactory.newBooleanBlockBuilder(block.getPositionCount())) { for (int p = 0; p < block.getPositionCount(); p++) { int count = block.getValueCount(p); int first = block.getFirstValueIndex(p); diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/OrdinalsGroupingOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/OrdinalsGroupingOperator.java index 4fb90ddb57e25..a895525add46f 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/OrdinalsGroupingOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/OrdinalsGroupingOperator.java @@ -475,7 +475,7 @@ private static class ValuesAggregator implements Releasable { DriverContext driverContext ) { this.extractor = new ValuesSourceReaderOperator( - BlockFactory.getNonBreakingInstance(), + driverContext.blockFactory(), List.of(new ValuesSourceReaderOperator.FieldInfo(groupingField, blockLoaders)), shardContexts, docChannel diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/RowOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/RowOperator.java index ff124021ea3ad..4b4379eb6a4d8 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/RowOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/RowOperator.java @@ -22,8 +22,7 @@ public record RowOperatorFactory(List objects) implements SourceOperator @Override public SourceOperator get(DriverContext driverContext) { - // We aren't yet ready to use the read block factory - return new RowOperator(BlockFactory.getNonBreakingInstance(), objects); + return new RowOperator(driverContext.blockFactory(), objects); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/StringExtractOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/StringExtractOperator.java index 4ffa530bc5d3a..ec61408954219 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/StringExtractOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/StringExtractOperator.java @@ -65,7 +65,7 @@ protected Page process(Page page) { BytesRefBlock.Builder[] blockBuilders = new BytesRefBlock.Builder[fieldNames.length]; try { for (int i = 0; i < fieldNames.length; i++) { - blockBuilders[i] = BytesRefBlock.newBlockBuilder(rowsCount, driverContext.blockFactory()); + blockBuilders[i] = driverContext.blockFactory().newBytesRefBlockBuilder(rowsCount); } try (BytesRefBlock input = (BytesRefBlock) inputEvaluator.eval(page)) { diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/ThrowingDriverContext.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/ThrowingDriverContext.java index 313ec0b682602..c1029db4c32e4 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/ThrowingDriverContext.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/ThrowingDriverContext.java @@ -7,6 +7,7 @@ package org.elasticsearch.compute.operator; +import 
org.elasticsearch.common.breaker.NoopCircuitBreaker; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.ByteArray; import org.elasticsearch.common.util.DoubleArray; @@ -18,7 +19,7 @@ public class ThrowingDriverContext extends DriverContext { public ThrowingDriverContext() { - super(new ThrowingBigArrays(), BlockFactory.getNonBreakingInstance()); + super(new ThrowingBigArrays(), BlockFactory.getInstance(new NoopCircuitBreaker("throwing-context"), new ThrowingBigArrays())); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/X-MultivalueDedupe.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/X-MultivalueDedupe.java.st index 169e7aa427717..a4e07cfaadd44 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/X-MultivalueDedupe.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/X-MultivalueDedupe.java.st @@ -77,7 +77,7 @@ $endif$ block.incRef(); return block; } - try ($Type$Block.Builder builder = $Type$Block.newBlockBuilder(block.getPositionCount(), blockFactory)) { + try ($Type$Block.Builder builder = blockFactory.new$Type$BlockBuilder(block.getPositionCount())) { for (int p = 0; p < block.getPositionCount(); p++) { int count = block.getValueCount(p); int first = block.getFirstValueIndex(p); @@ -131,7 +131,7 @@ $endif$ block.incRef(); return block; } - try ($Type$Block.Builder builder = $Type$Block.newBlockBuilder(block.getPositionCount(), blockFactory)) { + try ($Type$Block.Builder builder = blockFactory.new$Type$BlockBuilder(block.getPositionCount())) { for (int p = 0; p < block.getPositionCount(); p++) { int count = block.getValueCount(p); int first = block.getFirstValueIndex(p); @@ -165,7 +165,7 @@ $endif$ block.incRef(); return block; } - try ($Type$Block.Builder builder = $Type$Block.newBlockBuilder(block.getPositionCount(), blockFactory)) { + try ($Type$Block.Builder builder = blockFactory.new$Type$BlockBuilder(block.getPositionCount())) { for (int p = 0; p < block.getPositionCount(); p++) { int count = block.getValueCount(p); int first = block.getFirstValueIndex(p); diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeService.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeService.java index 8fb38ccf907d6..3173b716467be 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeService.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeService.java @@ -13,7 +13,7 @@ import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListenerResponseHandler; -import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.action.support.ChannelActionListener; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.component.Lifecycle; import org.elasticsearch.common.io.stream.StreamInput; @@ -21,14 +21,12 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.AbstractAsyncTask; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; -import org.elasticsearch.compute.OwningChannelActionListener; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.BlockStreamInput; 
import org.elasticsearch.core.TimeValue; -import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; -import org.elasticsearch.tasks.TaskCancelledException; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportChannel; import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.transport.TransportRequestHandler; @@ -43,7 +41,7 @@ /** * {@link ExchangeService} is responsible for exchanging pages between exchange sinks and sources on the same or different nodes. - * It holds a map of {@link ExchangeSourceHandler} and {@link ExchangeSinkHandler} instances for each node in the cluster. + * It holds a map of {@link ExchangeSinkHandler} instances for each node in the cluster to serve {@link ExchangeRequest}s. * To connect exchange sources to exchange sinks, use the {@link ExchangeSourceHandler#addRemoteSink(RemoteSink, int)} method. */ public final class ExchangeService extends AbstractLifecycleComponent { @@ -66,7 +64,6 @@ public final class ExchangeService extends AbstractLifecycleComponent { private final BlockFactory blockFactory; private final Map<String, ExchangeSinkHandler> sinks = ConcurrentCollections.newConcurrentMap(); - private final Map<String, ExchangeSourceHandler> sources = ConcurrentCollections.newConcurrentMap(); private final InactiveSinksReaper inactiveSinksReaper; @@ -125,35 +122,22 @@ public void finishSinkHandler(String exchangeId, Exception failure) { } } - /** - * Creates an {@link ExchangeSourceHandler} for the specified exchange id. - * - * @throws IllegalStateException if a source handler for the given id already exists - */ - public ExchangeSourceHandler createSourceHandler(String exchangeId, int maxBufferSize, String fetchExecutor) { - ExchangeSourceHandler sourceHandler = new ExchangeSourceHandler(maxBufferSize, threadPool.executor(fetchExecutor)); - if (sources.putIfAbsent(exchangeId, sourceHandler) != null) { - throw new IllegalStateException("source exchanger for id [" + exchangeId + "] already exists"); - } - sourceHandler.addCompletionListener(ActionListener.releasing(() -> sources.remove(exchangeId))); - return sourceHandler; - } - /** * Opens a remote sink handler on the remote node for the given session ID. */ public static void openExchange( TransportService transportService, - DiscoveryNode targetNode, + Transport.Connection connection, String sessionId, int exchangeBuffer, Executor responseExecutor, ActionListener<Void> listener ) { transportService.sendRequest( - targetNode, + connection, OPEN_EXCHANGE_ACTION_NAME, new OpenExchangeRequest(sessionId, exchangeBuffer), + TransportRequestOptions.EMPTY, new ActionListenerResponseHandler<>(listener.map(unused -> null), in -> TransportResponse.Empty.INSTANCE, responseExecutor) ); } @@ -193,15 +177,11 @@ private class ExchangeTransportAction implements TransportRequestHandler<ExchangeRequest> { @Override public void messageReceived(ExchangeRequest request, TransportChannel channel, Task task) { final String exchangeId = request.exchangeId(); - ActionListener<ExchangeResponse> listener = new OwningChannelActionListener<>(channel); + ActionListener<ExchangeResponse> listener = new ChannelActionListener<>(channel); final ExchangeSinkHandler sinkHandler = sinks.get(exchangeId); if (sinkHandler == null) { listener.onResponse(new ExchangeResponse(null, true)); } else { - // the data-node request hasn't arrived yet; use the task framework to cancel the request if needed. 
- if (sinkHandler.hasData() == false) { - ((CancellableTask) task).addListener(() -> sinkHandler.onFailure(new TaskCancelledException("task cancelled"))); } sinkHandler.fetchPageAsync(request.sourcesFinished(), listener); } } @@ -251,16 +231,16 @@ protected void runInternal() { * @param parentTask the parent task that initialized the ESQL request * @param exchangeId the exchange ID * @param transportService the transport service - * @param remoteNode the node where the remote exchange sink is located + * @param conn the connection to the remote node where the remote exchange sink is located */ - public RemoteSink newRemoteSink(Task parentTask, String exchangeId, TransportService transportService, DiscoveryNode remoteNode) { - return new TransportRemoteSink(transportService, blockFactory, remoteNode, parentTask, exchangeId, executor); + public RemoteSink newRemoteSink(Task parentTask, String exchangeId, TransportService transportService, Transport.Connection conn) { - return new TransportRemoteSink(transportService, blockFactory, conn, parentTask, exchangeId, executor); + return new TransportRemoteSink(transportService, blockFactory, conn, parentTask, exchangeId, executor); } record TransportRemoteSink( TransportService transportService, BlockFactory blockFactory, - DiscoveryNode node, + Transport.Connection connection, Task parentTask, String exchangeId, Executor responseExecutor @@ -269,7 +249,7 @@ record TransportRemoteSink( @Override public void fetchPageAsync(boolean allSourcesFinished, ActionListener<ExchangeResponse> listener) { transportService.sendChildRequest( - node, + connection, EXCHANGE_ACTION_NAME, new ExchangeRequest(exchangeId, allSourcesFinished), parentTask, @@ -285,7 +265,7 @@ public void fetchPageAsync(boolean allSourcesFinished, ActionListener<ExchangeResponse> […] List<Page> input = CannedSourceOperator.collectPages(simpleInput(blockFactory, end)); - List<Page> origInput = BlockTestUtils.deepCopyOf(input, BlockFactory.getNonBreakingInstance()); + List<Page> origInput = BlockTestUtils.deepCopyOf(input, TestBlockFactory.getNonBreakingInstance()); try ( Driver d = new Driver( @@ -120,7 +121,7 @@ public final void testMultivalued() { List<Page> input = CannedSourceOperator.collectPages( new PositionMergingSourceOperator(simpleInput(driverContext.blockFactory(), end), blockFactory) ); - List<Page> origInput = BlockTestUtils.deepCopyOf(input, BlockFactory.getNonBreakingInstance()); + List<Page> origInput = BlockTestUtils.deepCopyOf(input, TestBlockFactory.getNonBreakingInstance()); assertSimpleOutput(origInput, drive(simple(BigArrays.NON_RECYCLING_INSTANCE).get(driverContext), input.iterator(), driverContext)); } @@ -134,7 +135,7 @@ public final void testMultivaluedWithNulls() { List<Page> input = CannedSourceOperator.collectPages( new PositionMergingSourceOperator( new NullInsertingSourceOperator(simpleInput(driverContext.blockFactory(), end), blockFactory), blockFactory ) ); - List<Page> origInput = BlockTestUtils.deepCopyOf(input, BlockFactory.getNonBreakingInstance()); + List<Page> origInput = BlockTestUtils.deepCopyOf(input, TestBlockFactory.getNonBreakingInstance()); assertSimpleOutput(origInput, drive(simple(BigArrays.NON_RECYCLING_INSTANCE).get(driverContext), input.iterator(), driverContext)); } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/GroupingAggregatorFunctionTestCase.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/GroupingAggregatorFunctionTestCase.java index 6afd285987696..730a8d1ee66a2 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/GroupingAggregatorFunctionTestCase.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/GroupingAggregatorFunctionTestCase.java @@ -23,6 +23,7 @@ import org.elasticsearch.compute.data.LongBlock; import 
org.elasticsearch.compute.data.LongVector; import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.data.TestBlockFactory; import org.elasticsearch.compute.operator.CannedSourceOperator; import org.elasticsearch.compute.operator.DriverContext; import org.elasticsearch.compute.operator.ForkingOperatorTestCase; @@ -146,8 +147,8 @@ protected final void assertSimpleOutput(List<Page> input, List<Page> results) { } @Override - protected ByteSizeValue smallEnoughToCircuitBreak() { - return ByteSizeValue.ofBytes(between(1, 32)); + protected ByteSizeValue memoryLimitForSimple() { + return ByteSizeValue.ofBytes(100); } public final void testNullGroupsAndValues() { @@ -157,7 +158,7 @@ public final void testNullGroupsAndValues() { List<Page> input = CannedSourceOperator.collectPages( new NullInsertingSourceOperator(simpleInput(driverContext.blockFactory(), end), blockFactory) ); - List<Page> origInput = BlockTestUtils.deepCopyOf(input, BlockFactory.getNonBreakingInstance()); + List<Page> origInput = BlockTestUtils.deepCopyOf(input, TestBlockFactory.getNonBreakingInstance()); List<Page> results = drive( simple(nonBreakingBigArrays().withCircuitBreaking()).get(driverContext), input.iterator(), @@ -171,7 +172,7 @@ public final void testNullGroups() { BlockFactory blockFactory = driverContext.blockFactory(); int end = between(50, 60); List<Page> input = CannedSourceOperator.collectPages(nullGroups(simpleInput(blockFactory, end), blockFactory)); - List<Page> origInput = BlockTestUtils.deepCopyOf(input, BlockFactory.getNonBreakingInstance()); + List<Page> origInput = BlockTestUtils.deepCopyOf(input, TestBlockFactory.getNonBreakingInstance()); List<Page> results = drive( simple(nonBreakingBigArrays().withCircuitBreaking()).get(driverContext), input.iterator(), @@ -189,7 +190,7 @@ public void testAllKeyNulls() { input.add(p); } else { Block[] blocks = new Block[p.getBlockCount()]; - blocks[0] = Block.constantNullBlock(p.getPositionCount(), blockFactory); + blocks[0] = blockFactory.newConstantNullBlock(p.getPositionCount()); for (int i = 1; i < blocks.length; i++) { blocks[i] = p.getBlock(i); } @@ -197,7 +198,7 @@ public void testAllKeyNulls() { input.add(new Page(blocks)); } } - List<Page> origInput = BlockTestUtils.deepCopyOf(input, BlockFactory.getNonBreakingInstance()); + List<Page> origInput = BlockTestUtils.deepCopyOf(input, TestBlockFactory.getNonBreakingInstance()); List<Page> results = drive( simple(nonBreakingBigArrays().withCircuitBreaking()).get(driverContext), input.iterator(), @@ -232,7 +233,7 @@ public final void testNullValues() { BlockFactory blockFactory = driverContext.blockFactory(); int end = between(50, 60); List<Page> input = CannedSourceOperator.collectPages(nullValues(simpleInput(driverContext.blockFactory(), end), blockFactory)); - List<Page> origInput = BlockTestUtils.deepCopyOf(input, BlockFactory.getNonBreakingInstance()); + List<Page> origInput = BlockTestUtils.deepCopyOf(input, TestBlockFactory.getNonBreakingInstance()); List<Page> results = drive( simple(nonBreakingBigArrays().withCircuitBreaking()).get(driverContext), input.iterator(), @@ -246,7 +247,7 @@ public final void testNullValuesInitialIntermediateFinal() { BlockFactory blockFactory = driverContext.blockFactory(); int end = between(50, 60); List<Page> input = CannedSourceOperator.collectPages(nullValues(simpleInput(driverContext.blockFactory(), end), blockFactory)); - List<Page> origInput = BlockTestUtils.deepCopyOf(input, BlockFactory.getNonBreakingInstance()); + List<Page> origInput = BlockTestUtils.deepCopyOf(input, TestBlockFactory.getNonBreakingInstance()); List<Page> results = drive( List.of( 
simpleWithMode(nonBreakingBigArrays().withCircuitBreaking(), AggregatorMode.INITIAL).get(driverContext), @@ -278,7 +279,7 @@ public final void testMultivalued() { List<Page> input = CannedSourceOperator.collectPages( mergeValues(simpleInput(driverContext.blockFactory(), end), driverContext.blockFactory()) ); - List<Page> origInput = BlockTestUtils.deepCopyOf(input, BlockFactory.getNonBreakingInstance()); + List<Page> origInput = BlockTestUtils.deepCopyOf(input, TestBlockFactory.getNonBreakingInstance()); List<Page> results = drive( simple(nonBreakingBigArrays().withCircuitBreaking()).get(driverContext), input.iterator(), @@ -294,7 +295,7 @@ public final void testMulitvaluedNullGroupsAndValues() { List<Page> input = CannedSourceOperator.collectPages( new NullInsertingSourceOperator(mergeValues(simpleInput(driverContext.blockFactory(), end), blockFactory), blockFactory) ); - List<Page> origInput = BlockTestUtils.deepCopyOf(input, BlockFactory.getNonBreakingInstance()); + List<Page> origInput = BlockTestUtils.deepCopyOf(input, TestBlockFactory.getNonBreakingInstance()); List<Page> results = drive( simple(nonBreakingBigArrays().withCircuitBreaking()).get(driverContext), input.iterator(), @@ -309,7 +310,7 @@ public void testMulitvaluedNullGroup() { int end = between(1, 2); // TODO revert var inputOperator = nullGroups(mergeValues(simpleInput(driverContext.blockFactory(), end), blockFactory), blockFactory); List<Page> input = CannedSourceOperator.collectPages(inputOperator); - List<Page> origInput = BlockTestUtils.deepCopyOf(input, BlockFactory.getNonBreakingInstance()); + List<Page> origInput = BlockTestUtils.deepCopyOf(input, TestBlockFactory.getNonBreakingInstance()); List<Page> results = drive( simple(nonBreakingBigArrays().withCircuitBreaking()).get(driverContext), input.iterator(), @@ -325,7 +326,7 @@ public final void testMulitvaluedNullValues() { List<Page> input = CannedSourceOperator.collectPages( nullValues(mergeValues(simpleInput(blockFactory, end), blockFactory), blockFactory) ); - List<Page> origInput = BlockTestUtils.deepCopyOf(input, BlockFactory.getNonBreakingInstance()); + List<Page> origInput = BlockTestUtils.deepCopyOf(input, TestBlockFactory.getNonBreakingInstance()); List<Page> results = drive( simple(nonBreakingBigArrays().withCircuitBreaking()).get(driverContext), input.iterator(), @@ -366,18 +367,20 @@ public final void testNullOnlyInputInitialIntermediateFinal() { * Run the aggregation passing only null values. 
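+ * Drives a single-position page whose group key is randomly {@code 1} or {@code null} and whose
+ * value block is a constant null, then checks the single output position with {@code assertOutputFromNullOnly}.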
*/ private void assertNullOnly(List<Operator> operators, DriverContext driverContext) { - LongBlock.Builder groupBuilder = LongBlock.newBlockBuilder(1); - if (randomBoolean()) { - groupBuilder.appendLong(1); - } else { - groupBuilder.appendNull(); - } - List<Page> source = List.of(new Page(groupBuilder.build(), Block.constantNullBlock(1))); - List<Page> results = drive(operators, source.iterator(), driverContext); + BlockFactory blockFactory = driverContext.blockFactory(); + try (var groupBuilder = blockFactory.newLongBlockBuilder(1)) { + if (randomBoolean()) { + groupBuilder.appendLong(1); + } else { + groupBuilder.appendNull(); + } + List<Page> source = List.of(new Page(groupBuilder.build(), blockFactory.newConstantNullBlock(1))); + List<Page> results = drive(operators, source.iterator(), driverContext); - assertThat(results, hasSize(1)); - Block resultBlock = results.get(0).getBlock(1); - assertOutputFromNullOnly(resultBlock, 0); + assertThat(results, hasSize(1)); + Block resultBlock = results.get(0).getBlock(1); + assertOutputFromNullOnly(resultBlock, 0); + } } public final void testNullSome() { @@ -465,7 +468,7 @@ protected Block merge(int blockIndex, Block block) { if (blockIndex != 0) { return super.merge(blockIndex, block); } - Block.Builder builder = block.elementType().newBlockBuilder(block.getPositionCount() / 2); + Block.Builder builder = block.elementType().newBlockBuilder(block.getPositionCount() / 2, blockFactory); for (int p = 0; p + 1 < block.getPositionCount(); p += 2) { builder.copyFrom(block, p, p + 1); } @@ -565,7 +568,7 @@ public AddInput prepareProcessPage(SeenGroupIds ignoredSeenGroupIds, Page page) @Override public void add(int positionOffset, IntBlock groupIds) { for (int offset = 0; offset < groupIds.getPositionCount(); offset += emitChunkSize) { - IntBlock.Builder builder = IntBlock.newBlockBuilder(emitChunkSize); + IntBlock.Builder builder = blockFactory().newIntBlockBuilder(emitChunkSize); int endP = Math.min(groupIds.getPositionCount(), offset + emitChunkSize); for (int p = offset; p < endP; p++) { int start = groupIds.getFirstValueIndex(p); @@ -603,7 +606,7 @@ public void add(int positionOffset, IntVector groupIds) { seenGroupIds.set(group); chunk[count++] = group; } - BlockFactory blockFactory = BlockFactory.getNonBreakingInstance(); // TODO: just for compile + BlockFactory blockFactory = TestBlockFactory.getNonBreakingInstance(); // TODO: just for compile delegateAddInput.add(positionOffset + offset, blockFactory.newIntArrayVector(chunk, count)); } } @@ -618,7 +621,7 @@ public void addIntermediateInput(int positionOffset, IntVector groupIds, Page pa for (int i = offset; i < Math.min(groupIds.getPositionCount(), offset + emitChunkSize); i++) { chunk[count++] = groupIds.getInt(i); } - BlockFactory blockFactory = BlockFactory.getNonBreakingInstance(); // TODO: just for compile + BlockFactory blockFactory = TestBlockFactory.getNonBreakingInstance(); // TODO: just for compile delegate.addIntermediateInput(positionOffset + offset, blockFactory.newIntArrayVector(chunk, count), page); } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/blockhash/BlockHashTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/blockhash/BlockHashTests.java index 567f58d0dee75..0ccf2d3af04d9 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/blockhash/BlockHashTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/aggregation/blockhash/BlockHashTests.java @@ -18,7 
+18,6 @@ import org.elasticsearch.common.util.PageCacheRecycler; import org.elasticsearch.compute.aggregation.GroupingAggregatorFunction; import org.elasticsearch.compute.data.Block; -import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.BooleanBlock; import org.elasticsearch.compute.data.BytesRefBlock; import org.elasticsearch.compute.data.DoubleBlock; @@ -27,6 +26,7 @@ import org.elasticsearch.compute.data.LongBlock; import org.elasticsearch.compute.data.MockBlockFactory; import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.data.TestBlockFactory; import org.elasticsearch.compute.operator.DriverContext; import org.elasticsearch.compute.operator.HashAggregationOperator; import org.elasticsearch.core.Releasables; @@ -498,12 +498,12 @@ public void testBooleanHashTrueOnly() { assertThat(ordsAndKeys.description, startsWith("PackedValuesBlockHash{groups=[0:BOOLEAN], entries=1, size=")); assertOrds(ordsAndKeys.ords, 0, 0, 0, 0); assertKeys(ordsAndKeys.keys, true); - assertThat(ordsAndKeys.nonEmpty, equalTo(IntVector.newVectorBuilder(1).appendInt(0).build())); + assertThat(ordsAndKeys.nonEmpty, equalTo(TestBlockFactory.getNonBreakingInstance().newConstantIntVector(0, 1))); } else { assertThat(ordsAndKeys.description, equalTo("BooleanBlockHash{channel=0, seenFalse=false, seenTrue=true, seenNull=false}")); assertOrds(ordsAndKeys.ords, 2, 2, 2, 2); assertKeys(ordsAndKeys.keys, true); - assertThat(ordsAndKeys.nonEmpty, equalTo(IntVector.newVectorBuilder(1).appendInt(2).build())); + assertThat(ordsAndKeys.nonEmpty, equalTo(TestBlockFactory.getNonBreakingInstance().newConstantIntVector(2, 1))); } }, blockFactory.newBooleanArrayVector(values, values.length).asBlock()); } @@ -514,11 +514,11 @@ public void testBooleanHashFalseOnly() { if (forcePackedHash) { assertThat(ordsAndKeys.description, startsWith("PackedValuesBlockHash{groups=[0:BOOLEAN], entries=1, size=")); assertOrds(ordsAndKeys.ords, 0, 0, 0, 0); - assertThat(ordsAndKeys.nonEmpty, equalTo(IntVector.newVectorBuilder(1).appendInt(0).build())); + assertThat(ordsAndKeys.nonEmpty, equalTo(TestBlockFactory.getNonBreakingInstance().newConstantIntVector(0, 1))); } else { assertThat(ordsAndKeys.description, equalTo("BooleanBlockHash{channel=0, seenFalse=true, seenTrue=false, seenNull=false}")); assertOrds(ordsAndKeys.ords, 1, 1, 1, 1); - assertThat(ordsAndKeys.nonEmpty, equalTo(IntVector.newVectorBuilder(1).appendInt(1).build())); + assertThat(ordsAndKeys.nonEmpty, equalTo(TestBlockFactory.getNonBreakingInstance().newConstantIntVector(1, 1))); } assertKeys(ordsAndKeys.keys, false); }, blockFactory.newBooleanArrayVector(values, values.length).asBlock()); @@ -1262,6 +1262,6 @@ static CircuitBreakerService mockBreakerService(CircuitBreaker breaker) { } IntVector intRange(int startInclusive, int endExclusive) { - return IntVector.range(startInclusive, endExclusive, BlockFactory.getNonBreakingInstance()); + return IntVector.range(startInclusive, endExclusive, TestBlockFactory.getNonBreakingInstance()); } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BasicBlockTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BasicBlockTests.java index 2a49feeab9a30..4cde556120465 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BasicBlockTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BasicBlockTests.java @@ -11,9 +11,15 @@ import 
org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.BitArray; import org.elasticsearch.common.util.BytesRefArray; +import org.elasticsearch.common.util.DoubleArray; +import org.elasticsearch.common.util.IntArray; +import org.elasticsearch.common.util.LongArray; import org.elasticsearch.common.util.MockBigArrays; import org.elasticsearch.common.util.PageCacheRecycler; +import org.elasticsearch.core.RefCounted; +import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Releasables; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.test.ESTestCase; @@ -55,52 +61,58 @@ public void testEmpty() { void testEmpty(BlockFactory bf) { assertZeroPositionsAndRelease(bf.newIntArrayBlock(new int[] {}, 0, new int[] {}, new BitSet(), randomOrdering())); - assertZeroPositionsAndRelease(IntBlock.newBlockBuilder(0, bf).build()); + assertZeroPositionsAndRelease(bf.newIntBlockBuilder(0).build()); assertZeroPositionsAndRelease(bf.newIntArrayVector(new int[] {}, 0)); - assertZeroPositionsAndRelease(IntVector.newVectorBuilder(0, bf).build()); + assertZeroPositionsAndRelease(bf.newIntVectorBuilder(0).build()); assertZeroPositionsAndRelease(bf.newLongArrayBlock(new long[] {}, 0, new int[] {}, new BitSet(), randomOrdering())); - assertZeroPositionsAndRelease(LongBlock.newBlockBuilder(0, bf).build()); + assertZeroPositionsAndRelease(bf.newLongBlockBuilder(0).build()); assertZeroPositionsAndRelease(bf.newLongArrayVector(new long[] {}, 0)); - assertZeroPositionsAndRelease(LongVector.newVectorBuilder(0, bf).build()); + assertZeroPositionsAndRelease(bf.newLongVectorBuilder(0).build()); assertZeroPositionsAndRelease(bf.newDoubleArrayBlock(new double[] {}, 0, new int[] {}, new BitSet(), randomOrdering())); - assertZeroPositionsAndRelease(DoubleBlock.newBlockBuilder(0, bf).build()); + assertZeroPositionsAndRelease(bf.newDoubleBlockBuilder(0).build()); assertZeroPositionsAndRelease(bf.newDoubleArrayVector(new double[] {}, 0)); - assertZeroPositionsAndRelease(DoubleVector.newVectorBuilder(0, bf).build()); + assertZeroPositionsAndRelease(bf.newDoubleVectorBuilder(0).build()); assertZeroPositionsAndRelease( bf.newBytesRefArrayBlock(new BytesRefArray(0, bf.bigArrays()), 0, new int[] {}, new BitSet(), randomOrdering()) ); - assertZeroPositionsAndRelease(BytesRefBlock.newBlockBuilder(0, bf).build()); + assertZeroPositionsAndRelease(bf.newBytesRefBlockBuilder(0).build()); assertZeroPositionsAndRelease(bf.newBytesRefArrayVector(new BytesRefArray(0, bf.bigArrays()), 0)); - assertZeroPositionsAndRelease(BytesRefVector.newVectorBuilder(0, bf).build()); + assertZeroPositionsAndRelease(bf.newBytesRefVectorBuilder(0).build()); assertZeroPositionsAndRelease(bf.newBooleanArrayBlock(new boolean[] {}, 0, new int[] {}, new BitSet(), randomOrdering())); - assertZeroPositionsAndRelease(BooleanBlock.newBlockBuilder(0, bf).build()); + assertZeroPositionsAndRelease(bf.newBooleanBlockBuilder(0).build()); assertZeroPositionsAndRelease(bf.newBooleanArrayVector(new boolean[] {}, 0)); - assertZeroPositionsAndRelease(BooleanVector.newVectorBuilder(0, bf).build()); + assertZeroPositionsAndRelease(bf.newBooleanVectorBuilder(0).build()); } public void testSmallSingleValueDenseGrowthInt() { for (int initialSize : List.of(0, 1, 2, 3, 4, 5)) { - try (var blockBuilder = IntBlock.newBlockBuilder(initialSize)) { + try (var blockBuilder = 
blockFactory.newIntBlockBuilder(initialSize)) { IntStream.range(0, 10).forEach(blockBuilder::appendInt); - assertSingleValueDenseBlock(blockBuilder.build()); + IntBlock block = blockBuilder.build(); + assertSingleValueDenseBlock(block); + block.close(); } } } public void testSmallSingleValueDenseGrowthLong() { for (int initialSize : List.of(0, 1, 2, 3, 4, 5)) { - try (var blockBuilder = LongBlock.newBlockBuilder(initialSize)) { + try (var blockBuilder = blockFactory.newLongBlockBuilder(initialSize)) { IntStream.range(0, 10).forEach(blockBuilder::appendLong); - assertSingleValueDenseBlock(blockBuilder.build()); + LongBlock block = blockBuilder.build(); + assertSingleValueDenseBlock(block); + block.close(); } } } public void testSmallSingleValueDenseGrowthDouble() { for (int initialSize : List.of(0, 1, 2, 3, 4, 5)) { - try (var blockBuilder = DoubleBlock.newBlockBuilder(initialSize)) { + try (var blockBuilder = blockFactory.newDoubleBlockBuilder(initialSize)) { IntStream.range(0, 10).forEach(blockBuilder::appendDouble); - assertSingleValueDenseBlock(blockBuilder.build()); + DoubleBlock block = blockBuilder.build(); + assertSingleValueDenseBlock(block); + block.close(); } } } @@ -108,18 +120,22 @@ public void testSmallSingleValueDenseGrowthDouble() { public void testSmallSingleValueDenseGrowthBytesRef() { final BytesRef NULL_VALUE = new BytesRef(); for (int initialSize : List.of(0, 1, 2, 3, 4, 5)) { - try (var blockBuilder = BytesRefBlock.newBlockBuilder(initialSize)) { + try (var blockBuilder = blockFactory.newBytesRefBlockBuilder(initialSize)) { IntStream.range(0, 10).mapToObj(i -> NULL_VALUE).forEach(blockBuilder::appendBytesRef); - assertSingleValueDenseBlock(blockBuilder.build()); + BytesRefBlock block = blockBuilder.build(); + assertSingleValueDenseBlock(block); + block.close(); } } } public void testSmallSingleValueDenseGrowthBoolean() { for (int initialSize : List.of(0, 1, 2, 3, 4, 5)) { - try (var blockBuilder = BooleanBlock.newBlockBuilder(initialSize)) { + try (var blockBuilder = blockFactory.newBooleanBlockBuilder(initialSize)) { IntStream.range(0, 10).forEach(i -> blockBuilder.appendBoolean(i % 3 == 0)); - assertSingleValueDenseBlock(blockBuilder.build()); + BooleanBlock block = blockBuilder.build(); + assertSingleValueDenseBlock(block); + block.close(); } } } @@ -156,7 +172,7 @@ public void testIntBlock() { IntBlock block; if (randomBoolean()) { final int builderEstimateSize = randomBoolean() ? 
randomIntBetween(1, positionCount) : positionCount; - try (IntBlock.Builder blockBuilder = IntBlock.newBlockBuilder(builderEstimateSize, blockFactory)) { + try (IntBlock.Builder blockBuilder = blockFactory.newIntBlockBuilder(builderEstimateSize)) { IntStream.range(0, positionCount).forEach(blockBuilder::appendInt); block = blockBuilder.build(); } @@ -171,7 +187,7 @@ public void testIntBlock() { assertThat(pos, is(block.getInt(pos))); assertSingleValueDenseBlock(block); - try (IntBlock.Builder blockBuilder = IntBlock.newBlockBuilder(1, blockFactory)) { + try (IntBlock.Builder blockBuilder = blockFactory.newIntBlockBuilder(1)) { IntBlock copy = blockBuilder.copyFrom(block, 0, block.getPositionCount()).build(); assertThat(copy, equalTo(block)); releaseAndAssertBreaker(block, copy); @@ -180,20 +196,19 @@ public void testIntBlock() { if (positionCount > 1) { assertNullValues( positionCount, - size -> IntBlock.newBlockBuilder(size, blockFactory), - (bb, value) -> bb.appendInt(value), + blockFactory::newIntBlockBuilder, + IntBlock.Builder::appendInt, position -> position, IntBlock.Builder::build, (randomNonNullPosition, b) -> { - assertThat((int) randomNonNullPosition, is(b.getInt(randomNonNullPosition.intValue()))); + assertThat(randomNonNullPosition, is(b.getInt(randomNonNullPosition.intValue()))); } ); } try ( - IntVector.Builder vectorBuilder = IntVector.newVectorBuilder( - randomBoolean() ? randomIntBetween(1, positionCount) : positionCount, - blockFactory + IntVector.Builder vectorBuilder = blockFactory.newIntVectorBuilder( + randomBoolean() ? randomIntBetween(1, positionCount) : positionCount ) ) { IntStream.range(0, positionCount).forEach(vectorBuilder::appendInt); @@ -209,12 +224,7 @@ public void testConstantIntBlock() { assertThat(breaker.getUsed(), is(0L)); int positionCount = randomIntBetween(1, 16 * 1024); int value = randomInt(); - IntBlock block; - if (randomBoolean()) { - block = IntBlock.newConstantBlockWith(value, positionCount, blockFactory); - } else { - block = blockFactory.newConstantIntBlockWith(value, positionCount); - } + IntBlock block = blockFactory.newConstantIntBlockWith(value, positionCount); assertThat(positionCount, is(block.getPositionCount())); assertThat(value, is(block.getInt(0))); assertThat(value, is(block.getInt(positionCount - 1))); @@ -255,8 +265,8 @@ public void testLongBlock() { if (positionCount > 1) { assertNullValues( positionCount, - size -> LongBlock.newBlockBuilder(size, blockFactory), - (bb, value) -> bb.appendLong(value), + blockFactory::newLongBlockBuilder, + LongBlock.Builder::appendLong, position -> (long) position, LongBlock.Builder::build, (randomNonNullPosition, b) -> { @@ -280,12 +290,7 @@ public void testConstantLongBlock() { assertThat(breaker.getUsed(), is(0L)); int positionCount = randomIntBetween(1, 16 * 1024); long value = randomLong(); - LongBlock block; - if (randomBoolean()) { - block = LongBlock.newConstantBlockWith(value, positionCount, blockFactory); - } else { - block = blockFactory.newConstantLongBlockWith(value, positionCount); - } + LongBlock block = blockFactory.newConstantLongBlockWith(value, positionCount); assertThat(positionCount, is(block.getPositionCount())); assertThat(value, is(block.getLong(0))); assertThat(value, is(block.getLong(positionCount - 1))); @@ -318,7 +323,7 @@ public void testDoubleBlock() { assertThat((double) pos, is(block.getDouble(pos))); assertSingleValueDenseBlock(block); - try (DoubleBlock.Builder blockBuilder = DoubleBlock.newBlockBuilder(1)) { + try (DoubleBlock.Builder blockBuilder = 
blockFactory.newDoubleBlockBuilder(1)) { DoubleBlock copy = blockBuilder.copyFrom(block, 0, block.getPositionCount()).build(); assertThat(copy, equalTo(block)); releaseAndAssertBreaker(block, copy); @@ -327,8 +332,8 @@ public void testDoubleBlock() { if (positionCount > 1) { assertNullValues( positionCount, - size -> DoubleBlock.newBlockBuilder(size, blockFactory), - (bb, value) -> bb.appendDouble(value), + blockFactory::newDoubleBlockBuilder, + DoubleBlock.Builder::appendDouble, position -> (double) position, DoubleBlock.Builder::build, (randomNonNullPosition, b) -> { @@ -354,12 +359,7 @@ public void testConstantDoubleBlock() { for (int i = 0; i < 1000; i++) { int positionCount = randomIntBetween(1, 16 * 1024); double value = randomDouble(); - DoubleBlock block; - if (randomBoolean()) { - block = DoubleBlock.newConstantBlockWith(value, positionCount, blockFactory); - } else { - block = blockFactory.newConstantDoubleBlockWith(value, positionCount); - } + DoubleBlock block = blockFactory.newConstantDoubleBlockWith(value, positionCount); assertThat(positionCount, is(block.getPositionCount())); assertThat(value, is(block.getDouble(0))); assertThat(value, is(block.getDouble(positionCount - 1))); @@ -403,7 +403,7 @@ public void testBytesRefBlock() { } assertSingleValueDenseBlock(block); - try (BytesRefBlock.Builder blockBuilder = BytesRefBlock.newBlockBuilder(1)) { + try (BytesRefBlock.Builder blockBuilder = blockFactory.newBytesRefBlockBuilder(1)) { BytesRefBlock copy = blockBuilder.copyFrom(block, 0, block.getPositionCount()).build(); assertThat(copy, equalTo(block)); releaseAndAssertBreaker(block, copy); @@ -412,8 +412,8 @@ public void testBytesRefBlock() { if (positionCount > 1) { assertNullValues( positionCount, - size -> BytesRefBlock.newBlockBuilder(size, blockFactory), - (bb, value) -> bb.appendBytesRef(value), + blockFactory::newBytesRefBlockBuilder, + BytesRefBlock.Builder::appendBytesRef, position -> values[position], BytesRefBlock.Builder::build, (randomNonNullPosition, b) -> assertThat( @@ -481,12 +481,7 @@ public void testConstantBytesRefBlock() { for (int i = 0; i < 1000; i++) { int positionCount = randomIntBetween(1, 16 * 1024); BytesRef value = new BytesRef(randomByteArrayOfLength(between(1, 20))); - BytesRefBlock block; - if (randomBoolean()) { - block = BytesRefBlock.newConstantBlockWith(value, positionCount, blockFactory); - } else { - block = blockFactory.newConstantBytesRefBlockWith(value, positionCount); - } + BytesRefBlock block = blockFactory.newConstantBytesRefBlockWith(value, positionCount); assertThat(block.getPositionCount(), is(positionCount)); BytesRef bytes = new BytesRef(); @@ -524,7 +519,7 @@ public void testBooleanBlock() { assertThat(block.getBoolean(positionCount - 1), is((positionCount - 1) % 10 == 0)); assertSingleValueDenseBlock(block); - try (BooleanBlock.Builder blockBuilder = BooleanBlock.newBlockBuilder(1)) { + try (BooleanBlock.Builder blockBuilder = blockFactory.newBooleanBlockBuilder(1)) { BooleanBlock copy = blockBuilder.copyFrom(block, 0, block.getPositionCount()).build(); assertThat(copy, equalTo(block)); releaseAndAssertBreaker(block, copy); @@ -533,7 +528,7 @@ public void testBooleanBlock() { if (positionCount > 1) { assertNullValues( positionCount, - size -> BooleanBlock.newBlockBuilder(size, blockFactory), + size -> blockFactory.newBooleanBlockBuilder(size), (bb, value) -> bb.appendBoolean(value), position -> position % 10 == 0, BooleanBlock.Builder::build, @@ -557,12 +552,7 @@ public void testConstantBooleanBlock() { for (int i = 0; i < 
1000; i++) { int positionCount = randomIntBetween(1, 16 * 1024); boolean value = randomBoolean(); - BooleanBlock block; - if (randomBoolean()) { - block = BooleanBlock.newConstantBlockWith(value, positionCount, blockFactory); - } else { - block = blockFactory.newConstantBooleanBlockWith(value, positionCount); - } + BooleanBlock block = blockFactory.newConstantBooleanBlockWith(value, positionCount); assertThat(positionCount, is(block.getPositionCount())); assertThat(block.getBoolean(0), is(value)); assertThat(block.getBoolean(positionCount - 1), is(value)); @@ -576,7 +566,7 @@ public void testConstantNullBlock() { for (int i = 0; i < 100; i++) { assertThat(breaker.getUsed(), is(0L)); int positionCount = randomIntBetween(1, 16 * 1024); - Block block = Block.constantNullBlock(positionCount, blockFactory); + Block block = blockFactory.newConstantNullBlock(positionCount); assertTrue(block.areAllValuesNull()); assertThat(block, instanceOf(BooleanBlock.class)); assertThat(block, instanceOf(IntBlock.class)); @@ -599,7 +589,7 @@ public void testConstantNullBlock() { public void testSingleValueSparseInt() { int positionCount = randomIntBetween(2, 16 * 1024); final int builderEstimateSize = randomBoolean() ? randomIntBetween(1, positionCount) : positionCount; - try (var blockBuilder = IntBlock.newBlockBuilder(builderEstimateSize)) { + try (var blockBuilder = blockFactory.newIntBlockBuilder(builderEstimateSize)) { int actualValueCount = 0; int[] values = new int[positionCount]; @@ -627,13 +617,14 @@ public void testSingleValueSparseInt() { } assertThat(block.nullValuesCount(), is(nullCount)); assertThat(block.asVector(), nullCount > 0 ? is(nullValue()) : is(notNullValue())); + block.close(); } } public void testSingleValueSparseLong() { int positionCount = randomIntBetween(2, 16 * 1024); final int builderEstimateSize = randomBoolean() ? randomIntBetween(1, positionCount) : positionCount; - try (var blockBuilder = LongBlock.newBlockBuilder(builderEstimateSize)) { + try (var blockBuilder = blockFactory.newLongBlockBuilder(builderEstimateSize)) { int actualValueCount = 0; long[] values = new long[positionCount]; @@ -660,13 +651,14 @@ public void testSingleValueSparseLong() { } assertThat(block.nullValuesCount(), is(nullCount)); assertThat(block.asVector(), nullCount > 0 ? is(nullValue()) : is(notNullValue())); + block.close(); } } public void testSingleValueSparseDouble() { int positionCount = randomIntBetween(2, 16 * 1024); final int builderEstimateSize = randomBoolean() ? randomIntBetween(1, positionCount) : positionCount; - try (var blockBuilder = DoubleBlock.newBlockBuilder(builderEstimateSize)) { + try (var blockBuilder = blockFactory.newDoubleBlockBuilder(builderEstimateSize)) { int actualValueCount = 0; double[] values = new double[positionCount]; @@ -693,13 +685,14 @@ public void testSingleValueSparseDouble() { } assertThat(block.nullValuesCount(), is(nullCount)); assertThat(block.asVector(), nullCount > 0 ? is(nullValue()) : is(notNullValue())); + block.close(); } } public void testSingleValueSparseBoolean() { int positionCount = randomIntBetween(2, 16 * 1024); final int builderEstimateSize = randomBoolean() ? 
randomIntBetween(1, positionCount) : positionCount; - try (var blockBuilder = BooleanBlock.newBlockBuilder(builderEstimateSize)) { + try (var blockBuilder = blockFactory.newBooleanBlockBuilder(builderEstimateSize)) { boolean[] values = new boolean[positionCount]; int actualValueCount = 0; @@ -726,6 +719,7 @@ public void testSingleValueSparseBoolean() { } assertThat(block.nullValuesCount(), is(nullCount)); assertThat(block.asVector(), nullCount > 0 ? is(nullValue()) : is(notNullValue())); + block.close(); } } @@ -733,8 +727,8 @@ public void testToStringSmall() { final int estimatedSize = randomIntBetween(1024, 4096); try ( - var boolBlock = BooleanBlock.newBlockBuilder(estimatedSize).appendBoolean(true).appendBoolean(false).build(); - var boolVector = BooleanVector.newVectorBuilder(estimatedSize).appendBoolean(true).appendBoolean(false).build() + var boolBlock = blockFactory.newBooleanBlockBuilder(estimatedSize).appendBoolean(true).appendBoolean(false).build(); + var boolVector = blockFactory.newBooleanVectorBuilder(estimatedSize).appendBoolean(true).appendBoolean(false).build() ) { for (Object obj : List.of(boolVector, boolBlock, boolBlock.asVector())) { String s = obj.toString(); @@ -744,8 +738,8 @@ public void testToStringSmall() { } try ( - var intBlock = IntBlock.newBlockBuilder(estimatedSize).appendInt(1).appendInt(2).build(); - var intVector = IntVector.newVectorBuilder(estimatedSize).appendInt(1).appendInt(2).build() + var intBlock = blockFactory.newIntBlockBuilder(estimatedSize).appendInt(1).appendInt(2).build(); + var intVector = blockFactory.newIntVectorBuilder(estimatedSize).appendInt(1).appendInt(2).build() ) { for (Object obj : List.of(intVector, intBlock, intBlock.asVector())) { String s = obj.toString(); @@ -753,25 +747,38 @@ public void testToStringSmall() { assertThat(s, containsString("positions=2")); } for (IntBlock block : List.of(intBlock, intVector.asBlock())) { - assertThat(block.filter(0).toString(), containsString("IntVectorBlock[vector=ConstantIntVector[positions=1, value=1]]")); - assertThat(block.filter(1).toString(), containsString("IntVectorBlock[vector=ConstantIntVector[positions=1, value=2]]")); - assertThat( - block.filter(0, 1).toString(), - containsString("IntVectorBlock[vector=IntArrayVector[positions=2, values=[1, 2]]]") - ); - assertThat(block.filter().toString(), containsString("IntVectorBlock[vector=IntArrayVector[positions=0, values=[]]]")); + try (var filter = block.filter(0)) { + assertThat(filter.toString(), containsString("IntVectorBlock[vector=ConstantIntVector[positions=1, value=1]]")); + } + try (var filter = block.filter(1)) { + assertThat(filter.toString(), containsString("IntVectorBlock[vector=ConstantIntVector[positions=1, value=2]]")); + } + try (var filter = block.filter(0, 1)) { + assertThat(filter.toString(), containsString("IntVectorBlock[vector=IntArrayVector[positions=2, values=[1, 2]]]")); + } + try (var filter = block.filter()) { + assertThat(filter.toString(), containsString("IntVectorBlock[vector=IntArrayVector[positions=0, values=[]]]")); + } } for (IntVector vector : List.of(intVector, intBlock.asVector())) { - assertThat(vector.filter(0).toString(), containsString("ConstantIntVector[positions=1, value=1]")); - assertThat(vector.filter(1).toString(), containsString("ConstantIntVector[positions=1, value=2]")); - assertThat(vector.filter(0, 1).toString(), containsString("IntArrayVector[positions=2, values=[1, 2]]")); - assertThat(vector.filter().toString(), containsString("IntArrayVector[positions=0, values=[]]")); + try (var 
filter = vector.filter(0)) { + assertThat(filter.toString(), containsString("ConstantIntVector[positions=1, value=1]")); + } + try (IntVector filter = vector.filter(1)) { + assertThat(filter.toString(), containsString("ConstantIntVector[positions=1, value=2]")); + } + try (IntVector filter = vector.filter(0, 1)) { + assertThat(filter.toString(), containsString("IntArrayVector[positions=2, values=[1, 2]]")); + } + try (IntVector filter = vector.filter()) { + assertThat(filter.toString(), containsString("IntArrayVector[positions=0, values=[]]")); + } } } try ( - var longBlock = LongBlock.newBlockBuilder(estimatedSize).appendLong(10L).appendLong(20L).build(); - var longVector = LongVector.newVectorBuilder(estimatedSize).appendLong(10L).appendLong(20L).build() + var longBlock = blockFactory.newLongBlockBuilder(estimatedSize).appendLong(10L).appendLong(20L).build(); + var longVector = blockFactory.newLongVectorBuilder(estimatedSize).appendLong(10L).appendLong(20L).build() ) { for (Object obj : List.of(longVector, longBlock, longBlock.asVector())) { String s = obj.toString(); @@ -781,8 +788,8 @@ public void testToStringSmall() { } try ( - var doubleBlock = DoubleBlock.newBlockBuilder(estimatedSize).appendDouble(3.3).appendDouble(4.4).build(); - var doubleVector = DoubleVector.newVectorBuilder(estimatedSize).appendDouble(3.3).appendDouble(4.4).build() + var doubleBlock = blockFactory.newDoubleBlockBuilder(estimatedSize).appendDouble(3.3).appendDouble(4.4).build(); + var doubleVector = blockFactory.newDoubleVectorBuilder(estimatedSize).appendDouble(3.3).appendDouble(4.4).build() ) { for (Object obj : List.of(doubleVector, doubleBlock, doubleBlock.asVector())) { String s = obj.toString(); @@ -793,8 +800,8 @@ public void testToStringSmall() { assert new BytesRef("1a").toString().equals("[31 61]") && new BytesRef("2b").toString().equals("[32 62]"); try ( - var blockBuilder = BytesRefBlock.newBlockBuilder(estimatedSize); - var vectorBuilder = BytesRefVector.newVectorBuilder(estimatedSize) + var blockBuilder = blockFactory.newBytesRefBlockBuilder(estimatedSize); + var vectorBuilder = blockFactory.newBytesRefVectorBuilder(estimatedSize) ) { var bytesRefBlock = blockBuilder.appendBytesRef(new BytesRef("1a")).appendBytesRef(new BytesRef("2b")).build(); var bytesRefVector = vectorBuilder.appendBytesRef(new BytesRef("1a")).appendBytesRef(new BytesRef("2b")).build(); @@ -802,6 +809,7 @@ public void testToStringSmall() { String s = obj.toString(); assertThat(s, containsString("positions=2")); } + Releasables.close(bytesRefBlock, bytesRefVector); } } @@ -846,7 +854,7 @@ public static RandomBlock randomBlock( int maxDupsPerPosition ) { return randomBlock( - BlockFactory.getNonBreakingInstance(), + TestBlockFactory.getNonBreakingInstance(), elementType, positionCount, nullAllowed, @@ -1000,13 +1008,7 @@ void releaseAndAssertBreaker(Vector vector) { static void assertCannotDoubleRelease(Block block) { var ex = expectThrows(IllegalStateException.class, () -> block.close()); - assertThat(ex.getMessage(), containsString("can't release already released block")); - } - - static void assertCannotReleaseIfVectorAlreadyReleased(Block block) { - var ex = expectThrows(IllegalStateException.class, () -> block.close()); - assertThat(ex.getMessage(), containsString("can't release block")); - assertThat(ex.getMessage(), containsString("containing already released vector")); + assertThat(ex.getMessage(), containsString("can't release already released object")); } static void assertCannotReadFromPage(Page page) { @@ -1041,6 
+1043,13 @@ public void testRefCountingArrayBlock() { assertThat(breaker.getUsed(), is(0L)); } + public void testRefCountingBigArrayBlock() { + Block block = randomBigArrayBlock(); + assertThat(breaker.getUsed(), greaterThan(0L)); + assertRefCountingBehavior(block); + assertThat(breaker.getUsed(), is(0L)); + } + public void testRefCountingConstantNullBlock() { Block block = blockFactory.newConstantNullBlock(10); assertThat(breaker.getUsed(), greaterThan(0L)); @@ -1057,83 +1066,165 @@ public void testRefCountingDocBlock() { } public void testRefCountingVectorBlock() { - Block block = randomNonDocVector().asBlock(); + Block block = randomConstantVector().asBlock(); assertThat(breaker.getUsed(), greaterThan(0L)); assertRefCountingBehavior(block); assertThat(breaker.getUsed(), is(0L)); } - // Take a block with exactly 1 reference and assert that ref counting works fine. - static void assertRefCountingBehavior(Block b) { - assertTrue(b.hasReferences()); + public void testRefCountingArrayVector() { + Vector vector = randomArrayVector(); + assertThat(breaker.getUsed(), greaterThan(0L)); + assertRefCountingBehavior(vector); + assertThat(breaker.getUsed(), is(0L)); + } + + public void testRefCountingBigArrayVector() { + Vector vector = randomBigArrayVector(); + assertThat(breaker.getUsed(), greaterThan(0L)); + assertRefCountingBehavior(vector); + assertThat(breaker.getUsed(), is(0L)); + } + + public void testRefCountingConstantVector() { + Vector vector = randomConstantVector(); + assertThat(breaker.getUsed(), greaterThan(0L)); + assertRefCountingBehavior(vector); + assertThat(breaker.getUsed(), is(0L)); + } + + public void testRefCountingDocVector() { + int positionCount = randomIntBetween(0, 100); + DocVector vector = new DocVector(intVector(positionCount), intVector(positionCount), intVector(positionCount), true); + assertThat(breaker.getUsed(), greaterThan(0L)); + assertRefCountingBehavior(vector); + assertThat(breaker.getUsed(), is(0L)); + } + + /** + * Take an object with exactly 1 reference and assert that ref counting works fine. + * Assumes that {@link Releasable#close()} and {@link RefCounted#decRef()} are equivalent. 
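+ * For example (sketch only): a single {@code incRef()} leaves a freshly built object with two references; one {@code decRef()} plus one {@code close()} then release both, after which {@code tryIncRef()} returns false and any further {@code close()} or {@code incRef()} throws {@link IllegalStateException}.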
+ */ + static <T extends RefCounted & Releasable> void assertRefCountingBehavior(T object) { + assertTrue(object.hasReferences()); int numShallowCopies = randomIntBetween(0, 15); for (int i = 0; i < numShallowCopies; i++) { if (randomBoolean()) { - b.incRef(); + object.incRef(); } else { - assertTrue(b.tryIncRef()); + assertTrue(object.tryIncRef()); } } for (int i = 0; i < numShallowCopies; i++) { if (randomBoolean()) { - b.close(); + object.close(); } else { // closing and decRef'ing must be equivalent - assertFalse(b.decRef()); + assertFalse(object.decRef()); } - assertTrue(b.hasReferences()); + assertTrue(object.hasReferences()); } if (randomBoolean()) { - b.close(); + object.close(); } else { - assertTrue(b.decRef()); + assertTrue(object.decRef()); } - assertFalse(b.hasReferences()); - assertFalse(b.tryIncRef()); + assertFalse(object.hasReferences()); + assertFalse(object.tryIncRef()); - expectThrows(IllegalStateException.class, b::close); - expectThrows(IllegalStateException.class, b::incRef); + expectThrows(IllegalStateException.class, object::close); + expectThrows(IllegalStateException.class, object::incRef); } - public void testReleasedVectorInvalidatesBlockState() { - Vector vector = randomNonDocVector(); - Block block = vector.asBlock(); - - int numRefs = randomIntBetween(1, 10); - for (int i = 0; i < numRefs - 1; i++) { - block.incRef(); - } - - vector.close(); - assertEquals(false, block.tryIncRef()); - expectThrows(IllegalStateException.class, block::close); - expectThrows(IllegalStateException.class, block::incRef); + private IntVector intVector(int positionCount) { + return blockFactory.newIntArrayVector(IntStream.range(0, positionCount).toArray(), positionCount); } - public void testReleasedDocVectorInvalidatesBlockState() { + private Vector randomArrayVector() { int positionCount = randomIntBetween(0, 100); - DocVector vector = new DocVector(intVector(positionCount), intVector(positionCount), intVector(positionCount), true); - DocBlock block = vector.asBlock(); + int vectorType = randomIntBetween(0, 4); - int numRefs = randomIntBetween(1, 10); - for (int i = 0; i < numRefs - 1; i++) { - block.incRef(); - } + return switch (vectorType) { + case 0 -> { + boolean[] values = new boolean[positionCount]; + Arrays.fill(values, randomBoolean()); + yield blockFactory.newBooleanArrayVector(values, positionCount); + } + case 1 -> { + BytesRefArray values = new BytesRefArray(positionCount, BigArrays.NON_RECYCLING_INSTANCE); + for (int i = 0; i < positionCount; i++) { + values.append(new BytesRef(randomByteArrayOfLength(between(1, 20)))); + } - vector.close(); - assertEquals(false, block.tryIncRef()); - expectThrows(IllegalStateException.class, block::close); - expectThrows(IllegalStateException.class, block::incRef); + yield blockFactory.newBytesRefArrayVector(values, positionCount); + } + case 2 -> { + double[] values = new double[positionCount]; + Arrays.fill(values, 1.0); + + yield blockFactory.newDoubleArrayVector(values, positionCount); + } + case 3 -> { + int[] values = new int[positionCount]; + Arrays.fill(values, 1); + + yield blockFactory.newIntArrayVector(values, positionCount); + } + default -> { + long[] values = new long[positionCount]; + Arrays.fill(values, 1L); + + yield blockFactory.newLongArrayVector(values, positionCount); + } + }; } - private IntVector intVector(int positionCount) { - return blockFactory.newIntArrayVector(IntStream.range(0, positionCount).toArray(), positionCount); + private Vector randomBigArrayVector() { + int positionCount = randomIntBetween(0, 10000); + int arrayType =
randomIntBetween(0, 3); + + return switch (arrayType) { + case 0 -> { + BitArray values = new BitArray(positionCount, blockFactory.bigArrays()); + for (int i = 0; i < positionCount; i++) { + if (randomBoolean()) { + values.set(i); + } + } + + yield new BooleanBigArrayVector(values, positionCount, blockFactory); + } + case 1 -> { + DoubleArray values = blockFactory.bigArrays().newDoubleArray(positionCount, false); + for (int i = 0; i < positionCount; i++) { + values.set(i, randomDouble()); + } + + yield new DoubleBigArrayVector(values, positionCount, blockFactory); + } + case 2 -> { + IntArray values = blockFactory.bigArrays().newIntArray(positionCount, false); + for (int i = 0; i < positionCount; i++) { + values.set(i, randomInt()); + } + + yield new IntBigArrayVector(values, positionCount, blockFactory); + } + default -> { + LongArray values = blockFactory.bigArrays().newLongArray(positionCount, false); + for (int i = 0; i < positionCount; i++) { + values.set(i, randomLong()); + } + + yield new LongBigArrayVector(values, positionCount, blockFactory); + } + }; } - private Vector randomNonDocVector() { + private Vector randomConstantVector() { int positionCount = randomIntBetween(0, 100); int vectorType = randomIntBetween(0, 4); @@ -1153,7 +1244,7 @@ private Block randomArrayBlock() { return switch (arrayType) { case 0 -> { boolean[] values = new boolean[positionCount]; - Arrays.fill(values, true); + Arrays.fill(values, randomBoolean()); yield blockFactory.newBooleanArrayBlock(values, positionCount, new int[] {}, new BitSet(), randomOrdering()); } @@ -1185,4 +1276,46 @@ private Block randomArrayBlock() { } }; } + + private Block randomBigArrayBlock() { + int positionCount = randomIntBetween(0, 10000); + int arrayType = randomIntBetween(0, 3); + + return switch (arrayType) { + case 0 -> { + BitArray values = new BitArray(positionCount, blockFactory.bigArrays()); + for (int i = 0; i < positionCount; i++) { + if (randomBoolean()) { + values.set(i); + } + } + + yield new BooleanBigArrayBlock(values, positionCount, null, new BitSet(), Block.MvOrdering.UNORDERED, blockFactory); + } + case 1 -> { + DoubleArray values = blockFactory.bigArrays().newDoubleArray(positionCount, false); + for (int i = 0; i < positionCount; i++) { + values.set(i, randomDouble()); + } + + yield new DoubleBigArrayBlock(values, positionCount, null, new BitSet(), Block.MvOrdering.UNORDERED, blockFactory); + } + case 2 -> { + IntArray values = blockFactory.bigArrays().newIntArray(positionCount, false); + for (int i = 0; i < positionCount; i++) { + values.set(i, randomInt()); + } + + yield new IntBigArrayBlock(values, positionCount, null, new BitSet(), Block.MvOrdering.UNORDERED, blockFactory); + } + default -> { + LongArray values = blockFactory.bigArrays().newLongArray(positionCount, false); + for (int i = 0; i < positionCount; i++) { + values.set(i, randomLong()); + } + + yield new LongBigArrayBlock(values, positionCount, null, new BitSet(), Block.MvOrdering.UNORDERED, blockFactory); + } + }; + } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BasicPageTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BasicPageTests.java index 25cd9ed5b9fe5..f76ff0708120b 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BasicPageTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BasicPageTests.java @@ -41,41 +41,72 @@ public void testExceptions() { } public void
testEqualityAndHashCodeSmallInput() { + Page in = new Page(0); EqualsHashCodeTestUtils.checkEqualsAndHashCode( - new Page(0, new Block[] {}), - page -> new Page(0, new Block[] {}), - page -> new Page(1, IntBlock.newConstantBlockWith(1, 1)) + in, + page -> new Page(0), + page -> new Page(1, blockFactory.newConstantIntBlockWith(1, 1)), + Page::releaseBlocks ); + in.releaseBlocks(); + + in = new Page(blockFactory.newIntArrayVector(new int[] {}, 0).asBlock()); EqualsHashCodeTestUtils.checkEqualsAndHashCode( - new Page(new IntArrayVector(new int[] {}, 0).asBlock()), - page -> new Page(new IntArrayVector(new int[] {}, 0).asBlock()), - page -> new Page(new IntArrayVector(new int[] { 1 }, 1).asBlock()) + in, + page -> new Page(blockFactory.newIntArrayVector(new int[] {}, 0).asBlock()), + page -> new Page(blockFactory.newIntArrayVector(new int[] { 1 }, 1).asBlock()), + Page::releaseBlocks ); + in.releaseBlocks(); + + in = new Page(blockFactory.newIntArrayVector(new int[] { 1 }, 0).asBlock()); EqualsHashCodeTestUtils.checkEqualsAndHashCode( - new Page(new IntArrayVector(new int[] { 1 }, 0).asBlock()), - page -> new Page(new IntArrayVector(new int[] { 1 }, 0).asBlock()), - page -> new Page(new IntArrayVector(new int[] { 1 }, 1).asBlock()) + in, + page -> new Page(blockFactory.newIntArrayVector(new int[] { 1 }, 0).asBlock()), + page -> new Page(blockFactory.newIntArrayVector(new int[] { 1 }, 1).asBlock()), + Page::releaseBlocks ); + in.releaseBlocks(); + + in = new Page(blockFactory.newIntArrayVector(new int[] { 1, 1, 1 }, 3).asBlock()); EqualsHashCodeTestUtils.checkEqualsAndHashCode( - new Page(new IntArrayVector(new int[] { 1, 1, 1 }, 3).asBlock()), - page -> new Page(IntBlock.newConstantBlockWith(1, 3)), - page -> new Page(IntBlock.newConstantBlockWith(1, 2)) + in, + page -> new Page(blockFactory.newConstantIntBlockWith(1, 3)), + page -> new Page(blockFactory.newConstantIntBlockWith(1, 2)), + Page::releaseBlocks ); + in.releaseBlocks(); + + in = new Page(blockFactory.newIntArrayVector(IntStream.range(0, 10).toArray(), 10).asBlock()); EqualsHashCodeTestUtils.checkEqualsAndHashCode( - new Page(new IntArrayVector(IntStream.range(0, 10).toArray(), 10).asBlock()), - page -> new Page(new IntArrayVector(IntStream.range(0, 10).toArray(), 10).asBlock()), - page -> new Page(new IntArrayVector(IntStream.range(0, 10).toArray(), 9).asBlock()) + in, + page -> new Page(blockFactory.newIntArrayVector(IntStream.range(0, 10).toArray(), 10).asBlock()), + page -> new Page(blockFactory.newIntArrayVector(IntStream.range(0, 10).toArray(), 9).asBlock()), + Page::releaseBlocks ); + in.releaseBlocks(); + + in = new Page(blockFactory.newIntArrayVector(IntStream.range(0, 100).toArray(), 100).asBlock()); EqualsHashCodeTestUtils.checkEqualsAndHashCode( - new Page(new IntArrayVector(IntStream.range(0, 100).toArray(), 100).asBlock()), - page -> new Page(new IntArrayVector(IntStream.range(0, 100).toArray(), 100).asBlock()), - page -> new Page(new LongArrayVector(LongStream.range(0, 100).toArray(), 100).asBlock()) + in, + page -> new Page(blockFactory.newIntArrayVector(IntStream.range(0, 100).toArray(), 100).asBlock()), + page -> new Page(blockFactory.newLongArrayVector(LongStream.range(0, 100).toArray(), 100).asBlock()), + Page::releaseBlocks ); - EqualsHashCodeTestUtils.checkEqualsAndHashCode( - new Page(new IntArrayVector(new int[] { 1 }, 1).asBlock()), - page -> new Page(1, page.getBlock(0)), - page -> new Page(new IntArrayVector(new int[] { 1 }, 1).asBlock(), new IntArrayVector(new int[] { 1 }, 1).asBlock()) + 
in.releaseBlocks(); + + in = new Page(blockFactory.newIntArrayVector(new int[] { 1 }, 1).asBlock()); + EqualsHashCodeTestUtils.checkEqualsAndHashCode(in, page -> { + page.getBlock(0).incRef(); + return new Page(1, page.getBlock(0)); + }, + page -> new Page( + blockFactory.newIntArrayVector(new int[] { 1 }, 1).asBlock(), + blockFactory.newIntArrayVector(new int[] { 1 }, 1).asBlock() + ), + Page::releaseBlocks ); + in.releaseBlocks(); } public void testEqualityAndHashCode() throws IOException { @@ -93,7 +124,10 @@ public void testEqualityAndHashCode() throws IOException { int positions = randomInt(page.getPositionCount() - 1); for (int blockIndex = 0; blockIndex < blocks.length; blockIndex++) { Block block = page.getBlock(blockIndex); - blocks[blockIndex] = block.elementType().newBlockBuilder(positions).copyFrom(block, 0, page.getPositionCount() - 1).build(); + blocks[blockIndex] = block.elementType() + .newBlockBuilder(positions, TestBlockFactory.getNonBreakingInstance()) + .copyFrom(block, 0, page.getPositionCount() - 1) + .build(); } return new Page(blocks); }; @@ -103,13 +137,13 @@ public void testEqualityAndHashCode() throws IOException { Block[] blocks = new Block[blockCount]; for (int blockIndex = 0; blockIndex < blockCount; blockIndex++) { blocks[blockIndex] = switch (randomInt(6)) { - case 0 -> new IntArrayVector(randomInts(positions).toArray(), positions).asBlock(); - case 1 -> new LongArrayVector(randomLongs(positions).toArray(), positions).asBlock(); - case 2 -> new DoubleArrayVector(randomDoubles(positions).toArray(), positions).asBlock(); - case 3 -> IntBlock.newConstantBlockWith(randomInt(), positions); - case 4 -> LongBlock.newConstantBlockWith(randomLong(), positions); - case 5 -> DoubleBlock.newConstantBlockWith(randomDouble(), positions); - case 6 -> BytesRefBlock.newConstantBlockWith(new BytesRef(Integer.toHexString(randomInt())), positions); + case 0 -> blockFactory.newIntArrayVector(randomInts(positions).toArray(), positions).asBlock(); + case 1 -> blockFactory.newLongArrayVector(randomLongs(positions).toArray(), positions).asBlock(); + case 2 -> blockFactory.newDoubleArrayVector(randomDoubles(positions).toArray(), positions).asBlock(); + case 3 -> blockFactory.newConstantIntBlockWith(randomInt(), positions); + case 4 -> blockFactory.newConstantLongBlockWith(randomLong(), positions); + case 5 -> blockFactory.newConstantDoubleBlockWith(randomDouble(), positions); + case 6 -> blockFactory.newConstantBytesRefBlockWith(new BytesRef(Integer.toHexString(randomInt())), positions); default -> throw new AssertionError(); }; } @@ -125,36 +159,40 @@ public void testEqualityAndHashCode() throws IOException { public void testBasic() { int positions = randomInt(1024); - Page page = new Page(new IntArrayVector(IntStream.range(0, positions).toArray(), positions).asBlock()); + Page page = new Page(blockFactory.newIntArrayVector(IntStream.range(0, positions).toArray(), positions).asBlock()); assertThat(1, is(page.getBlockCount())); assertThat(positions, is(page.getPositionCount())); IntBlock block = page.getBlock(0); IntStream.range(0, positions).forEach(i -> assertThat(i, is(block.getInt(i)))); + page.releaseBlocks(); } public void testAppend() { - Page page1 = new Page(new IntArrayVector(IntStream.range(0, 10).toArray(), 10).asBlock()); - Page page2 = page1.appendBlock(new LongArrayVector(LongStream.range(0, 10).toArray(), 10).asBlock()); + Page page1 = new Page(blockFactory.newIntArrayVector(IntStream.range(0, 10).toArray(), 10).asBlock()); + Page page2 = 
page1.appendBlock(blockFactory.newLongArrayVector(LongStream.range(0, 10).toArray(), 10).asBlock()); assertThat(1, is(page1.getBlockCount())); assertThat(2, is(page2.getBlockCount())); IntBlock block1 = page2.getBlock(0); IntStream.range(0, 10).forEach(i -> assertThat(i, is(block1.getInt(i)))); LongBlock block2 = page2.getBlock(1); IntStream.range(0, 10).forEach(i -> assertThat((long) i, is(block2.getLong(i)))); + page2.releaseBlocks(); } public void testPageSerializationSimple() throws IOException { + IntVector toFilter = blockFactory.newIntArrayVector(IntStream.range(0, 20).toArray(), 20); Page origPage = new Page( - new IntArrayVector(IntStream.range(0, 10).toArray(), 10).asBlock(), - new LongArrayVector(LongStream.range(10, 20).toArray(), 10).asBlock(), - new DoubleArrayVector(LongStream.range(30, 40).mapToDouble(i -> i).toArray(), 10).asBlock(), - new BytesRefArrayVector(bytesRefArrayOf("0a", "1b", "2c", "3d", "4e", "5f", "6g", "7h", "8i", "9j"), 10).asBlock(), - IntBlock.newConstantBlockWith(randomInt(), 10), - LongBlock.newConstantBlockWith(randomInt(), 10), - DoubleBlock.newConstantBlockWith(randomInt(), 10), - BytesRefBlock.newConstantBlockWith(new BytesRef(Integer.toHexString(randomInt())), 10), - new IntArrayVector(IntStream.range(0, 20).toArray(), 20).filter(5, 6, 7, 8, 9, 10, 11, 12, 13, 14).asBlock() + blockFactory.newIntArrayVector(IntStream.range(0, 10).toArray(), 10).asBlock(), + blockFactory.newLongArrayVector(LongStream.range(10, 20).toArray(), 10).asBlock(), + blockFactory.newDoubleArrayVector(LongStream.range(30, 40).mapToDouble(i -> i).toArray(), 10).asBlock(), + blockFactory.newBytesRefArrayVector(bytesRefArrayOf("0a", "1b", "2c", "3d", "4e", "5f", "6g", "7h", "8i", "9j"), 10).asBlock(), + blockFactory.newConstantIntBlockWith(randomInt(), 10), + blockFactory.newConstantLongBlockWith(randomLong(), 10), + blockFactory.newConstantDoubleBlockWith(randomDouble(), 10), + blockFactory.newConstantBytesRefBlockWith(new BytesRef(Integer.toHexString(randomInt())), 10), + toFilter.filter(5, 6, 7, 8, 9, 10, 11, 12, 13, 14).asBlock() ); + toFilter.close(); try { Page deserPage = serializeDeserializePage(origPage); try { @@ -177,12 +215,12 @@ public void testPageSerializationSimple() throws IOException { public void testSerializationListPages() throws IOException { final int positions = randomIntBetween(1, 64); List<Page> origPages = List.of( - new Page(new IntArrayVector(randomInts(positions).toArray(), positions).asBlock()), + new Page(blockFactory.newIntArrayVector(randomInts(positions).toArray(), positions).asBlock()), new Page( - new LongArrayVector(randomLongs(positions).toArray(), positions).asBlock(), - DoubleBlock.newConstantBlockWith(randomInt(), positions) + blockFactory.newLongArrayVector(randomLongs(positions).toArray(), positions).asBlock(), + blockFactory.newConstantDoubleBlockWith(randomInt(), positions) ), - new Page(BytesRefBlock.newConstantBlockWith(new BytesRef("Hello World"), positions)) + new Page(blockFactory.newConstantBytesRefBlockWith(new BytesRef("Hello World"), positions)) ); try { EqualsHashCodeTestUtils.checkEqualsAndHashCode(origPages, page -> { @@ -198,7 +236,7 @@ public void testSerializationListPages() throws IOException { public void testPageMultiRelease() { int positions = randomInt(1024); - var block = new IntArrayVector(IntStream.range(0, positions).toArray(), positions).asBlock(); + var block = blockFactory.newIntArrayVector(IntStream.range(0, positions).toArray(), positions).asBlock(); Page page = new Page(block); page.releaseBlocks();
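// releasing the page drops its reference to each block it owns; this block had no other owners, so it must now report itself released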
assertThat(block.isReleased(), is(true)); diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BigArrayBlockBuilderTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BigArrayBlockBuilderTests.java new file mode 100644 index 0000000000000..0fd78fd3cb9bf --- /dev/null +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BigArrayBlockBuilderTests.java @@ -0,0 +1,260 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.data; + +import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.common.unit.ByteSizeValue; + +import java.io.IOException; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; + +public class BigArrayBlockBuilderTests extends SerializationTestCase { + + static ByteSizeValue estimateArraySize(long elementSize, long numElements) { + long bytes = RamUsageEstimator.NUM_BYTES_ARRAY_HEADER + RamUsageEstimator.alignObjectSize(elementSize * numElements); + return ByteSizeValue.ofBytes(bytes); + } + + public void testLongVector() throws IOException { + int maxPrimitiveElements = randomIntBetween(100, 1000); + var maxPrimitiveSize = estimateArraySize(Long.BYTES, maxPrimitiveElements); + blockFactory = new BlockFactory(blockFactory.breaker(), blockFactory.bigArrays(), maxPrimitiveSize); + int numElements = between(2, maxPrimitiveElements / 2); + try (var builder = blockFactory.newLongBlockBuilder(between(1, maxPrimitiveElements / 2))) { + long[] elements = new long[numElements]; + for (int i = 0; i < numElements; i++) { + elements[i] = randomLong(); + builder.appendLong(elements[i]); + } + try (LongBlock block = builder.build()) { + assertThat(block, instanceOf(LongVectorBlock.class)); + assertThat(block.asVector(), instanceOf(LongArrayVector.class)); + assertThat(block.getPositionCount(), equalTo(numElements)); + for (int i = 0; i < numElements; i++) { + assertThat(block.getLong(i), equalTo(elements[i])); + } + try (LongBlock copy = serializeDeserializeBlock(block)) { + assertThat(copy, instanceOf(LongVectorBlock.class)); + assertThat(block.asVector(), instanceOf(LongArrayVector.class)); + assertThat(copy.getPositionCount(), equalTo(numElements)); + for (int i = 0; i < numElements; i++) { + assertThat(copy.getLong(i), equalTo(elements[i])); + } + } + } + } + assertThat(blockFactory.breaker().getUsed(), equalTo(0L)); + numElements = between(maxPrimitiveElements + 10, maxPrimitiveElements * 2); + try (var builder = blockFactory.newLongBlockBuilder(between(1, maxPrimitiveElements * 2))) { + long[] elements = new long[numElements]; + for (int i = 0; i < numElements; i++) { + elements[i] = randomLong(); + builder.appendLong(elements[i]); + } + try (LongBlock block = builder.build()) { + assertThat(block, instanceOf(LongVectorBlock.class)); + assertThat(block.asVector(), instanceOf(LongBigArrayVector.class)); + assertThat(block.getPositionCount(), equalTo(numElements)); + for (int i = 0; i < numElements; i++) { + assertThat(block.getLong(i), equalTo(elements[i])); + } + try (LongBlock copy = serializeDeserializeBlock(block)) { + assertThat(copy, instanceOf(LongVectorBlock.class)); + assertThat(block.asVector(), instanceOf(LongBigArrayVector.class)); + assertThat(copy.getPositionCount(), 
equalTo(numElements));
+ for (int i = 0; i < numElements; i++) {
+ assertThat(copy.getLong(i), equalTo(elements[i]));
+ }
+ }
+ }
+ }
+ assertThat(blockFactory.breaker().getUsed(), equalTo(0L));
+ }
+
+ public void testLongBlock() throws IOException {
+ int maxPrimitiveElements = randomIntBetween(1000, 5000);
+ var maxPrimitiveSize = estimateArraySize(Long.BYTES, maxPrimitiveElements);
+ blockFactory = new BlockFactory(blockFactory.breaker(), blockFactory.bigArrays(), maxPrimitiveSize);
+ int numElements = between(2, maxPrimitiveElements / 2);
+ try (var builder = blockFactory.newLongBlockBuilder(between(1, maxPrimitiveElements / 2))) {
+ long[] elements = new long[numElements];
+ builder.beginPositionEntry();
+ for (int i = 0; i < numElements; i++) {
+ elements[i] = randomLong();
+ builder.appendLong(elements[i]);
+ }
+ builder.endPositionEntry();
+ try (LongBlock block = builder.build()) {
+ assertThat(block, instanceOf(LongArrayBlock.class));
+ assertNull(block.asVector());
+ assertThat(block.getPositionCount(), equalTo(1));
+ for (int i = 0; i < numElements; i++) {
+ assertThat(block.getLong(i), equalTo(elements[i]));
+ }
+ try (LongBlock copy = serializeDeserializeBlock(block)) {
+ assertThat(copy, instanceOf(LongArrayBlock.class));
+ assertNull(copy.asVector());
+ assertThat(copy.getPositionCount(), equalTo(1));
+ for (int i = 0; i < numElements; i++) {
+ assertThat(copy.getLong(i), equalTo(elements[i]));
+ }
+ }
+ }
+ }
+ assertThat(blockFactory.breaker().getUsed(), equalTo(0L));
+ numElements = between(maxPrimitiveElements + 10, maxPrimitiveElements * 2);
+ try (var builder = blockFactory.newLongBlockBuilder(between(1, maxPrimitiveElements * 2))) {
+ long[] elements = new long[numElements];
+ builder.beginPositionEntry();
+ for (int i = 0; i < numElements; i++) {
+ elements[i] = randomLong();
+ builder.appendLong(elements[i]);
+ }
+ builder.endPositionEntry();
+ try (LongBlock block = builder.build()) {
+ assertThat(block, instanceOf(LongBigArrayBlock.class));
+ assertNull(block.asVector());
+ assertThat(block.getPositionCount(), equalTo(1));
+ assertThat(block.getTotalValueCount(), equalTo(numElements));
+ for (int i = 0; i < numElements; i++) {
+ assertThat(block.getLong(i), equalTo(elements[i]));
+ }
+ try (LongBlock copy = serializeDeserializeBlock(block)) {
+ assertThat(copy, instanceOf(LongBigArrayBlock.class));
+ assertNull(copy.asVector());
+ assertThat(copy.getPositionCount(), equalTo(1));
+ assertThat(copy.getTotalValueCount(), equalTo(numElements));
+ for (int i = 0; i < numElements; i++) {
+ assertThat(copy.getLong(i), equalTo(elements[i]));
+ }
+ }
+ }
+ }
+ assertThat(blockFactory.breaker().getUsed(), equalTo(0L));
+ }
+
+ public void testBooleanVector() throws IOException {
+ int maxPrimitiveElements = randomIntBetween(100, 1000);
+ var maxPrimitiveSize = estimateArraySize(Byte.BYTES, maxPrimitiveElements);
+ blockFactory = new BlockFactory(blockFactory.breaker(), blockFactory.bigArrays(), maxPrimitiveSize);
+ int numElements = between(2, maxPrimitiveElements / 2);
+ try (var builder = blockFactory.newBooleanBlockBuilder(between(1, maxPrimitiveElements / 2))) {
+ boolean[] elements = new boolean[numElements];
+ for (int i = 0; i < numElements; i++) {
+ elements[i] = randomBoolean();
+ builder.appendBoolean(elements[i]);
+ }
+ try (var block = builder.build()) {
+ assertThat(block, instanceOf(BooleanVectorBlock.class));
+ assertThat(block.asVector(), instanceOf(BooleanArrayVector.class));
+ assertThat(block.getPositionCount(), equalTo(numElements));
+ for (int i = 0; i < numElements; i++) {
+ assertThat(block.getBoolean(i), equalTo(elements[i]));
+ }
+ try (var copy = serializeDeserializeBlock(block)) {
+ assertThat(copy, instanceOf(BooleanVectorBlock.class));
+ assertThat(copy.asVector(), instanceOf(BooleanArrayVector.class));
+ assertThat(copy.getPositionCount(), equalTo(numElements));
+ for (int i = 0; i < numElements; i++) {
+ assertThat(copy.getBoolean(i), equalTo(elements[i]));
+ }
+ }
+ }
+ }
+ assertThat(blockFactory.breaker().getUsed(), equalTo(0L));
+ numElements = between(maxPrimitiveElements + 10, maxPrimitiveElements * 2);
+ try (var builder = blockFactory.newBooleanBlockBuilder(between(1, maxPrimitiveElements * 2))) {
+ boolean[] elements = new boolean[numElements];
+ for (int i = 0; i < numElements; i++) {
+ elements[i] = randomBoolean();
+ builder.appendBoolean(elements[i]);
+ }
+ try (var block = builder.build()) {
+ assertThat(block, instanceOf(BooleanVectorBlock.class));
+ assertThat(block.asVector(), instanceOf(BooleanBigArrayVector.class));
+ assertThat(block.getPositionCount(), equalTo(numElements));
+ for (int i = 0; i < numElements; i++) {
+ assertThat(block.getBoolean(i), equalTo(elements[i]));
+ }
+ try (var copy = serializeDeserializeBlock(block)) {
+ assertThat(copy, instanceOf(BooleanVectorBlock.class));
+ assertThat(copy.asVector(), instanceOf(BooleanBigArrayVector.class));
+ assertThat(copy.getPositionCount(), equalTo(numElements));
+ for (int i = 0; i < numElements; i++) {
+ assertThat(copy.getBoolean(i), equalTo(elements[i]));
+ }
+ }
+ }
+ }
+ assertThat(blockFactory.breaker().getUsed(), equalTo(0L));
+ }
+
+ public void testBooleanBlock() throws IOException {
+ int maxPrimitiveElements = randomIntBetween(1000, 5000);
+ var maxPrimitiveSize = estimateArraySize(Byte.BYTES, maxPrimitiveElements);
+ blockFactory = new BlockFactory(blockFactory.breaker(), blockFactory.bigArrays(), maxPrimitiveSize);
+ int numElements = between(2, maxPrimitiveElements / 2);
+ try (var builder = blockFactory.newBooleanBlockBuilder(between(1, maxPrimitiveElements / 2))) {
+ boolean[] elements = new boolean[numElements];
+ builder.beginPositionEntry();
+ for (int i = 0; i < numElements; i++) {
+ elements[i] = randomBoolean();
+ builder.appendBoolean(elements[i]);
+ }
+ builder.endPositionEntry();
+ try (var block = builder.build()) {
+ assertThat(block, instanceOf(BooleanArrayBlock.class));
+ assertNull(block.asVector());
+ assertThat(block.getPositionCount(), equalTo(1));
+ for (int i = 0; i < numElements; i++) {
+ assertThat(block.getBoolean(i), equalTo(elements[i]));
+ }
+ try (var copy = serializeDeserializeBlock(block)) {
+ assertThat(copy, instanceOf(BooleanArrayBlock.class));
+ assertNull(copy.asVector());
+ assertThat(copy.getPositionCount(), equalTo(1));
+ for (int i = 0; i < numElements; i++) {
+ assertThat(copy.getBoolean(i), equalTo(elements[i]));
+ }
+ }
+ }
+ }
+ assertThat(blockFactory.breaker().getUsed(), equalTo(0L));
+ numElements = between(maxPrimitiveElements + 10, maxPrimitiveElements * 2);
+ try (var builder = blockFactory.newBooleanBlockBuilder(between(1, maxPrimitiveElements * 2))) {
+ boolean[] elements = new boolean[numElements];
+ builder.beginPositionEntry();
+ for (int i = 0; i < numElements; i++) {
+ elements[i] = randomBoolean();
+ builder.appendBoolean(elements[i]);
+ }
+ builder.endPositionEntry();
+ try (var block = builder.build()) {
+ assertThat(block, instanceOf(BooleanBigArrayBlock.class));
+ assertNull(block.asVector());
+ assertThat(block.getPositionCount(), equalTo(1));
+ assertThat(block.getTotalValueCount(), equalTo(numElements));
+ for (int i = 0; i < numElements; i++) {
+ assertThat(block.getBoolean(i), equalTo(elements[i]));
+ }
+ try (var copy = serializeDeserializeBlock(block)) {
+ assertThat(copy, instanceOf(BooleanBigArrayBlock.class));
+ assertNull(copy.asVector());
+ assertThat(copy.getPositionCount(), equalTo(1));
+ assertThat(copy.getTotalValueCount(), equalTo(numElements));
+ for (int i = 0; i < numElements; i++) {
+ assertThat(copy.getBoolean(i), equalTo(elements[i]));
+ }
+ }
+ }
+ }
+ assertThat(blockFactory.breaker().getUsed(), equalTo(0L));
+ }
+}
diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BigArrayVectorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BigArrayVectorTests.java
index 3033f672f897f..74d7e3e142d04 100644
--- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BigArrayVectorTests.java
+++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BigArrayVectorTests.java
@@ -32,14 +32,15 @@ public void testBoolean() throws IOException {
Boolean[] values = IntStream.range(0, positionCount).mapToObj(i -> randomBoolean()).toArray(Boolean[]::new);
BitArray array = new BitArray(positionCount, bigArrays);
IntStream.range(0, positionCount).filter(i -> values[i]).forEach(array::set);
- try (var vector = new BooleanBigArrayVector(array, positionCount)) {
+ try (var vector = new BooleanBigArrayVector(array, positionCount, blockFactory)) {
assertThat(vector.elementType(), is(ElementType.BOOLEAN));
assertThat(positionCount, is(vector.getPositionCount()));
IntStream.range(0, positionCount).forEach(i -> assertThat(vector.getBoolean(i), is(values[i])));
assertThat(vector.isConstant(), is(false));
- BooleanVector filtered = vector.filter(IntStream.range(0, positionCount).toArray());
- IntStream.range(0, positionCount).forEach(i -> assertThat(filtered.getBoolean(i), is(values[i])));
- assertThat(filtered.isConstant(), is(false));
+ try (BooleanVector filtered = vector.filter(IntStream.range(0, positionCount).toArray())) {
+ IntStream.range(0, positionCount).forEach(i -> assertThat(filtered.getBoolean(i), is(values[i])));
+ assertThat(filtered.isConstant(), is(false));
+ }
BooleanBlock block = vector.asBlock();
assertThat(block, is(notNullValue()));
IntStream.range(0, positionCount).forEach(i -> {
@@ -47,7 +48,9 @@
assertThat(block.isNull(i), is(false));
assertThat(block.getValueCount(i), is(1));
assertThat(block.getFirstValueIndex(i), is(i));
- assertThat(block.filter(i).getBoolean(0), is(values[i]));
+ try (BooleanBlock filter = block.filter(i)) {
+ assertThat(filter.getBoolean(0), is(values[i]));
+ }
});
BasicBlockTests.assertSingleValueDenseBlock(vector.asBlock());
assertSerialization(block);
@@ -60,14 +63,15 @@ public void testInt() throws IOException {
int[] values = IntStream.range(0, positionCount).map(i -> randomInt()).toArray();
IntArray array = bigArrays.newIntArray(positionCount);
IntStream.range(0, positionCount).forEach(i -> array.set(i, values[i]));
- try (var vector = new IntBigArrayVector(array, positionCount)) {
+ try (var vector = new IntBigArrayVector(array, positionCount, blockFactory)) {
assertThat(vector.elementType(), is(ElementType.INT));
assertThat(positionCount, is(vector.getPositionCount()));
IntStream.range(0, positionCount).forEach(i -> assertThat(vector.getInt(i), is(values[i])));
assertThat(vector.isConstant(), is(false));
- IntVector filtered =
vector.filter(IntStream.range(0, positionCount).toArray()); - IntStream.range(0, positionCount).forEach(i -> assertThat(filtered.getInt(i), is(values[i]))); - assertThat(filtered.isConstant(), is(false)); + try (IntVector filtered = vector.filter(IntStream.range(0, positionCount).toArray())) { + IntStream.range(0, positionCount).forEach(i -> assertThat(filtered.getInt(i), is(values[i]))); + assertThat(filtered.isConstant(), is(false)); + } IntBlock block = vector.asBlock(); assertThat(block, is(notNullValue())); IntStream.range(0, positionCount).forEach(i -> { @@ -75,7 +79,9 @@ public void testInt() throws IOException { assertThat(block.isNull(i), is(false)); assertThat(block.getValueCount(i), is(1)); assertThat(block.getFirstValueIndex(i), is(i)); - assertThat(block.filter(i).getInt(0), is(values[i])); + try (IntBlock filter = block.filter(i)) { + assertThat(filter.getInt(0), is(values[i])); + } }); BasicBlockTests.assertSingleValueDenseBlock(vector.asBlock()); assertSerialization(block); @@ -88,14 +94,15 @@ public void testLong() throws IOException { long[] values = IntStream.range(0, positionCount).mapToLong(i -> randomLong()).toArray(); LongArray array = bigArrays.newLongArray(positionCount); IntStream.range(0, positionCount).forEach(i -> array.set(i, values[i])); - try (var vector = new LongBigArrayVector(array, positionCount)) { + try (var vector = new LongBigArrayVector(array, positionCount, blockFactory)) { assertThat(vector.elementType(), is(ElementType.LONG)); assertThat(positionCount, is(vector.getPositionCount())); IntStream.range(0, positionCount).forEach(i -> assertThat(vector.getLong(i), is(values[i]))); assertThat(vector.isConstant(), is(false)); - LongVector filtered = vector.filter(IntStream.range(0, positionCount).toArray()); - IntStream.range(0, positionCount).forEach(i -> assertThat(filtered.getLong(i), is(values[i]))); - assertThat(filtered.isConstant(), is(false)); + try (LongVector filtered = vector.filter(IntStream.range(0, positionCount).toArray())) { + IntStream.range(0, positionCount).forEach(i -> assertThat(filtered.getLong(i), is(values[i]))); + assertThat(filtered.isConstant(), is(false)); + } LongBlock block = vector.asBlock(); assertThat(block, is(notNullValue())); IntStream.range(0, positionCount).forEach(i -> { @@ -103,7 +110,9 @@ public void testLong() throws IOException { assertThat(block.isNull(i), is(false)); assertThat(block.getValueCount(i), is(1)); assertThat(block.getFirstValueIndex(i), is(i)); - assertThat(block.filter(i).getLong(0), is(values[i])); + try (LongBlock filter = block.filter(i)) { + assertThat(filter.getLong(0), is(values[i])); + } }); BasicBlockTests.assertSingleValueDenseBlock(vector.asBlock()); assertSerialization(block); @@ -116,14 +125,15 @@ public void testDouble() throws IOException { double[] values = IntStream.range(0, positionCount).mapToDouble(i -> randomDouble()).toArray(); DoubleArray array = bigArrays.newDoubleArray(positionCount); IntStream.range(0, positionCount).forEach(i -> array.set(i, values[i])); - try (var vector = new DoubleBigArrayVector(array, positionCount)) { + try (var vector = new DoubleBigArrayVector(array, positionCount, blockFactory)) { assertThat(vector.elementType(), is(ElementType.DOUBLE)); assertThat(positionCount, is(vector.getPositionCount())); IntStream.range(0, positionCount).forEach(i -> assertThat(vector.getDouble(i), is(values[i]))); assertThat(vector.isConstant(), is(false)); - DoubleVector filtered = vector.filter(IntStream.range(0, positionCount).toArray()); - IntStream.range(0, 
positionCount).forEach(i -> assertThat(filtered.getDouble(i), is(values[i]))); - assertThat(filtered.isConstant(), is(false)); + try (DoubleVector filtered = vector.filter(IntStream.range(0, positionCount).toArray())) { + IntStream.range(0, positionCount).forEach(i -> assertThat(filtered.getDouble(i), is(values[i]))); + assertThat(filtered.isConstant(), is(false)); + } DoubleBlock block = vector.asBlock(); assertThat(block, is(notNullValue())); IntStream.range(0, positionCount).forEach(i -> { @@ -131,7 +141,9 @@ public void testDouble() throws IOException { assertThat(block.isNull(i), is(false)); assertThat(block.getValueCount(i), is(1)); assertThat(block.getFirstValueIndex(i), is(i)); - assertThat(block.filter(i).getDouble(0), is(values[i])); + try (DoubleBlock filter = block.filter(i)) { + assertThat(filter.getDouble(0), is(values[i])); + } }); BasicBlockTests.assertSingleValueDenseBlock(vector.asBlock()); assertSerialization(block); diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockAccountingTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockAccountingTests.java index d62fd75abbcdd..b5155f3199c1c 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockAccountingTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockAccountingTests.java @@ -13,7 +13,8 @@ import org.elasticsearch.common.util.BigArray; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.BytesRefArray; -import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.compute.operator.ComputeTestCase; +import org.elasticsearch.core.Releasables; import org.hamcrest.Matcher; import java.lang.reflect.Field; @@ -29,7 +30,7 @@ import static org.hamcrest.Matchers.lessThan; import static org.hamcrest.Matchers.lessThanOrEqualTo; -public class BlockAccountingTests extends ESTestCase { +public class BlockAccountingTests extends ComputeTestCase { static final Accumulator RAM_USAGE_ACCUMULATOR = new TestRamUsageAccumulator(); @@ -38,182 +39,277 @@ public class BlockAccountingTests extends ESTestCase { // Array Vectors public void testBooleanVector() { - Vector empty = new BooleanArrayVector(new boolean[] {}, 0); + BlockFactory blockFactory = blockFactory(); + Vector empty = blockFactory.newBooleanArrayVector(new boolean[] {}, 0); long expectedEmptyUsed = RamUsageTester.ramUsed(empty, RAM_USAGE_ACCUMULATOR); assertThat(empty.ramBytesUsed(), is(expectedEmptyUsed)); - Vector emptyPlusOne = new BooleanArrayVector(new boolean[] { randomBoolean() }, 1); + Vector emptyPlusOne = blockFactory.newBooleanArrayVector(new boolean[] { randomBoolean() }, 1); assertThat(emptyPlusOne.ramBytesUsed(), is(alignObjectSize(empty.ramBytesUsed() + 1))); boolean[] randomData = new boolean[randomIntBetween(2, 1024)]; - Vector emptyPlusSome = new BooleanArrayVector(randomData, randomData.length); + Vector emptyPlusSome = blockFactory.newBooleanArrayVector(randomData, randomData.length); assertThat(emptyPlusSome.ramBytesUsed(), is(alignObjectSize(empty.ramBytesUsed() + randomData.length))); Vector filterVector = emptyPlusSome.filter(1); assertThat(filterVector.ramBytesUsed(), lessThan(emptyPlusSome.ramBytesUsed())); + Releasables.close(empty, emptyPlusOne, emptyPlusSome, filterVector); } public void testIntVector() { - Vector empty = new IntArrayVector(new int[] {}, 0); + BlockFactory blockFactory = blockFactory(); + Vector empty = blockFactory.newIntArrayVector(new int[] {}, 0); 
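// A minimal sketch (not part of this patch) of the pattern the whole test class is migrating
// to: vectors now come from a BlockFactory instead of direct constructors, so each allocation
// is charged to the factory's circuit breaker and must be released. It assumes the
// blockFactory() helper that ComputeTestCase provides to these tests.
BlockFactory factory = blockFactory();
Vector tracked = factory.newIntArrayVector(new int[] { 1, 2, 3 }, 3);
Vector filtered = tracked.filter(1); // a filtered view is a tracked allocation of its own
Releasables.close(tracked, filtered); // hands the reserved bytes back to the breaker
assertThat(factory.breaker().getUsed(), equalTo(0L)); // nothing may remain reserved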
long expectedEmptyUsed = RamUsageTester.ramUsed(empty, RAM_USAGE_ACCUMULATOR);
assertThat(empty.ramBytesUsed(), is(expectedEmptyUsed));
- Vector emptyPlusOne = new IntArrayVector(new int[] { randomInt() }, 1);
+ Vector emptyPlusOne = blockFactory.newIntArrayVector(new int[] { randomInt() }, 1);
assertThat(emptyPlusOne.ramBytesUsed(), is(alignObjectSize(empty.ramBytesUsed() + Integer.BYTES)));
int[] randomData = new int[randomIntBetween(2, 1024)];
- Vector emptyPlusSome = new IntArrayVector(randomData, randomData.length);
+ Vector emptyPlusSome = blockFactory.newIntArrayVector(randomData, randomData.length);
assertThat(emptyPlusSome.ramBytesUsed(), is(alignObjectSize(empty.ramBytesUsed() + (long) Integer.BYTES * randomData.length)));
Vector filterVector = emptyPlusSome.filter(1);
assertThat(filterVector.ramBytesUsed(), lessThan(emptyPlusSome.ramBytesUsed()));
+ Releasables.close(empty, emptyPlusOne, emptyPlusSome, filterVector);
}
public void testLongVector() {
- Vector empty = new LongArrayVector(new long[] {}, 0);
+ BlockFactory blockFactory = blockFactory();
+ Vector empty = blockFactory.newLongArrayVector(new long[] {}, 0);
long expectedEmptyUsed = RamUsageTester.ramUsed(empty, RAM_USAGE_ACCUMULATOR);
assertThat(empty.ramBytesUsed(), is(expectedEmptyUsed));
- Vector emptyPlusOne = new LongArrayVector(new long[] { randomLong() }, 1);
+ Vector emptyPlusOne = blockFactory.newLongArrayVector(new long[] { randomLong() }, 1);
assertThat(emptyPlusOne.ramBytesUsed(), is(empty.ramBytesUsed() + Long.BYTES));
long[] randomData = new long[randomIntBetween(2, 1024)];
- Vector emptyPlusSome = new LongArrayVector(randomData, randomData.length);
+ Vector emptyPlusSome = blockFactory.newLongArrayVector(randomData, randomData.length);
assertThat(emptyPlusSome.ramBytesUsed(), is(empty.ramBytesUsed() + (long) Long.BYTES * randomData.length));
Vector filterVector = emptyPlusSome.filter(1);
assertThat(filterVector.ramBytesUsed(), lessThan(emptyPlusSome.ramBytesUsed()));
+
+ Releasables.close(empty, emptyPlusOne, emptyPlusSome, filterVector);
}
public void testDoubleVector() {
- Vector empty = new DoubleArrayVector(new double[] {}, 0);
+ BlockFactory blockFactory = blockFactory();
+ Vector empty = blockFactory.newDoubleArrayVector(new double[] {}, 0);
long expectedEmptyUsed = RamUsageTester.ramUsed(empty, RAM_USAGE_ACCUMULATOR);
assertThat(empty.ramBytesUsed(), is(expectedEmptyUsed));
- Vector emptyPlusOne = new DoubleArrayVector(new double[] { randomDouble() }, 1);
+ Vector emptyPlusOne = blockFactory.newDoubleArrayVector(new double[] { randomDouble() }, 1);
assertThat(emptyPlusOne.ramBytesUsed(), is(empty.ramBytesUsed() + Double.BYTES));
double[] randomData = new double[randomIntBetween(2, 1024)];
- Vector emptyPlusSome = new DoubleArrayVector(randomData, randomData.length);
+ Vector emptyPlusSome = blockFactory.newDoubleArrayVector(randomData, randomData.length);
assertThat(emptyPlusSome.ramBytesUsed(), is(empty.ramBytesUsed() + (long) Double.BYTES * randomData.length));
// a filter becomes responsible for its enclosing data, both in terms of accountancy and releasability
Vector filterVector = emptyPlusSome.filter(1);
assertThat(filterVector.ramBytesUsed(), lessThan(emptyPlusSome.ramBytesUsed()));
+
+ Releasables.close(empty, emptyPlusOne, emptyPlusSome, filterVector);
}
public void testBytesRefVector() {
- try (
- var emptyArray = new BytesRefArray(0, BigArrays.NON_RECYCLING_INSTANCE);
- var arrayWithOne = new BytesRefArray(0, BigArrays.NON_RECYCLING_INSTANCE)
- ) {
- Vector emptyVector = new
BytesRefArrayVector(emptyArray, 0); - long expectedEmptyVectorUsed = RamUsageTester.ramUsed(emptyVector, RAM_USAGE_ACCUMULATOR); - assertThat(emptyVector.ramBytesUsed(), is(expectedEmptyVectorUsed)); - - var bytesRef = new BytesRef(randomAlphaOfLengthBetween(1, 16)); - arrayWithOne.append(bytesRef); - Vector emptyPlusOne = new BytesRefArrayVector(arrayWithOne, 1); - assertThat(emptyPlusOne.ramBytesUsed(), between(emptyVector.ramBytesUsed() + bytesRef.length, UPPER_BOUND)); - - Vector filterVector = emptyPlusOne.filter(0); - assertThat(filterVector.ramBytesUsed(), lessThan(emptyPlusOne.ramBytesUsed())); - } + BlockFactory blockFactory = blockFactory(); + var emptyArray = new BytesRefArray(0, blockFactory.bigArrays()); + var arrayWithOne = new BytesRefArray(0, blockFactory.bigArrays()); + Vector emptyVector = blockFactory.newBytesRefArrayVector(emptyArray, 0); + long expectedEmptyVectorUsed = RamUsageTester.ramUsed(emptyVector, RAM_USAGE_ACCUMULATOR); + assertThat(emptyVector.ramBytesUsed(), is(expectedEmptyVectorUsed)); + + var bytesRef = new BytesRef(randomAlphaOfLengthBetween(1, 16)); + arrayWithOne.append(bytesRef); + Vector emptyPlusOne = blockFactory.newBytesRefArrayVector(arrayWithOne, 1); + assertThat(emptyPlusOne.ramBytesUsed(), between(emptyVector.ramBytesUsed() + bytesRef.length, UPPER_BOUND)); + + Vector filterVector = emptyPlusOne.filter(0); + assertThat(filterVector.ramBytesUsed(), lessThan(emptyPlusOne.ramBytesUsed())); + Releasables.close(emptyVector, emptyPlusOne, filterVector); } // Array Blocks public void testBooleanBlock() { - Block empty = new BooleanArrayBlock(new boolean[] {}, 0, new int[0], null, Block.MvOrdering.UNORDERED); + BlockFactory blockFactory = blockFactory(); + Block empty = new BooleanArrayBlock(new boolean[] {}, 0, new int[0], null, Block.MvOrdering.UNORDERED, blockFactory); long expectedEmptyUsed = RamUsageTester.ramUsed(empty, RAM_USAGE_ACCUMULATOR); assertThat(empty.ramBytesUsed(), is(expectedEmptyUsed)); - Block emptyPlusOne = new BooleanArrayBlock(new boolean[] { randomBoolean() }, 1, new int[] { 0 }, null, Block.MvOrdering.UNORDERED); + Block emptyPlusOne = new BooleanArrayBlock( + new boolean[] { randomBoolean() }, + 1, + new int[] { 0 }, + null, + Block.MvOrdering.UNORDERED, + blockFactory + ); assertThat(emptyPlusOne.ramBytesUsed(), is(alignObjectSize(empty.ramBytesUsed() + 1) + alignObjectSize(Integer.BYTES))); boolean[] randomData = new boolean[randomIntBetween(2, 1024)]; int[] valueIndices = IntStream.range(0, randomData.length + 1).toArray(); - Block emptyPlusSome = new BooleanArrayBlock(randomData, randomData.length, valueIndices, null, Block.MvOrdering.UNORDERED); + Block emptyPlusSome = new BooleanArrayBlock( + randomData, + randomData.length, + valueIndices, + null, + Block.MvOrdering.UNORDERED, + blockFactory + ); long expected = empty.ramBytesUsed() + ramBytesForBooleanArray(randomData) + ramBytesForIntArray(valueIndices); assertThat(emptyPlusSome.ramBytesUsed(), is(expected)); Block filterBlock = emptyPlusSome.filter(1); assertThat(filterBlock.ramBytesUsed(), lessThan(emptyPlusOne.ramBytesUsed())); + Releasables.close(filterBlock); } public void testBooleanBlockWithNullFirstValues() { - Block empty = new BooleanArrayBlock(new boolean[] {}, 0, null, BitSet.valueOf(new byte[] { 1 }), Block.MvOrdering.UNORDERED); + Block empty = new BooleanArrayBlock( + new boolean[] {}, + 0, + null, + BitSet.valueOf(new byte[] { 1 }), + Block.MvOrdering.UNORDERED, + blockFactory() + ); long expectedEmptyUsed = RamUsageTester.ramUsed(empty, 
RAM_USAGE_ACCUMULATOR); assertThat(empty.ramBytesUsed(), lessThanOrEqualTo(expectedEmptyUsed)); } public void testIntBlock() { - Block empty = new IntArrayBlock(new int[] {}, 0, new int[] {}, null, Block.MvOrdering.UNORDERED); + BlockFactory blockFactory = blockFactory(); + Block empty = new IntArrayBlock(new int[] {}, 0, new int[] {}, null, Block.MvOrdering.UNORDERED, blockFactory); long expectedEmptyUsed = RamUsageTester.ramUsed(empty, RAM_USAGE_ACCUMULATOR); assertThat(empty.ramBytesUsed(), is(expectedEmptyUsed)); - Block emptyPlusOne = new IntArrayBlock(new int[] { randomInt() }, 1, new int[] { 0 }, null, Block.MvOrdering.UNORDERED); + Block emptyPlusOne = new IntArrayBlock( + new int[] { randomInt() }, + 1, + new int[] { 0 }, + null, + Block.MvOrdering.UNORDERED, + blockFactory + ); assertThat(emptyPlusOne.ramBytesUsed(), is(empty.ramBytesUsed() + alignObjectSize(Integer.BYTES) + alignObjectSize(Integer.BYTES))); int[] randomData = new int[randomIntBetween(2, 1024)]; int[] valueIndices = IntStream.range(0, randomData.length + 1).toArray(); - Block emptyPlusSome = new IntArrayBlock(randomData, randomData.length, valueIndices, null, Block.MvOrdering.UNORDERED); + Block emptyPlusSome = new IntArrayBlock( + randomData, + randomData.length, + valueIndices, + null, + Block.MvOrdering.UNORDERED, + blockFactory + ); long expected = empty.ramBytesUsed() + ramBytesForIntArray(randomData) + ramBytesForIntArray(valueIndices); assertThat(emptyPlusSome.ramBytesUsed(), is(expected)); Block filterBlock = emptyPlusSome.filter(1); assertThat(filterBlock.ramBytesUsed(), lessThan(emptyPlusOne.ramBytesUsed())); + Releasables.close(filterBlock); } public void testIntBlockWithNullFirstValues() { - Block empty = new IntArrayBlock(new int[] {}, 0, null, BitSet.valueOf(new byte[] { 1 }), Block.MvOrdering.UNORDERED); + BlockFactory blockFactory = blockFactory(); + Block empty = new IntArrayBlock(new int[] {}, 0, null, BitSet.valueOf(new byte[] { 1 }), Block.MvOrdering.UNORDERED, blockFactory); long expectedEmptyUsed = RamUsageTester.ramUsed(empty, RAM_USAGE_ACCUMULATOR); assertThat(empty.ramBytesUsed(), is(expectedEmptyUsed)); } public void testLongBlock() { - Block empty = new LongArrayBlock(new long[] {}, 0, new int[0], null, Block.MvOrdering.UNORDERED); + BlockFactory blockFactory = blockFactory(); + Block empty = new LongArrayBlock(new long[] {}, 0, new int[0], null, Block.MvOrdering.UNORDERED, blockFactory); long expectedEmptyUsed = RamUsageTester.ramUsed(empty, RAM_USAGE_ACCUMULATOR); assertThat(empty.ramBytesUsed(), is(expectedEmptyUsed)); - Block emptyPlusOne = new LongArrayBlock(new long[] { randomInt() }, 1, new int[] { 0 }, null, Block.MvOrdering.UNORDERED); + Block emptyPlusOne = new LongArrayBlock( + new long[] { randomInt() }, + 1, + new int[] { 0 }, + null, + Block.MvOrdering.UNORDERED, + blockFactory + ); assertThat(emptyPlusOne.ramBytesUsed(), is(alignObjectSize(empty.ramBytesUsed() + Long.BYTES) + alignObjectSize(Integer.BYTES))); long[] randomData = new long[randomIntBetween(2, 1024)]; int[] valueIndices = IntStream.range(0, randomData.length + 1).toArray(); - Block emptyPlusSome = new LongArrayBlock(randomData, randomData.length, valueIndices, null, Block.MvOrdering.UNORDERED); + Block emptyPlusSome = new LongArrayBlock( + randomData, + randomData.length, + valueIndices, + null, + Block.MvOrdering.UNORDERED, + blockFactory + ); long expected = empty.ramBytesUsed() + ramBytesForLongArray(randomData) + ramBytesForIntArray(valueIndices); assertThat(emptyPlusSome.ramBytesUsed(), is(expected)); 
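// A sketch, under the same assumptions as above, of the release discipline the updated
// assertions rely on: builders, the blocks they build, and blocks returned by filter() are
// all Releasable, so each gets its own try-with-resources scope and the breaker must read
// zero once every scope has closed.
BlockFactory blockFactory = blockFactory();
try (var builder = blockFactory.newLongBlockBuilder(3)) {
    try (LongBlock block = builder.appendLong(1).appendLong(2).appendLong(3).build()) {
        try (LongBlock filtered = block.filter(1)) { // filter() allocates a tracked block
            assertThat(filtered.getLong(0), equalTo(2L)); // only position 1 survives
        }
    }
}
assertThat(blockFactory.breaker().getUsed(), equalTo(0L)); // nothing leaked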
Block filterBlock = emptyPlusSome.filter(1); assertThat(filterBlock.ramBytesUsed(), lessThan(emptyPlusOne.ramBytesUsed())); + Releasables.close(filterBlock); } public void testLongBlockWithNullFirstValues() { - Block empty = new LongArrayBlock(new long[] {}, 0, null, BitSet.valueOf(new byte[] { 1 }), Block.MvOrdering.UNORDERED); + Block empty = new LongArrayBlock( + new long[] {}, + 0, + null, + BitSet.valueOf(new byte[] { 1 }), + Block.MvOrdering.UNORDERED, + blockFactory() + ); long expectedEmptyUsed = RamUsageTester.ramUsed(empty, RAM_USAGE_ACCUMULATOR); assertThat(empty.ramBytesUsed(), is(expectedEmptyUsed)); } public void testDoubleBlock() { - Block empty = new DoubleArrayBlock(new double[] {}, 0, new int[0], null, Block.MvOrdering.UNORDERED); + BlockFactory blockFactory = blockFactory(); + Block empty = new DoubleArrayBlock(new double[] {}, 0, new int[0], null, Block.MvOrdering.UNORDERED, blockFactory); long expectedEmptyUsed = RamUsageTester.ramUsed(empty, RAM_USAGE_ACCUMULATOR); assertThat(empty.ramBytesUsed(), is(expectedEmptyUsed)); - Block emptyPlusOne = new DoubleArrayBlock(new double[] { randomInt() }, 1, new int[] { 0 }, null, Block.MvOrdering.UNORDERED); + Block emptyPlusOne = new DoubleArrayBlock( + new double[] { randomInt() }, + 1, + new int[] { 0 }, + null, + Block.MvOrdering.UNORDERED, + blockFactory + ); assertThat(emptyPlusOne.ramBytesUsed(), is(alignObjectSize(empty.ramBytesUsed() + Double.BYTES) + alignObjectSize(Integer.BYTES))); double[] randomData = new double[randomIntBetween(2, 1024)]; int[] valueIndices = IntStream.range(0, randomData.length + 1).toArray(); - Block emptyPlusSome = new DoubleArrayBlock(randomData, randomData.length, valueIndices, null, Block.MvOrdering.UNORDERED); + Block emptyPlusSome = new DoubleArrayBlock( + randomData, + randomData.length, + valueIndices, + null, + Block.MvOrdering.UNORDERED, + blockFactory + ); long expected = empty.ramBytesUsed() + ramBytesForDoubleArray(randomData) + ramBytesForIntArray(valueIndices); assertThat(emptyPlusSome.ramBytesUsed(), is(expected)); Block filterBlock = emptyPlusSome.filter(1); assertThat(filterBlock.ramBytesUsed(), lessThan(emptyPlusOne.ramBytesUsed())); + Releasables.close(filterBlock); } public void testDoubleBlockWithNullFirstValues() { - Block empty = new DoubleArrayBlock(new double[] {}, 0, null, BitSet.valueOf(new byte[] { 1 }), Block.MvOrdering.UNORDERED); + Block empty = new DoubleArrayBlock( + new double[] {}, + 0, + null, + BitSet.valueOf(new byte[] { 1 }), + Block.MvOrdering.UNORDERED, + blockFactory() + ); long expectedEmptyUsed = RamUsageTester.ramUsed(empty, RAM_USAGE_ACCUMULATOR); assertThat(empty.ramBytesUsed(), is(expectedEmptyUsed)); } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockBuilderAppendBlockTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockBuilderAppendBlockTests.java index a9f08eee02d70..9c1b02aa74107 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockBuilderAppendBlockTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockBuilderAppendBlockTests.java @@ -7,17 +7,19 @@ package org.elasticsearch.compute.data; -import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.compute.operator.ComputeTestCase; import java.util.ArrayList; import java.util.List; import static org.hamcrest.Matchers.equalTo; -public class BlockBuilderAppendBlockTests extends ESTestCase { +public class BlockBuilderAppendBlockTests 
extends ComputeTestCase { public void testBasic() { - IntBlock src = new IntBlockBuilder(10, BlockFactory.getNonBreakingInstance()).appendInt(1) + BlockFactory blockFactory = blockFactory(); + IntBlock src = blockFactory.newIntBlockBuilder(10) + .appendInt(1) .appendNull() .beginPositionEntry() .appendInt(4) @@ -32,40 +34,48 @@ public void testBasic() { .endPositionEntry() .build(); // copy position by position - { - IntBlock.Builder dst = IntBlock.newBlockBuilder(randomIntBetween(1, 20)); + try (IntBlock.Builder dst = blockFactory.newIntBlockBuilder(randomIntBetween(1, 20))) { for (int i = 0; i < src.getPositionCount(); i++) { - dst.appendAllValuesToCurrentPosition(src.filter(i)); + try (IntBlock filter = src.filter(i)) { + dst.appendAllValuesToCurrentPosition(filter); + } + } + try (IntBlock block = dst.build()) { + assertThat(block, equalTo(src)); } - assertThat(dst.build(), equalTo(src)); } // copy all block - { - IntBlock.Builder dst = IntBlock.newBlockBuilder(randomIntBetween(1, 20)); - IntBlock block = dst.appendAllValuesToCurrentPosition(src).build(); - assertThat(block.getPositionCount(), equalTo(1)); - assertThat(BlockUtils.toJavaObject(block, 0), equalTo(List.of(1, 4, 6, 10, 20, 30, 1))); + try (IntBlock.Builder dst = blockFactory.newIntBlockBuilder(randomIntBetween(1, 20))) { + try (IntBlock block = dst.appendAllValuesToCurrentPosition(src).build()) { + assertThat(block.getPositionCount(), equalTo(1)); + assertThat(BlockUtils.toJavaObject(block, 0), equalTo(List.of(1, 4, 6, 10, 20, 30, 1))); + } } - { - Block dst = randomlyDivideAndMerge(src); + try (Block dst = randomlyDivideAndMerge(src)) { assertThat(dst.getPositionCount(), equalTo(1)); assertThat(BlockUtils.toJavaObject(dst, 0), equalTo(List.of(1, 4, 6, 10, 20, 30, 1))); } } public void testRandomNullBlock() { - IntBlock.Builder src = IntBlock.newBlockBuilder(10); - src.appendAllValuesToCurrentPosition(new ConstantNullBlock(between(1, 100))); + BlockFactory blockFactory = blockFactory(); + IntBlock.Builder src = blockFactory.newIntBlockBuilder(10); + try (var nullBlock = blockFactory.newConstantNullBlock(between(1, 100))) { + src.appendAllValuesToCurrentPosition(nullBlock); + } src.appendInt(101); - src.appendAllValuesToCurrentPosition(new ConstantNullBlock(between(1, 100))); + try (var nullBlock = blockFactory.newConstantNullBlock(between(1, 100))) { + src.appendAllValuesToCurrentPosition(nullBlock); + } IntBlock block = src.build(); assertThat(block.getPositionCount(), equalTo(3)); assertTrue(block.isNull(0)); assertThat(block.getInt(1), equalTo(101)); assertTrue(block.isNull(2)); - Block flatten = randomlyDivideAndMerge(block); - assertThat(flatten.getPositionCount(), equalTo(1)); - assertThat(BlockUtils.toJavaObject(flatten, 0), equalTo(101)); + try (Block flatten = randomlyDivideAndMerge(block)) { + assertThat(flatten.getPositionCount(), equalTo(1)); + assertThat(BlockUtils.toJavaObject(flatten, 0), equalTo(101)); + } } public void testRandom() { @@ -79,14 +89,17 @@ public void testRandom() { 0, between(0, 16) ).block(); - randomlyDivideAndMerge(block); + + block = randomlyDivideAndMerge(block); + block.close(); } private Block randomlyDivideAndMerge(Block block) { while (block.getPositionCount() > 1 || randomBoolean()) { int positionCount = block.getPositionCount(); int offset = 0; - Block.Builder builder = block.elementType().newBlockBuilder(randomIntBetween(1, 100)); + Block.Builder builder = block.elementType() + .newBlockBuilder(randomIntBetween(1, 100), TestBlockFactory.getNonBreakingInstance()); List expected = 
new ArrayList<>();
while (offset < positionCount) {
int length = randomIntBetween(1, positionCount - offset);
@@ -98,7 +111,9 @@ private Block randomlyDivideAndMerge(Block block) {
Block sub = block.filter(positions);
expected.add(extractAndFlattenBlockValues(sub));
builder.appendAllValuesToCurrentPosition(sub);
+ sub.close();
}
+ block.close();
block = builder.build();
assertThat(block.getPositionCount(), equalTo(expected.size()));
for (int i = 0; i < block.getPositionCount(); i++) {
diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockBuilderCopyFromTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockBuilderCopyFromTests.java
index 529c1afeaaf44..e3a9aba0d1b7f 100644
--- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockBuilderCopyFromTests.java
+++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockBuilderCopyFromTests.java
@@ -69,22 +69,24 @@ public void testEvensFiltered() {
}
public void testSmallAllNull() {
- assertSmall(Block.constantNullBlock(10));
+ assertSmall(TestBlockFactory.getNonBreakingInstance().newConstantNullBlock(10));
}
public void testEvensAllNull() {
- assertEvens(Block.constantNullBlock(10));
+ assertEvens(TestBlockFactory.getNonBreakingInstance().newConstantNullBlock(10));
}
private void assertSmall(Block block) {
int smallSize = Math.min(block.getPositionCount(), 10);
- Block.Builder builder = elementType.newBlockBuilder(smallSize);
+ BlockFactory blockFactory = TestBlockFactory.getNonBreakingInstance();
+ Block.Builder builder = elementType.newBlockBuilder(smallSize, blockFactory);
builder.copyFrom(block, 0, smallSize);
assertBlockValues(builder.build(), BasicBlockTests.valuesAtPositions(block, 0, smallSize));
}
private void assertEvens(Block block) {
- Block.Builder builder = elementType.newBlockBuilder(block.getPositionCount() / 2);
+ BlockFactory blockFactory = TestBlockFactory.getNonBreakingInstance();
+ Block.Builder builder = elementType.newBlockBuilder(block.getPositionCount() / 2, blockFactory);
List<List<Object>> expected = new ArrayList<>();
for (int i = 0; i < block.getPositionCount(); i += 2) {
builder.copyFrom(block, i, i + 1);
diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockFactoryTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockFactoryTests.java
index 96e5de20ba35c..e794215a4c212 100644
--- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockFactoryTests.java
+++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockFactoryTests.java
@@ -568,7 +568,6 @@ public void testReleaseVector() {
vector.close();
}
assertTrue(vector.isReleased());
- assertTrue(vector.asBlock().isReleased());
assertThat(breaker.getUsed(), equalTo(0L));
}
@@ -651,7 +650,7 @@ public void testAllowPassingBlockToDifferentContext() throws Exception {
public void testOwningFactoryOfVectorBlock() {
BlockFactory parentFactory = blockFactory(ByteSizeValue.ofBytes(between(1024, 4096)));
LocalCircuitBreaker localBreaker = new LocalCircuitBreaker(parentFactory.breaker(), between(0, 1024), between(0, 1024));
- BlockFactory localFactory = new BlockFactory(localBreaker, bigArrays, parentFactory);
+ BlockFactory localFactory = parentFactory.newChildFactory(localBreaker);
int numValues = between(2, 10);
try (var builder = localFactory.newIntVectorBuilder(numValues)) {
for (int i = 0; i < numValues; i++) {
diff --git
a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockSerializationTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockSerializationTests.java index e44697ab8534c..b13aa040f307d 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockSerializationTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockSerializationTests.java @@ -30,129 +30,179 @@ public class BlockSerializationTests extends SerializationTestCase { public void testConstantIntBlock() throws IOException { - assertConstantBlockImpl(IntBlock.newConstantBlockWith(randomInt(), randomIntBetween(1, 8192))); + assertConstantBlockImpl(blockFactory.newConstantIntBlockWith(randomInt(), randomIntBetween(1, 8192))); } public void testConstantLongBlockLong() throws IOException { - assertConstantBlockImpl(LongBlock.newConstantBlockWith(randomLong(), randomIntBetween(1, 8192))); + assertConstantBlockImpl(blockFactory.newConstantLongBlockWith(randomLong(), randomIntBetween(1, 8192))); } public void testConstantDoubleBlock() throws IOException { - assertConstantBlockImpl(DoubleBlock.newConstantBlockWith(randomDouble(), randomIntBetween(1, 8192))); + assertConstantBlockImpl(blockFactory.newConstantDoubleBlockWith(randomDouble(), randomIntBetween(1, 8192))); } public void testConstantBytesRefBlock() throws IOException { - Block block = BytesRefBlock.newConstantBlockWith(new BytesRef(((Integer) randomInt()).toString()), randomIntBetween(1, 8192)); + Block block = blockFactory.newConstantBytesRefBlockWith( + new BytesRef(((Integer) randomInt()).toString()), + randomIntBetween(1, 8192) + ); assertConstantBlockImpl(block); } private void assertConstantBlockImpl(Block origBlock) throws IOException { assertThat(origBlock.asVector().isConstant(), is(true)); - try (Block deserBlock = serializeDeserializeBlock(origBlock)) { + try (origBlock; Block deserBlock = serializeDeserializeBlock(origBlock)) { EqualsHashCodeTestUtils.checkEqualsAndHashCode(origBlock, unused -> deserBlock); assertThat(deserBlock.asVector().isConstant(), is(true)); } } public void testEmptyIntBlock() throws IOException { - assertEmptyBlock(IntBlock.newBlockBuilder(0).build()); - assertEmptyBlock(IntBlock.newBlockBuilder(0).appendNull().build().filter()); - assertEmptyBlock(IntVector.newVectorBuilder(0).build().asBlock()); - assertEmptyBlock(IntVector.newVectorBuilder(0).appendInt(randomInt()).build().filter().asBlock()); + assertEmptyBlock(blockFactory.newIntBlockBuilder(0).build()); + try (IntBlock toFilter = blockFactory.newIntBlockBuilder(0).appendNull().build()) { + assertEmptyBlock(toFilter.filter()); + } + assertEmptyBlock(blockFactory.newIntVectorBuilder(0).build().asBlock()); + try (IntVector toFilter = blockFactory.newIntVectorBuilder(0).appendInt(randomInt()).build()) { + assertEmptyBlock(toFilter.filter().asBlock()); + } } public void testEmptyLongBlock() throws IOException { - assertEmptyBlock(LongBlock.newBlockBuilder(0).build()); - assertEmptyBlock(LongBlock.newBlockBuilder(0).appendNull().build().filter()); - assertEmptyBlock(LongVector.newVectorBuilder(0).build().asBlock()); - assertEmptyBlock(LongVector.newVectorBuilder(0).appendLong(randomLong()).build().filter().asBlock()); + assertEmptyBlock(blockFactory.newLongBlockBuilder(0).build()); + try (LongBlock toFilter = blockFactory.newLongBlockBuilder(0).appendNull().build()) { + assertEmptyBlock(toFilter.filter()); + } + 
assertEmptyBlock(blockFactory.newLongVectorBuilder(0).build().asBlock()); + try (LongVector toFilter = blockFactory.newLongVectorBuilder(0).appendLong(randomLong()).build()) { + assertEmptyBlock(toFilter.filter().asBlock()); + } } public void testEmptyDoubleBlock() throws IOException { - assertEmptyBlock(DoubleBlock.newBlockBuilder(0).build()); - assertEmptyBlock(DoubleBlock.newBlockBuilder(0).appendNull().build().filter()); - assertEmptyBlock(DoubleVector.newVectorBuilder(0).build().asBlock()); - assertEmptyBlock(DoubleVector.newVectorBuilder(0).appendDouble(randomDouble()).build().filter().asBlock()); + assertEmptyBlock(blockFactory.newDoubleBlockBuilder(0).build()); + try (DoubleBlock toFilter = blockFactory.newDoubleBlockBuilder(0).appendNull().build()) { + assertEmptyBlock(toFilter.filter()); + } + assertEmptyBlock(blockFactory.newDoubleVectorBuilder(0).build().asBlock()); + try (DoubleVector toFilter = blockFactory.newDoubleVectorBuilder(0).appendDouble(randomDouble()).build()) { + assertEmptyBlock(toFilter.filter().asBlock()); + } } public void testEmptyBytesRefBlock() throws IOException { - assertEmptyBlock(BytesRefBlock.newBlockBuilder(0).build()); - assertEmptyBlock(BytesRefBlock.newBlockBuilder(0).appendNull().build().filter()); - assertEmptyBlock(BytesRefVector.newVectorBuilder(0).build().asBlock()); - assertEmptyBlock(BytesRefVector.newVectorBuilder(0).appendBytesRef(randomBytesRef()).build().filter().asBlock()); + assertEmptyBlock(blockFactory.newBytesRefBlockBuilder(0).build()); + try (BytesRefBlock toFilter = blockFactory.newBytesRefBlockBuilder(0).appendNull().build()) { + assertEmptyBlock(toFilter.filter()); + } + assertEmptyBlock(blockFactory.newBytesRefVectorBuilder(0).build().asBlock()); + try (BytesRefVector toFilter = blockFactory.newBytesRefVectorBuilder(0).appendBytesRef(randomBytesRef()).build()) { + assertEmptyBlock(toFilter.filter().asBlock()); + } } private void assertEmptyBlock(Block origBlock) throws IOException { assertThat(origBlock.getPositionCount(), is(0)); - try (Block deserBlock = serializeDeserializeBlock(origBlock)) { + try (origBlock; Block deserBlock = serializeDeserializeBlock(origBlock)) { EqualsHashCodeTestUtils.checkEqualsAndHashCode(origBlock, unused -> deserBlock); } } public void testFilterIntBlock() throws IOException { - assertFilterBlock(IntBlock.newBlockBuilder(0).appendInt(1).appendInt(2).build().filter(1)); - assertFilterBlock(IntBlock.newBlockBuilder(1).appendInt(randomInt()).appendNull().build().filter(0)); - assertFilterBlock(IntVector.newVectorBuilder(1).appendInt(randomInt()).build().filter(0).asBlock()); - assertFilterBlock(IntVector.newVectorBuilder(1).appendInt(randomInt()).appendInt(randomInt()).build().filter(0).asBlock()); + try (IntBlock toFilter = blockFactory.newIntBlockBuilder(0).appendInt(1).appendInt(2).build()) { + assertFilterBlock(toFilter.filter(1)); + } + try (IntBlock toFilter = blockFactory.newIntBlockBuilder(1).appendInt(randomInt()).appendNull().build()) { + assertFilterBlock(toFilter.filter(0)); + } + try (IntVector toFilter = blockFactory.newIntVectorBuilder(1).appendInt(randomInt()).build()) { + assertFilterBlock(toFilter.filter(0).asBlock()); + } + try (IntVector toFilter = blockFactory.newIntVectorBuilder(1).appendInt(randomInt()).appendInt(randomInt()).build()) { + assertFilterBlock(toFilter.filter(0).asBlock()); + } } public void testFilterLongBlock() throws IOException { - assertFilterBlock(LongBlock.newBlockBuilder(0).appendLong(1).appendLong(2).build().filter(1)); - 
assertFilterBlock(LongBlock.newBlockBuilder(1).appendLong(randomLong()).appendNull().build().filter(0)); - assertFilterBlock(LongVector.newVectorBuilder(1).appendLong(randomLong()).build().filter(0).asBlock()); - assertFilterBlock(LongVector.newVectorBuilder(1).appendLong(randomLong()).appendLong(randomLong()).build().filter(0).asBlock()); + try (LongBlock toFilter = blockFactory.newLongBlockBuilder(0).appendLong(1).appendLong(2).build()) { + assertFilterBlock(toFilter.filter(1)); + } + try (LongBlock toFilter = blockFactory.newLongBlockBuilder(1).appendLong(randomLong()).appendNull().build()) { + assertFilterBlock(toFilter.filter(0)); + } + try (LongVector toFilter = blockFactory.newLongVectorBuilder(1).appendLong(randomLong()).build()) { + assertFilterBlock(toFilter.filter(0).asBlock()); + } + try (LongVector toFilter = blockFactory.newLongVectorBuilder(1).appendLong(randomLong()).appendLong(randomLong()).build()) { + assertFilterBlock(toFilter.filter(0).asBlock()); + } } public void testFilterDoubleBlock() throws IOException { - assertFilterBlock(DoubleBlock.newBlockBuilder(0).appendDouble(1).appendDouble(2).build().filter(1)); - assertFilterBlock(DoubleBlock.newBlockBuilder(1).appendDouble(randomDouble()).appendNull().build().filter(0)); - assertFilterBlock(DoubleVector.newVectorBuilder(1).appendDouble(randomDouble()).build().filter(0).asBlock()); - assertFilterBlock( - DoubleVector.newVectorBuilder(1).appendDouble(randomDouble()).appendDouble(randomDouble()).build().filter(0).asBlock() - ); + try (DoubleBlock toFilter = blockFactory.newDoubleBlockBuilder(0).appendDouble(1).appendDouble(2).build()) { + assertFilterBlock(toFilter.filter(1)); + } + try (DoubleBlock toFilter = blockFactory.newDoubleBlockBuilder(1).appendDouble(randomDouble()).appendNull().build()) { + assertFilterBlock(toFilter.filter(0)); + } + try (DoubleVector toFilter = blockFactory.newDoubleVectorBuilder(1).appendDouble(randomDouble()).build()) { + assertFilterBlock(toFilter.filter(0).asBlock()); + + } + try ( + DoubleVector toFilter = blockFactory.newDoubleVectorBuilder(1).appendDouble(randomDouble()).appendDouble(randomDouble()).build() + ) { + assertFilterBlock(toFilter.filter(0).asBlock()); + } } public void testFilterBytesRefBlock() throws IOException { - assertFilterBlock( - BytesRefBlock.newBlockBuilder(0) + try ( + BytesRefBlock toFilter = blockFactory.newBytesRefBlockBuilder(0) .appendBytesRef(randomBytesRef()) .appendBytesRef(randomBytesRef()) .build() - .filter(randomIntBetween(0, 1)) - ); - assertFilterBlock( - BytesRefBlock.newBlockBuilder(0).appendBytesRef(randomBytesRef()).appendNull().build().filter(randomIntBetween(0, 1)) - ); - assertFilterBlock(BytesRefVector.newVectorBuilder(0).appendBytesRef(randomBytesRef()).build().asBlock().filter(0)); - assertFilterBlock( - BytesRefVector.newVectorBuilder(0) + ) { + assertFilterBlock(toFilter.filter(randomIntBetween(0, 1))); + } + + try (BytesRefBlock toFilter = blockFactory.newBytesRefBlockBuilder(0).appendBytesRef(randomBytesRef()).appendNull().build()) { + assertFilterBlock(toFilter.filter(randomIntBetween(0, 1))); + } + + try (BytesRefVector toFilter = blockFactory.newBytesRefVectorBuilder(0).appendBytesRef(randomBytesRef()).build()) { + assertFilterBlock(toFilter.asBlock().filter(0)); + } + try ( + BytesRefVector toFilter = blockFactory.newBytesRefVectorBuilder(0) .appendBytesRef(randomBytesRef()) .appendBytesRef(randomBytesRef()) .build() - .asBlock() - .filter(randomIntBetween(0, 1)) - ); + ) { + 
assertFilterBlock(toFilter.asBlock().filter(randomIntBetween(0, 1))); + } } private void assertFilterBlock(Block origBlock) throws IOException { assertThat(origBlock.getPositionCount(), is(1)); - try (Block deserBlock = serializeDeserializeBlock(origBlock)) { + try (origBlock; Block deserBlock = serializeDeserializeBlock(origBlock)) { EqualsHashCodeTestUtils.checkEqualsAndHashCode(origBlock, unused -> deserBlock); assertThat(deserBlock.getPositionCount(), is(1)); } } public void testConstantNullBlock() throws IOException { - Block origBlock = new ConstantNullBlock(randomIntBetween(1, 8192)); - try (Block deserBlock = serializeDeserializeBlock(origBlock)) { - EqualsHashCodeTestUtils.checkEqualsAndHashCode(origBlock, unused -> deserBlock); + try (Block origBlock = blockFactory.newConstantNullBlock(randomIntBetween(1, 8192))) { + try (Block deserBlock = serializeDeserializeBlock(origBlock)) { + EqualsHashCodeTestUtils.checkEqualsAndHashCode(origBlock, unused -> deserBlock); + } } } // TODO: more types, grouping, etc... public void testSimulateAggs() { DriverContext driverCtx = driverContext(); - Page page = new Page(new LongArrayVector(new long[] { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 }, 10).asBlock()); + Page page = new Page(blockFactory.newLongArrayVector(new long[] { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 }, 10).asBlock()); var bigArrays = BigArrays.NON_RECYCLING_INSTANCE; var params = new Object[] {}; var function = SumLongAggregatorFunction.create(driverCtx, List.of(0)); @@ -167,18 +217,20 @@ public void testSimulateAggs() { .forEach(i -> EqualsHashCodeTestUtils.checkEqualsAndHashCode(blocks[i], unused -> deserBlocks[i])); var inputChannels = IntStream.range(0, SumLongAggregatorFunction.intermediateStateDesc().size()).boxed().toList(); - var finalAggregator = SumLongAggregatorFunction.create(driverCtx, inputChannels); - finalAggregator.addIntermediateInput(new Page(deserBlocks)); - Block[] finalBlocks = new Block[1]; - finalAggregator.evaluateFinal(finalBlocks, 0, driverCtx); - try (var finalBlock = (LongBlock) finalBlocks[0]) { - assertThat(finalBlock.getLong(0), is(55L)); + try (var finalAggregator = SumLongAggregatorFunction.create(driverCtx, inputChannels)) { + finalAggregator.addIntermediateInput(new Page(deserBlocks)); + Block[] finalBlocks = new Block[1]; + finalAggregator.evaluateFinal(finalBlocks, 0, driverCtx); + try (var finalBlock = (LongBlock) finalBlocks[0]) { + assertThat(finalBlock.getLong(0), is(55L)); + } } } finally { Releasables.close(deserBlocks); } } finally { Releasables.close(blocks); + page.releaseBlocks(); } } @@ -201,6 +253,6 @@ protected final BigArrays nonBreakingBigArrays() { * A {@link DriverContext} with a nonBreakingBigArrays. 
*/ protected DriverContext driverContext() { // TODO make this final and return a breaking block factory - return new DriverContext(nonBreakingBigArrays(), BlockFactory.getNonBreakingInstance()); + return new DriverContext(nonBreakingBigArrays(), TestBlockFactory.getNonBreakingInstance()); } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BooleanBlockEqualityTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BooleanBlockEqualityTests.java index ae0d56d8612ce..c0fc539cecc6c 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BooleanBlockEqualityTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BooleanBlockEqualityTests.java @@ -14,15 +14,17 @@ public class BooleanBlockEqualityTests extends ESTestCase { + static final BlockFactory blockFactory = TestBlockFactory.getNonBreakingInstance(); + public void testEmptyVector() { // all these "empty" vectors should be equivalent List vectors = List.of( - new BooleanArrayVector(new boolean[] {}, 0), - new BooleanArrayVector(new boolean[] { randomBoolean() }, 0), - BooleanBlock.newConstantBlockWith(randomBoolean(), 0).asVector(), - BooleanBlock.newConstantBlockWith(randomBoolean(), 0).filter().asVector(), - BooleanBlock.newBlockBuilder(0).build().asVector(), - BooleanBlock.newBlockBuilder(0).appendBoolean(randomBoolean()).build().asVector().filter() + blockFactory.newBooleanArrayVector(new boolean[] {}, 0), + blockFactory.newBooleanArrayVector(new boolean[] { randomBoolean() }, 0), + blockFactory.newConstantBooleanBlockWith(randomBoolean(), 0).asVector(), + blockFactory.newConstantBooleanBlockWith(randomBoolean(), 0).filter().asVector(), + blockFactory.newBooleanBlockBuilder(0).build().asVector(), + blockFactory.newBooleanBlockBuilder(0).appendBoolean(randomBoolean()).build().asVector().filter() ); assertAllEquals(vectors); } @@ -35,19 +37,21 @@ public void testEmptyBlock() { 0, new int[] {}, BitSet.valueOf(new byte[] { 0b00 }), - randomFrom(Block.MvOrdering.values()) + randomFrom(Block.MvOrdering.values()), + blockFactory ), new BooleanArrayBlock( new boolean[] { randomBoolean() }, 0, new int[] {}, BitSet.valueOf(new byte[] { 0b00 }), - randomFrom(Block.MvOrdering.values()) + randomFrom(Block.MvOrdering.values()), + blockFactory ), - BooleanBlock.newConstantBlockWith(randomBoolean(), 0), - BooleanBlock.newBlockBuilder(0).build(), - BooleanBlock.newBlockBuilder(0).appendBoolean(randomBoolean()).build().filter(), - BooleanBlock.newBlockBuilder(0).appendNull().build().filter() + blockFactory.newConstantBooleanBlockWith(randomBoolean(), 0), + blockFactory.newBooleanBlockBuilder(0).build(), + blockFactory.newBooleanBlockBuilder(0).appendBoolean(randomBoolean()).build().filter(), + blockFactory.newBooleanBlockBuilder(0).appendNull().build().filter() ); assertAllEquals(blocks); } @@ -55,16 +59,16 @@ public void testEmptyBlock() { public void testVectorEquality() { // all these vectors should be equivalent List vectors = List.of( - new BooleanArrayVector(new boolean[] { true, false, true }, 3), - new BooleanArrayVector(new boolean[] { true, false, true }, 3).asBlock().asVector(), - new BooleanArrayVector(new boolean[] { true, false, true, false }, 3), - new BooleanArrayVector(new boolean[] { true, false, true }, 3).filter(0, 1, 2), - new BooleanArrayVector(new boolean[] { true, false, true, false }, 4).filter(0, 1, 2), - new BooleanArrayVector(new boolean[] { false, true, false, true }, 4).filter(1, 2, 3), - new 
BooleanArrayVector(new boolean[] { true, true, false, true }, 4).filter(0, 2, 3), - BooleanBlock.newBlockBuilder(3).appendBoolean(true).appendBoolean(false).appendBoolean(true).build().asVector(), - BooleanBlock.newBlockBuilder(3).appendBoolean(true).appendBoolean(false).appendBoolean(true).build().asVector().filter(0, 1, 2), - BooleanBlock.newBlockBuilder(3) + blockFactory.newBooleanArrayVector(new boolean[] { true, false, true }, 3), + blockFactory.newBooleanArrayVector(new boolean[] { true, false, true }, 3).asBlock().asVector(), + blockFactory.newBooleanArrayVector(new boolean[] { true, false, true, false }, 3), + blockFactory.newBooleanArrayVector(new boolean[] { true, false, true }, 3).filter(0, 1, 2), + blockFactory.newBooleanArrayVector(new boolean[] { true, false, true, false }, 4).filter(0, 1, 2), + blockFactory.newBooleanArrayVector(new boolean[] { false, true, false, true }, 4).filter(1, 2, 3), + blockFactory.newBooleanArrayVector(new boolean[] { true, true, false, true }, 4).filter(0, 2, 3), + blockFactory.newBooleanVectorBuilder(3).appendBoolean(true).appendBoolean(false).appendBoolean(true).build(), + blockFactory.newBooleanVectorBuilder(3).appendBoolean(true).appendBoolean(false).appendBoolean(true).build().filter(0, 1, 2), + blockFactory.newBooleanBlockBuilder(3) .appendBoolean(true) .appendBoolean(true) .appendBoolean(false) @@ -72,7 +76,7 @@ public void testVectorEquality() { .build() .filter(0, 2, 3) .asVector(), - BooleanBlock.newBlockBuilder(3) + blockFactory.newBooleanBlockBuilder(3) .appendBoolean(true) .appendBoolean(true) .appendBoolean(false) @@ -85,17 +89,23 @@ public void testVectorEquality() { // all these constant-like vectors should be equivalent List moreVectors = List.of( - new BooleanArrayVector(new boolean[] { true, true, true }, 3), - new BooleanArrayVector(new boolean[] { true, true, true }, 3).asBlock().asVector(), - new BooleanArrayVector(new boolean[] { true, true, true, true }, 3), - new BooleanArrayVector(new boolean[] { true, true, true }, 3).filter(0, 1, 2), - new BooleanArrayVector(new boolean[] { true, true, true, false }, 4).filter(0, 1, 2), - new BooleanArrayVector(new boolean[] { false, true, true, true }, 4).filter(1, 2, 3), - new BooleanArrayVector(new boolean[] { true, false, true, true }, 4).filter(0, 2, 3), - BooleanBlock.newConstantBlockWith(true, 3).asVector(), - BooleanBlock.newBlockBuilder(3).appendBoolean(true).appendBoolean(true).appendBoolean(true).build().asVector(), - BooleanBlock.newBlockBuilder(3).appendBoolean(true).appendBoolean(true).appendBoolean(true).build().asVector().filter(0, 1, 2), - BooleanBlock.newBlockBuilder(3) + blockFactory.newBooleanArrayVector(new boolean[] { true, true, true }, 3), + blockFactory.newBooleanArrayVector(new boolean[] { true, true, true }, 3).asBlock().asVector(), + blockFactory.newBooleanArrayVector(new boolean[] { true, true, true, true }, 3), + blockFactory.newBooleanArrayVector(new boolean[] { true, true, true }, 3).filter(0, 1, 2), + blockFactory.newBooleanArrayVector(new boolean[] { true, true, true, false }, 4).filter(0, 1, 2), + blockFactory.newBooleanArrayVector(new boolean[] { false, true, true, true }, 4).filter(1, 2, 3), + blockFactory.newBooleanArrayVector(new boolean[] { true, false, true, true }, 4).filter(0, 2, 3), + blockFactory.newConstantBooleanBlockWith(true, 3).asVector(), + blockFactory.newBooleanBlockBuilder(3).appendBoolean(true).appendBoolean(true).appendBoolean(true).build().asVector(), + blockFactory.newBooleanBlockBuilder(3) + .appendBoolean(true) + 
.appendBoolean(true) + .appendBoolean(true) + .build() + .asVector() + .filter(0, 1, 2), + blockFactory.newBooleanBlockBuilder(3) .appendBoolean(true) .appendBoolean(false) .appendBoolean(true) @@ -103,7 +113,7 @@ public void testVectorEquality() { .build() .filter(0, 2, 3) .asVector(), - BooleanBlock.newBlockBuilder(3) + blockFactory.newBooleanBlockBuilder(3) .appendBoolean(true) .appendBoolean(false) .appendBoolean(true) @@ -118,35 +128,37 @@ public void testVectorEquality() { public void testBlockEquality() { // all these blocks should be equivalent List blocks = List.of( - new BooleanArrayVector(new boolean[] { true, false, true }, 3).asBlock(), + blockFactory.newBooleanArrayVector(new boolean[] { true, false, true }, 3).asBlock(), new BooleanArrayBlock( new boolean[] { true, false, true }, 3, new int[] { 0, 1, 2, 3 }, BitSet.valueOf(new byte[] { 0b000 }), - randomFrom(Block.MvOrdering.values()) + randomFrom(Block.MvOrdering.values()), + blockFactory ), new BooleanArrayBlock( new boolean[] { true, false, true, false }, 3, new int[] { 0, 1, 2, 3 }, BitSet.valueOf(new byte[] { 0b1000 }), - randomFrom(Block.MvOrdering.values()) + randomFrom(Block.MvOrdering.values()), + blockFactory ), - new BooleanArrayVector(new boolean[] { true, false, true }, 3).filter(0, 1, 2).asBlock(), - new BooleanArrayVector(new boolean[] { true, false, true, false }, 3).filter(0, 1, 2).asBlock(), - new BooleanArrayVector(new boolean[] { true, false, true, false }, 4).filter(0, 1, 2).asBlock(), - new BooleanArrayVector(new boolean[] { true, false, false, true }, 4).filter(0, 1, 3).asBlock(), - BooleanBlock.newBlockBuilder(3).appendBoolean(true).appendBoolean(false).appendBoolean(true).build(), - BooleanBlock.newBlockBuilder(3).appendBoolean(true).appendBoolean(false).appendBoolean(true).build().filter(0, 1, 2), - BooleanBlock.newBlockBuilder(3) + blockFactory.newBooleanArrayVector(new boolean[] { true, false, true }, 3).filter(0, 1, 2).asBlock(), + blockFactory.newBooleanArrayVector(new boolean[] { true, false, true, false }, 3).filter(0, 1, 2).asBlock(), + blockFactory.newBooleanArrayVector(new boolean[] { true, false, true, false }, 4).filter(0, 1, 2).asBlock(), + blockFactory.newBooleanArrayVector(new boolean[] { true, false, false, true }, 4).filter(0, 1, 3).asBlock(), + blockFactory.newBooleanBlockBuilder(3).appendBoolean(true).appendBoolean(false).appendBoolean(true).build(), + blockFactory.newBooleanBlockBuilder(3).appendBoolean(true).appendBoolean(false).appendBoolean(true).build().filter(0, 1, 2), + blockFactory.newBooleanBlockBuilder(3) .appendBoolean(true) .appendBoolean(true) .appendBoolean(false) .appendBoolean(true) .build() .filter(0, 2, 3), - BooleanBlock.newBlockBuilder(3) + blockFactory.newBooleanBlockBuilder(3) .appendBoolean(true) .appendNull() .appendBoolean(false) @@ -158,30 +170,32 @@ public void testBlockEquality() { // all these constant-like blocks should be equivalent List moreBlocks = List.of( - new BooleanArrayVector(new boolean[] { true, true }, 2).asBlock(), + blockFactory.newBooleanArrayVector(new boolean[] { true, true }, 2).asBlock(), new BooleanArrayBlock( new boolean[] { true, true }, 2, new int[] { 0, 1, 2 }, BitSet.valueOf(new byte[] { 0b000 }), - randomFrom(Block.MvOrdering.values()) + randomFrom(Block.MvOrdering.values()), + blockFactory ), new BooleanArrayBlock( new boolean[] { true, true, false }, 2, new int[] { 0, 1, 2 }, BitSet.valueOf(new byte[] { 0b100 }), - randomFrom(Block.MvOrdering.values()) + randomFrom(Block.MvOrdering.values()), + blockFactory ), - new 
BooleanArrayVector(new boolean[] { true, true }, 2).filter(0, 1).asBlock(), - new BooleanArrayVector(new boolean[] { true, true, false }, 2).filter(0, 1).asBlock(), - new BooleanArrayVector(new boolean[] { true, true, false }, 3).filter(0, 1).asBlock(), - new BooleanArrayVector(new boolean[] { true, false, true }, 3).filter(0, 2).asBlock(), - BooleanBlock.newConstantBlockWith(true, 2), - BooleanBlock.newBlockBuilder(2).appendBoolean(true).appendBoolean(true).build(), - BooleanBlock.newBlockBuilder(2).appendBoolean(true).appendBoolean(true).build().filter(0, 1), - BooleanBlock.newBlockBuilder(2).appendBoolean(true).appendBoolean(true).appendBoolean(true).build().filter(0, 2), - BooleanBlock.newBlockBuilder(2).appendBoolean(true).appendNull().appendBoolean(true).build().filter(0, 2) + blockFactory.newBooleanArrayVector(new boolean[] { true, true }, 2).filter(0, 1).asBlock(), + blockFactory.newBooleanArrayVector(new boolean[] { true, true, false }, 2).filter(0, 1).asBlock(), + blockFactory.newBooleanArrayVector(new boolean[] { true, true, false }, 3).filter(0, 1).asBlock(), + blockFactory.newBooleanArrayVector(new boolean[] { true, false, true }, 3).filter(0, 2).asBlock(), + blockFactory.newConstantBooleanBlockWith(true, 2), + blockFactory.newBooleanBlockBuilder(2).appendBoolean(true).appendBoolean(true).build(), + blockFactory.newBooleanBlockBuilder(2).appendBoolean(true).appendBoolean(true).build().filter(0, 1), + blockFactory.newBooleanBlockBuilder(2).appendBoolean(true).appendBoolean(true).appendBoolean(true).build().filter(0, 2), + blockFactory.newBooleanBlockBuilder(2).appendBoolean(true).appendNull().appendBoolean(true).build().filter(0, 2) ); assertAllEquals(moreBlocks); } @@ -189,15 +203,15 @@ public void testBlockEquality() { public void testVectorInequality() { // all these vectors should NOT be equivalent List notEqualVectors = List.of( - new BooleanArrayVector(new boolean[] { true }, 1), - new BooleanArrayVector(new boolean[] { false }, 1), - new BooleanArrayVector(new boolean[] { true, false }, 2), - new BooleanArrayVector(new boolean[] { true, false, true }, 3), - new BooleanArrayVector(new boolean[] { false, true, false }, 3), - BooleanBlock.newConstantBlockWith(true, 2).asVector(), - BooleanBlock.newBlockBuilder(2).appendBoolean(false).appendBoolean(true).build().asVector(), - BooleanBlock.newBlockBuilder(3).appendBoolean(false).appendBoolean(false).appendBoolean(true).build().asVector(), - BooleanBlock.newBlockBuilder(1) + blockFactory.newBooleanArrayVector(new boolean[] { true }, 1), + blockFactory.newBooleanArrayVector(new boolean[] { false }, 1), + blockFactory.newBooleanArrayVector(new boolean[] { true, false }, 2), + blockFactory.newBooleanArrayVector(new boolean[] { true, false, true }, 3), + blockFactory.newBooleanArrayVector(new boolean[] { false, true, false }, 3), + blockFactory.newConstantBooleanBlockWith(true, 2).asVector(), + blockFactory.newBooleanBlockBuilder(2).appendBoolean(false).appendBoolean(true).build().asVector(), + blockFactory.newBooleanBlockBuilder(3).appendBoolean(false).appendBoolean(false).appendBoolean(true).build().asVector(), + blockFactory.newBooleanBlockBuilder(1) .appendBoolean(false) .appendBoolean(false) .appendBoolean(false) @@ -211,18 +225,28 @@ public void testVectorInequality() { public void testBlockInequality() { // all these blocks should NOT be equivalent List notEqualBlocks = List.of( - new BooleanArrayVector(new boolean[] { false }, 1).asBlock(), - new BooleanArrayVector(new boolean[] { true }, 1).asBlock(), - new 
BooleanArrayVector(new boolean[] { false, true }, 2).asBlock(), - new BooleanArrayVector(new boolean[] { false, true, false }, 3).asBlock(), - new BooleanArrayVector(new boolean[] { false, false, true }, 3).asBlock(), - BooleanBlock.newConstantBlockWith(true, 2), - BooleanBlock.newBlockBuilder(3).appendBoolean(true).appendBoolean(false).appendBoolean(false).build(), - BooleanBlock.newBlockBuilder(1).appendBoolean(true).appendBoolean(false).appendBoolean(true).appendBoolean(false).build(), - BooleanBlock.newBlockBuilder(1).appendBoolean(true).appendNull().build(), - BooleanBlock.newBlockBuilder(1).appendBoolean(true).appendNull().appendBoolean(false).build(), - BooleanBlock.newBlockBuilder(1).appendBoolean(true).appendBoolean(false).build(), - BooleanBlock.newBlockBuilder(3).appendBoolean(true).beginPositionEntry().appendBoolean(false).appendBoolean(false).build() + blockFactory.newBooleanArrayVector(new boolean[] { false }, 1).asBlock(), + blockFactory.newBooleanArrayVector(new boolean[] { true }, 1).asBlock(), + blockFactory.newBooleanArrayVector(new boolean[] { false, true }, 2).asBlock(), + blockFactory.newBooleanArrayVector(new boolean[] { false, true, false }, 3).asBlock(), + blockFactory.newBooleanArrayVector(new boolean[] { false, false, true }, 3).asBlock(), + blockFactory.newConstantBooleanBlockWith(true, 2), + blockFactory.newBooleanBlockBuilder(3).appendBoolean(true).appendBoolean(false).appendBoolean(false).build(), + blockFactory.newBooleanBlockBuilder(1) + .appendBoolean(true) + .appendBoolean(false) + .appendBoolean(true) + .appendBoolean(false) + .build(), + blockFactory.newBooleanBlockBuilder(1).appendBoolean(true).appendNull().build(), + blockFactory.newBooleanBlockBuilder(1).appendBoolean(true).appendNull().appendBoolean(false).build(), + blockFactory.newBooleanBlockBuilder(1).appendBoolean(true).appendBoolean(false).build(), + blockFactory.newBooleanBlockBuilder(3) + .appendBoolean(true) + .beginPositionEntry() + .appendBoolean(false) + .appendBoolean(false) + .build() ); assertAllNotEquals(notEqualBlocks); } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BytesRefBlockEqualityTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BytesRefBlockEqualityTests.java index ee654497c1ec3..ec740db329c74 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BytesRefBlockEqualityTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BytesRefBlockEqualityTests.java @@ -12,27 +12,28 @@ import org.elasticsearch.common.util.BytesRefArray; import org.elasticsearch.common.util.MockBigArrays; import org.elasticsearch.common.util.PageCacheRecycler; +import org.elasticsearch.compute.operator.ComputeTestCase; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; -import org.elasticsearch.test.ESTestCase; import java.util.Arrays; import java.util.BitSet; import java.util.List; -public class BytesRefBlockEqualityTests extends ESTestCase { +public class BytesRefBlockEqualityTests extends ComputeTestCase { final BigArrays bigArrays = new MockBigArrays(PageCacheRecycler.NON_RECYCLING_INSTANCE, new NoneCircuitBreakerService()); + final BlockFactory blockFactory = TestBlockFactory.getNonBreakingInstance(); public void testEmptyVector() { // all these "empty" vectors should be equivalent try (var bytesRefArray1 = new BytesRefArray(0, bigArrays); var bytesRefArray2 = new BytesRefArray(1, bigArrays)) { List vectors = List.of( - new 
BytesRefArrayVector(bytesRefArray1, 0), - new BytesRefArrayVector(bytesRefArray2, 0), - BytesRefBlock.newConstantBlockWith(new BytesRef(), 0).asVector(), - BytesRefBlock.newConstantBlockWith(new BytesRef(), 0).filter().asVector(), - BytesRefBlock.newBlockBuilder(0).build().asVector(), - BytesRefBlock.newBlockBuilder(0).appendBytesRef(new BytesRef()).build().asVector().filter() + new BytesRefArrayVector(bytesRefArray1, 0, blockFactory), + new BytesRefArrayVector(bytesRefArray2, 0, blockFactory), + blockFactory.newConstantBytesRefBlockWith(new BytesRef(), 0).asVector(), + blockFactory.newConstantBytesRefBlockWith(new BytesRef(), 0).filter().asVector(), + blockFactory.newBytesRefBlockBuilder(0).build().asVector(), + blockFactory.newBytesRefBlockBuilder(0).appendBytesRef(new BytesRef()).build().asVector().filter() ); assertAllEquals(vectors); } @@ -47,19 +48,21 @@ public void testEmptyBlock() { 0, new int[] {}, BitSet.valueOf(new byte[] { 0b00 }), - randomFrom(Block.MvOrdering.values()) + randomFrom(Block.MvOrdering.values()), + blockFactory ), new BytesRefArrayBlock( bytesRefArray2, 0, new int[] {}, BitSet.valueOf(new byte[] { 0b00 }), - randomFrom(Block.MvOrdering.values()) + randomFrom(Block.MvOrdering.values()), + blockFactory ), - BytesRefBlock.newConstantBlockWith(new BytesRef(), 0), - BytesRefBlock.newBlockBuilder(0).build(), - BytesRefBlock.newBlockBuilder(0).appendBytesRef(new BytesRef()).build().filter(), - BytesRefBlock.newBlockBuilder(0).appendNull().build().filter() + blockFactory.newConstantBytesRefBlockWith(new BytesRef(), 0), + blockFactory.newBytesRefBlockBuilder(0).build(), + blockFactory.newBytesRefBlockBuilder(0).appendBytesRef(new BytesRef()).build().filter(), + blockFactory.newBytesRefBlockBuilder(0).appendNull().build().filter() ); assertAllEquals(blocks); } @@ -69,25 +72,25 @@ public void testVectorEquality() { // all these vectors should be equivalent try (var bytesRefArray1 = arrayOf("1", "2", "3"); var bytesRefArray2 = arrayOf("1", "2", "3", "4")) { List vectors = List.of( - new BytesRefArrayVector(bytesRefArray1, 3), - new BytesRefArrayVector(bytesRefArray1, 3).asBlock().asVector(), - new BytesRefArrayVector(bytesRefArray2, 3), - new BytesRefArrayVector(bytesRefArray1, 3).filter(0, 1, 2), - new BytesRefArrayVector(bytesRefArray2, 4).filter(0, 1, 2), - BytesRefBlock.newBlockBuilder(3) + new BytesRefArrayVector(bytesRefArray1, 3, blockFactory), + new BytesRefArrayVector(bytesRefArray1, 3, blockFactory).asBlock().asVector(), + new BytesRefArrayVector(bytesRefArray2, 3, blockFactory), + new BytesRefArrayVector(bytesRefArray1, 3, blockFactory).filter(0, 1, 2), + new BytesRefArrayVector(bytesRefArray2, 4, blockFactory).filter(0, 1, 2), + blockFactory.newBytesRefBlockBuilder(3) .appendBytesRef(new BytesRef("1")) .appendBytesRef(new BytesRef("2")) .appendBytesRef(new BytesRef("3")) .build() .asVector(), - BytesRefBlock.newBlockBuilder(3) + blockFactory.newBytesRefBlockBuilder(3) .appendBytesRef(new BytesRef("1")) .appendBytesRef(new BytesRef("2")) .appendBytesRef(new BytesRef("3")) .build() .asVector() .filter(0, 1, 2), - BytesRefBlock.newBlockBuilder(3) + blockFactory.newBytesRefBlockBuilder(3) .appendBytesRef(new BytesRef("1")) .appendBytesRef(new BytesRef("4")) .appendBytesRef(new BytesRef("2")) @@ -95,7 +98,7 @@ public void testVectorEquality() { .build() .filter(0, 2, 3) .asVector(), - BytesRefBlock.newBlockBuilder(3) + blockFactory.newBytesRefBlockBuilder(3) .appendBytesRef(new BytesRef("1")) .appendBytesRef(new BytesRef("4")) .appendBytesRef(new BytesRef("2")) @@ 
-110,26 +113,26 @@ public void testVectorEquality() { // all these constant-like vectors should be equivalent try (var bytesRefArray1 = arrayOf("1", "1", "1"); var bytesRefArray2 = arrayOf("1", "1", "1", "4")) { List moreVectors = List.of( - new BytesRefArrayVector(bytesRefArray1, 3), - new BytesRefArrayVector(bytesRefArray1, 3).asBlock().asVector(), - new BytesRefArrayVector(bytesRefArray2, 3), - new BytesRefArrayVector(bytesRefArray1, 3).filter(0, 1, 2), - new BytesRefArrayVector(bytesRefArray2, 4).filter(0, 1, 2), - BytesRefBlock.newConstantBlockWith(new BytesRef("1"), 3).asVector(), - BytesRefBlock.newBlockBuilder(3) + new BytesRefArrayVector(bytesRefArray1, 3, blockFactory), + new BytesRefArrayVector(bytesRefArray1, 3, blockFactory).asBlock().asVector(), + new BytesRefArrayVector(bytesRefArray2, 3, blockFactory), + new BytesRefArrayVector(bytesRefArray1, 3, blockFactory).filter(0, 1, 2), + new BytesRefArrayVector(bytesRefArray2, 4, blockFactory).filter(0, 1, 2), + blockFactory.newConstantBytesRefBlockWith(new BytesRef("1"), 3).asVector(), + blockFactory.newBytesRefBlockBuilder(3) .appendBytesRef(new BytesRef("1")) .appendBytesRef(new BytesRef("1")) .appendBytesRef(new BytesRef("1")) .build() .asVector(), - BytesRefBlock.newBlockBuilder(3) + blockFactory.newBytesRefBlockBuilder(3) .appendBytesRef(new BytesRef("1")) .appendBytesRef(new BytesRef("1")) .appendBytesRef(new BytesRef("1")) .build() .asVector() .filter(0, 1, 2), - BytesRefBlock.newBlockBuilder(3) + blockFactory.newBytesRefBlockBuilder(3) .appendBytesRef(new BytesRef("1")) .appendBytesRef(new BytesRef("4")) .appendBytesRef(new BytesRef("1")) @@ -137,7 +140,7 @@ public void testVectorEquality() { .build() .filter(0, 2, 3) .asVector(), - BytesRefBlock.newBlockBuilder(3) + blockFactory.newBytesRefBlockBuilder(3) .appendBytesRef(new BytesRef("1")) .appendBytesRef(new BytesRef("4")) .appendBytesRef(new BytesRef("1")) @@ -154,43 +157,45 @@ public void testBlockEquality() { // all these blocks should be equivalent try (var bytesRefArray1 = arrayOf("1", "2", "3"); var bytesRefArray2 = arrayOf("1", "2", "3", "4")) { List blocks = List.of( - new BytesRefArrayVector(bytesRefArray1, 3).asBlock(), + new BytesRefArrayVector(bytesRefArray1, 3, blockFactory).asBlock(), new BytesRefArrayBlock( bytesRefArray1, 3, new int[] { 0, 1, 2, 3 }, BitSet.valueOf(new byte[] { 0b000 }), - randomFrom(Block.MvOrdering.values()) + randomFrom(Block.MvOrdering.values()), + blockFactory ), new BytesRefArrayBlock( bytesRefArray2, 3, new int[] { 0, 1, 2, 3 }, BitSet.valueOf(new byte[] { 0b1000 }), - randomFrom(Block.MvOrdering.values()) + randomFrom(Block.MvOrdering.values()), + blockFactory ), - new BytesRefArrayVector(bytesRefArray1, 3).filter(0, 1, 2).asBlock(), - new BytesRefArrayVector(bytesRefArray2, 3).filter(0, 1, 2).asBlock(), - new BytesRefArrayVector(bytesRefArray2, 4).filter(0, 1, 2).asBlock(), - BytesRefBlock.newBlockBuilder(3) + new BytesRefArrayVector(bytesRefArray1, 3, blockFactory).filter(0, 1, 2).asBlock(), + new BytesRefArrayVector(bytesRefArray2, 3, blockFactory).filter(0, 1, 2).asBlock(), + new BytesRefArrayVector(bytesRefArray2, 4, blockFactory).filter(0, 1, 2).asBlock(), + blockFactory.newBytesRefBlockBuilder(3) .appendBytesRef(new BytesRef("1")) .appendBytesRef(new BytesRef("2")) .appendBytesRef(new BytesRef("3")) .build(), - BytesRefBlock.newBlockBuilder(3) + blockFactory.newBytesRefBlockBuilder(3) .appendBytesRef(new BytesRef("1")) .appendBytesRef(new BytesRef("2")) .appendBytesRef(new BytesRef("3")) .build() .filter(0, 1, 2), - 
BytesRefBlock.newBlockBuilder(3) + blockFactory.newBytesRefBlockBuilder(3) .appendBytesRef(new BytesRef("1")) .appendBytesRef(new BytesRef("4")) .appendBytesRef(new BytesRef("2")) .appendBytesRef(new BytesRef("3")) .build() .filter(0, 2, 3), - BytesRefBlock.newBlockBuilder(3) + blockFactory.newBytesRefBlockBuilder(3) .appendBytesRef(new BytesRef("1")) .appendNull() .appendBytesRef(new BytesRef("2")) @@ -204,34 +209,40 @@ public void testBlockEquality() { // all these constant-like blocks should be equivalent try (var bytesRefArray1 = arrayOf("9", "9"); var bytesRefArray2 = arrayOf("9", "9", "4")) { List moreBlocks = List.of( - new BytesRefArrayVector(bytesRefArray1, 2).asBlock(), + new BytesRefArrayVector(bytesRefArray1, 2, blockFactory).asBlock(), new BytesRefArrayBlock( bytesRefArray1, 2, new int[] { 0, 1, 2 }, BitSet.valueOf(new byte[] { 0b000 }), - randomFrom(Block.MvOrdering.values()) + randomFrom(Block.MvOrdering.values()), + blockFactory ), new BytesRefArrayBlock( bytesRefArray2, 2, new int[] { 0, 1, 2 }, BitSet.valueOf(new byte[] { 0b100 }), - randomFrom(Block.MvOrdering.values()) + randomFrom(Block.MvOrdering.values()), + blockFactory ), - new BytesRefArrayVector(bytesRefArray1, 2).filter(0, 1).asBlock(), - new BytesRefArrayVector(bytesRefArray2, 2).filter(0, 1).asBlock(), - new BytesRefArrayVector(bytesRefArray2, 3).filter(0, 1).asBlock(), - BytesRefBlock.newConstantBlockWith(new BytesRef("9"), 2), - BytesRefBlock.newBlockBuilder(2).appendBytesRef(new BytesRef("9")).appendBytesRef(new BytesRef("9")).build(), - BytesRefBlock.newBlockBuilder(2).appendBytesRef(new BytesRef("9")).appendBytesRef(new BytesRef("9")).build().filter(0, 1), - BytesRefBlock.newBlockBuilder(2) + new BytesRefArrayVector(bytesRefArray1, 2, blockFactory).filter(0, 1).asBlock(), + new BytesRefArrayVector(bytesRefArray2, 2, blockFactory).filter(0, 1).asBlock(), + new BytesRefArrayVector(bytesRefArray2, 3, blockFactory).filter(0, 1).asBlock(), + blockFactory.newConstantBytesRefBlockWith(new BytesRef("9"), 2), + blockFactory.newBytesRefBlockBuilder(2).appendBytesRef(new BytesRef("9")).appendBytesRef(new BytesRef("9")).build(), + blockFactory.newBytesRefBlockBuilder(2) + .appendBytesRef(new BytesRef("9")) + .appendBytesRef(new BytesRef("9")) + .build() + .filter(0, 1), + blockFactory.newBytesRefBlockBuilder(2) .appendBytesRef(new BytesRef("9")) .appendBytesRef(new BytesRef("4")) .appendBytesRef(new BytesRef("9")) .build() .filter(0, 2), - BytesRefBlock.newBlockBuilder(2) + blockFactory.newBytesRefBlockBuilder(2) .appendBytesRef(new BytesRef("9")) .appendNull() .appendBytesRef(new BytesRef("9")) @@ -252,25 +263,25 @@ public void testVectorInequality() { var bytesRefArray5 = arrayOf("1", "2", "4") ) { List notEqualVectors = List.of( - new BytesRefArrayVector(bytesRefArray1, 1), - new BytesRefArrayVector(bytesRefArray2, 1), - new BytesRefArrayVector(bytesRefArray3, 2), - new BytesRefArrayVector(bytesRefArray4, 3), - new BytesRefArrayVector(bytesRefArray5, 3), - BytesRefBlock.newConstantBlockWith(new BytesRef("9"), 2).asVector(), - BytesRefBlock.newBlockBuilder(2) + new BytesRefArrayVector(bytesRefArray1, 1, blockFactory), + new BytesRefArrayVector(bytesRefArray2, 1, blockFactory), + new BytesRefArrayVector(bytesRefArray3, 2, blockFactory), + new BytesRefArrayVector(bytesRefArray4, 3, blockFactory), + new BytesRefArrayVector(bytesRefArray5, 3, blockFactory), + blockFactory.newConstantBytesRefBlockWith(new BytesRef("9"), 2).asVector(), + blockFactory.newBytesRefBlockBuilder(2) .appendBytesRef(new BytesRef("1")) 
.appendBytesRef(new BytesRef("2")) .build() .asVector() .filter(1), - BytesRefBlock.newBlockBuilder(3) + blockFactory.newBytesRefBlockBuilder(3) .appendBytesRef(new BytesRef("1")) .appendBytesRef(new BytesRef("2")) .appendBytesRef(new BytesRef("5")) .build() .asVector(), - BytesRefBlock.newBlockBuilder(1) + blockFactory.newBytesRefBlockBuilder(1) .appendBytesRef(new BytesRef("1")) .appendBytesRef(new BytesRef("2")) .appendBytesRef(new BytesRef("3")) @@ -292,27 +303,35 @@ public void testBlockInequality() { var bytesRefArray5 = arrayOf("1", "2", "4") ) { List notEqualBlocks = List.of( - new BytesRefArrayVector(bytesRefArray1, 1).asBlock(), - new BytesRefArrayVector(bytesRefArray2, 1).asBlock(), - new BytesRefArrayVector(bytesRefArray3, 2).asBlock(), - new BytesRefArrayVector(bytesRefArray4, 3).asBlock(), - new BytesRefArrayVector(bytesRefArray5, 3).asBlock(), - BytesRefBlock.newConstantBlockWith(new BytesRef("9"), 2), - BytesRefBlock.newBlockBuilder(2).appendBytesRef(new BytesRef("1")).appendBytesRef(new BytesRef("2")).build().filter(1), - BytesRefBlock.newBlockBuilder(3) + new BytesRefArrayVector(bytesRefArray1, 1, blockFactory).asBlock(), + new BytesRefArrayVector(bytesRefArray2, 1, blockFactory).asBlock(), + new BytesRefArrayVector(bytesRefArray3, 2, blockFactory).asBlock(), + new BytesRefArrayVector(bytesRefArray4, 3, blockFactory).asBlock(), + new BytesRefArrayVector(bytesRefArray5, 3, blockFactory).asBlock(), + blockFactory.newConstantBytesRefBlockWith(new BytesRef("9"), 2), + blockFactory.newBytesRefBlockBuilder(2) + .appendBytesRef(new BytesRef("1")) + .appendBytesRef(new BytesRef("2")) + .build() + .filter(1), + blockFactory.newBytesRefBlockBuilder(3) .appendBytesRef(new BytesRef("1")) .appendBytesRef(new BytesRef("2")) .appendBytesRef(new BytesRef("5")) .build(), - BytesRefBlock.newBlockBuilder(1) + blockFactory.newBytesRefBlockBuilder(1) .appendBytesRef(new BytesRef("1")) .appendBytesRef(new BytesRef("2")) .appendBytesRef(new BytesRef("3")) .appendBytesRef(new BytesRef("4")) .build(), - BytesRefBlock.newBlockBuilder(1).appendBytesRef(new BytesRef("1")).appendNull().build(), - BytesRefBlock.newBlockBuilder(1).appendBytesRef(new BytesRef("1")).appendNull().appendBytesRef(new BytesRef("3")).build(), - BytesRefBlock.newBlockBuilder(1).appendBytesRef(new BytesRef("1")).appendBytesRef(new BytesRef("3")).build() + blockFactory.newBytesRefBlockBuilder(1).appendBytesRef(new BytesRef("1")).appendNull().build(), + blockFactory.newBytesRefBlockBuilder(1) + .appendBytesRef(new BytesRef("1")) + .appendNull() + .appendBytesRef(new BytesRef("3")) + .build(), + blockFactory.newBytesRefBlockBuilder(1).appendBytesRef(new BytesRef("1")).appendBytesRef(new BytesRef("3")).build() ); assertAllNotEquals(notEqualBlocks); } @@ -320,8 +339,12 @@ public void testBlockInequality() { public void testSimpleBlockWithSingleNull() { List blocks = List.of( - BytesRefBlock.newBlockBuilder(3).appendBytesRef(new BytesRef("1")).appendNull().appendBytesRef(new BytesRef("3")).build(), - BytesRefBlock.newBlockBuilder(3).appendBytesRef(new BytesRef("1")).appendNull().appendBytesRef(new BytesRef("3")).build() + blockFactory.newBytesRefBlockBuilder(3) + .appendBytesRef(new BytesRef("1")) + .appendNull() + .appendBytesRef(new BytesRef("3")) + .build(), + blockFactory.newBytesRefBlockBuilder(3).appendBytesRef(new BytesRef("1")).appendNull().appendBytesRef(new BytesRef("3")).build() ); assertEquals(3, blocks.get(0).getPositionCount()); assertTrue(blocks.get(0).isNull(1)); @@ -331,8 +354,8 @@ public void 
testSimpleBlockWithSingleNull() { public void testSimpleBlockWithManyNulls() { int positions = randomIntBetween(1, 256); boolean grow = randomBoolean(); - BytesRefBlock.Builder builder1 = BytesRefBlock.newBlockBuilder(grow ? 0 : positions); - BytesRefBlock.Builder builder2 = BytesRefBlock.newBlockBuilder(grow ? 0 : positions); + BytesRefBlock.Builder builder1 = blockFactory.newBytesRefBlockBuilder(grow ? 0 : positions); + BytesRefBlock.Builder builder2 = blockFactory.newBytesRefBlockBuilder(grow ? 0 : positions); for (int p = 0; p < positions; p++) { builder1.appendNull(); builder2.appendNull(); @@ -349,12 +372,12 @@ public void testSimpleBlockWithManyNulls() { public void testSimpleBlockWithSingleMultiValue() { List blocks = List.of( - BytesRefBlock.newBlockBuilder(1) + blockFactory.newBytesRefBlockBuilder(1) .beginPositionEntry() .appendBytesRef(new BytesRef("1a")) .appendBytesRef(new BytesRef("2b")) .build(), - BytesRefBlock.newBlockBuilder(1) + blockFactory.newBytesRefBlockBuilder(1) .beginPositionEntry() .appendBytesRef(new BytesRef("1a")) .appendBytesRef(new BytesRef("2b")) @@ -368,9 +391,9 @@ public void testSimpleBlockWithSingleMultiValue() { public void testSimpleBlockWithManyMultiValues() { int positions = randomIntBetween(1, 256); boolean grow = randomBoolean(); - BytesRefBlock.Builder builder1 = BytesRefBlock.newBlockBuilder(grow ? 0 : positions); - BytesRefBlock.Builder builder2 = BytesRefBlock.newBlockBuilder(grow ? 0 : positions); - BytesRefBlock.Builder builder3 = BytesRefBlock.newBlockBuilder(grow ? 0 : positions); + BytesRefBlock.Builder builder1 = blockFactory.newBytesRefBlockBuilder(grow ? 0 : positions); + BytesRefBlock.Builder builder2 = blockFactory.newBytesRefBlockBuilder(grow ? 0 : positions); + BytesRefBlock.Builder builder3 = blockFactory.newBytesRefBlockBuilder(grow ? 
0 : positions);
for (int pos = 0; pos < positions; pos++) {
builder1.beginPositionEntry();
builder2.beginPositionEntry();
diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/DocVectorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/DocVectorTests.java
index e2eff15fcb769..7adf975c2b6d7 100644
--- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/DocVectorTests.java
+++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/DocVectorTests.java
@@ -8,9 +8,10 @@
package org.elasticsearch.compute.data;
import org.elasticsearch.common.Randomness;
+import org.elasticsearch.common.breaker.CircuitBreakingException;
import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.compute.operator.ComputeTestCase;
import org.elasticsearch.core.Releasables;
-import org.elasticsearch.test.ESTestCase;
import java.util.ArrayList;
import java.util.Collections;
@@ -21,7 +22,7 @@
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.is;
-public class DocVectorTests extends ESTestCase {
+public class DocVectorTests extends ComputeTestCase {
public void testNonDecreasingSetTrue() {
int length = between(1, 100);
DocVector docs = new DocVector(intRange(0, length), intRange(0, length), intRange(0, length), true);
@@ -29,28 +30,64 @@ public void testNonDecreasingSetTrue() {
}
public void testNonDecreasingSetFalse() {
- DocVector docs = new DocVector(intRange(0, 2), intRange(0, 2), new IntArrayVector(new int[] { 1, 0 }, 2), false);
+ BlockFactory blockFactory = blockFactory();
+ DocVector docs = new DocVector(intRange(0, 2), intRange(0, 2), blockFactory.newIntArrayVector(new int[] { 1, 0 }, 2), false);
assertFalse(docs.singleSegmentNonDecreasing());
+ docs.close();
}
public void testNonDecreasingNonConstantShard() {
- DocVector docs = new DocVector(intRange(0, 2), IntBlock.newConstantBlockWith(0, 2).asVector(), intRange(0, 2), null);
+ BlockFactory blockFactory = blockFactory();
+ DocVector docs = new DocVector(intRange(0, 2), blockFactory.newConstantIntVector(0, 2), intRange(0, 2), null);
assertFalse(docs.singleSegmentNonDecreasing());
+ docs.close();
}
public void testNonDecreasingNonConstantSegment() {
- DocVector docs = new DocVector(IntBlock.newConstantBlockWith(0, 2).asVector(), intRange(0, 2), intRange(0, 2), null);
+ BlockFactory blockFactory = blockFactory();
+ DocVector docs = new DocVector(blockFactory.newConstantIntVector(0, 2), intRange(0, 2), intRange(0, 2), null);
assertFalse(docs.singleSegmentNonDecreasing());
+ docs.close();
}
public void testNonDecreasingDescendingDocs() {
+ BlockFactory blockFactory = blockFactory();
DocVector docs = new DocVector(
- IntBlock.newConstantBlockWith(0, 2).asVector(),
- IntBlock.newConstantBlockWith(0, 2).asVector(),
- new IntArrayVector(new int[] { 1, 0 }, 2),
+ blockFactory.newConstantIntVector(0, 2),
+ blockFactory.newConstantIntVector(0, 2),
+ blockFactory.newIntArrayVector(new int[] { 1, 0 }, 2),
null
);
assertFalse(docs.singleSegmentNonDecreasing());
+ docs.close();
+ }
+
+ private static final int MAX_BUILD_BREAKS_LIMIT = 1391;
+
+ public void testBuildBreaks() {
+ testBuildBreaks(ByteSizeValue.ofBytes(between(0, MAX_BUILD_BREAKS_LIMIT)));
+ }
+
+ public void testBuildBreaksMax() {
+ testBuildBreaks(ByteSizeValue.ofBytes(MAX_BUILD_BREAKS_LIMIT));
+ }
+
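The testBuildBreaks pair above relies on a pattern worth spelling out once: the blockFactory(limit) helper, presumably provided by the ComputeTestCase base class these tests now extend, backs the factory with a circuit breaker holding a fixed byte budget; any tracked allocation past that budget throws CircuitBreakingException with the message "over test limit", and a final breaker check proves the failure path released everything it had reserved. A minimal sketch of that pattern, assuming only the helpers already visible in this patch; assertBreaksAndFullyReleases is a hypothetical name, not part of the patch:

    private void assertBreaksAndFullyReleases(ByteSizeValue limit) {
        BlockFactory factory = blockFactory(limit); // breaker with a fixed byte budget
        Exception e = expectThrows(CircuitBreakingException.class, () -> {
            try (IntBlock.Builder builder = factory.newIntBlockBuilder(0)) {
                for (int i = 0; i < 100_000; i++) {
                    builder.appendInt(i); // keeps growing until the budget is exhausted
                }
                builder.build().close();
            }
        });
        assertThat(e.getMessage(), equalTo("over test limit"));
        assertThat(factory.breaker().getUsed(), equalTo(0L)); // nothing leaked on the failure path
    }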
+ private void testBuildBreaks(ByteSizeValue limit) {
+ int size = 100;
+ BlockFactory blockFactory = blockFactory(limit);
+ Exception e = expectThrows(CircuitBreakingException.class, () -> {
+ try (DocBlock.Builder builder = DocBlock.newBlockBuilder(blockFactory, size)) {
+ for (int r = 0; r < size; r++) {
+ builder.appendShard(3 - size % 4);
+ builder.appendSegment(size % 10);
+ builder.appendDoc(size);
+ }
+ builder.build().close();
+ }
+ });
+ assertThat(e.getMessage(), equalTo("over test limit"));
+ logger.info("break position", e);
+ assertThat(blockFactory.breaker().getUsed(), equalTo(0L));
}
public void testShardSegmentDocMap() {
@@ -99,15 +136,17 @@ public void testRandomShardSegmentDocMap() {
}
private void assertShardSegmentDocMap(int[][] data, int[][] expected) {
- BlockFactory blockFactory = BlockFactoryTests.blockFactory(ByteSizeValue.ofGb(1));
- try (DocBlock.Builder builder = DocBlock.newBlockBuilder(data.length, blockFactory)) {
+ BlockFactory blockFactory = blockFactory();
+ try (DocBlock.Builder builder = DocBlock.newBlockBuilder(blockFactory, data.length)) {
for (int r = 0; r < data.length; r++) {
builder.appendShard(data[r][0]);
builder.appendSegment(data[r][1]);
builder.appendDoc(data[r][2]);
}
try (DocVector docVector = builder.build().asVector()) {
+ assertThat(blockFactory.breaker().getUsed(), equalTo(docVector.ramBytesUsed()));
int[] forwards = docVector.shardSegmentDocMapForwards();
+ assertThat(blockFactory.breaker().getUsed(), equalTo(docVector.ramBytesUsed()));
int[][] result = new int[docVector.getPositionCount()][];
for (int p = 0; p < result.length; p++) {
@@ -132,8 +171,38 @@ private void assertShardSegmentDocMap(int[][] data, int[][] expected) {
assertThat(blockFactory.breaker().getUsed(), equalTo(0L));
}
+ // TODO these are really difficult to maintain. can we figure these out on the fly?
+ private static final int MAX_SHARD_SEGMENT_DOC_MAP_BREAKS = 2220;
+
+ public void testShardSegmentDocMapBreaks() {
+ testShardSegmentDocMapBreaks(ByteSizeValue.ofBytes(between(MAX_BUILD_BREAKS_LIMIT + 1, MAX_SHARD_SEGMENT_DOC_MAP_BREAKS)));
+ }
+
+ public void testShardSegmentDocMapBreaksMax() {
+ testShardSegmentDocMapBreaks(ByteSizeValue.ofBytes(MAX_SHARD_SEGMENT_DOC_MAP_BREAKS));
+ }
+
+ private void testShardSegmentDocMapBreaks(ByteSizeValue limit) {
+ int size = 100;
+ BlockFactory blockFactory = blockFactory(limit);
+ try (DocBlock.Builder builder = DocBlock.newBlockBuilder(blockFactory, size)) {
+ for (int r = 0; r < size; r++) {
+ builder.appendShard(3 - size % 4);
+ builder.appendSegment(size % 10);
+ builder.appendDoc(size);
+ }
+ try (DocBlock docBlock = builder.build()) {
+ Exception e = expectThrows(CircuitBreakingException.class, docBlock.asVector()::shardSegmentDocMapForwards);
+ assertThat(e.getMessage(), equalTo("over test limit"));
+ logger.info("broke at", e);
+ }
+ }
+ assertThat(blockFactory.breaker().getUsed(), equalTo(0L));
+ }
+
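The TODO above is fair: MAX_BUILD_BREAKS_LIMIT and MAX_SHARD_SEGMENT_DOC_MAP_BREAKS are hand-measured byte thresholds that silently drift whenever block accounting changes. One way the steady-state part could be derived on the fly is to build the same DocVector once against the unlimited test factory and read back its ramBytesUsed(), which is exactly the quantity assertShardSegmentDocMap already compares against the breaker. A sketch under those assumptions; measureDocVectorBytes is a hypothetical helper, and transient builder overhead (which the hard-coded limits also cover) is not captured:

    private long measureDocVectorBytes(int size) {
        BlockFactory unlimited = blockFactory(); // the effectively-unbounded factory used above
        try (DocBlock.Builder builder = DocBlock.newBlockBuilder(unlimited, size)) {
            for (int r = 0; r < size; r++) {
                builder.appendShard(3 - size % 4);
                builder.appendSegment(size % 10);
                builder.appendDoc(size);
            }
            try (DocVector docVector = builder.build().asVector()) {
                return docVector.ramBytesUsed(); // steady-state footprint only
            }
        }
    }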
object")); e = expectThrows(IllegalStateException.class, () -> page.getBlock(0)); assertThat(e.getMessage(), containsString("can't read released block")); @@ -151,17 +220,55 @@ public void testCannotDoubleRelease() { } public void testRamBytesUsedWithout() { + BlockFactory blockFactory = blockFactory(); DocVector docs = new DocVector( - IntBlock.newConstantBlockWith(0, 1).asVector(), - IntBlock.newConstantBlockWith(0, 1).asVector(), - IntBlock.newConstantBlockWith(0, 1).asVector(), + blockFactory.newConstantIntBlockWith(0, 1).asVector(), + blockFactory.newConstantIntBlockWith(0, 1).asVector(), + blockFactory.newConstantIntBlockWith(0, 1).asVector(), false ); assertThat(docs.singleSegmentNonDecreasing(), is(false)); docs.ramBytesUsed(); // ensure non-singleSegmentNonDecreasing handles nulls in ramByteUsed + docs.close(); + } + + public void testFilter() { + BlockFactory factory = blockFactory(); + try ( + DocVector docs = new DocVector( + factory.newConstantIntVector(0, 10), + factory.newConstantIntVector(0, 10), + factory.newIntArrayVector(new int[] { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 }, 10), + false + ); + DocVector filtered = docs.filter(1, 2, 3); + DocVector expected = new DocVector( + factory.newConstantIntVector(0, 3), + factory.newConstantIntVector(0, 3), + factory.newIntArrayVector(new int[] { 1, 2, 3 }, 3), + false + ); + ) { + assertThat(filtered, equalTo(expected)); + } + } + + public void testFilterBreaks() { + BlockFactory factory = blockFactory(ByteSizeValue.ofBytes(between(250, 370))); + try ( + DocVector docs = new DocVector( + factory.newConstantIntVector(0, 10), + factory.newConstantIntVector(0, 10), + factory.newIntArrayVector(new int[] { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 }, 10), + false + ) + ) { + Exception e = expectThrows(CircuitBreakingException.class, () -> docs.filter(1, 2, 3)); + assertThat(e.getMessage(), equalTo("over test limit")); + } } IntVector intRange(int startInclusive, int endExclusive) { - return IntVector.range(startInclusive, endExclusive, BlockFactory.getNonBreakingInstance()); + return IntVector.range(startInclusive, endExclusive, TestBlockFactory.getNonBreakingInstance()); } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/DoubleBlockEqualityTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/DoubleBlockEqualityTests.java index 7dda97f52834e..e8f8fbcbf1c4c 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/DoubleBlockEqualityTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/DoubleBlockEqualityTests.java @@ -7,22 +7,25 @@ package org.elasticsearch.compute.data; -import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.compute.operator.ComputeTestCase; +import org.elasticsearch.core.Releasables; import java.util.BitSet; import java.util.List; -public class DoubleBlockEqualityTests extends ESTestCase { +public class DoubleBlockEqualityTests extends ComputeTestCase { + + static final BlockFactory blockFactory = TestBlockFactory.getNonBreakingInstance(); public void testEmptyVector() { // all these "empty" vectors should be equivalent List vectors = List.of( - new DoubleArrayVector(new double[] {}, 0), - new DoubleArrayVector(new double[] { 0 }, 0), - DoubleBlock.newConstantBlockWith(0, 0).asVector(), - DoubleBlock.newConstantBlockWith(0, 0).filter().asVector(), - DoubleBlock.newBlockBuilder(0).build().asVector(), - DoubleBlock.newBlockBuilder(0).appendDouble(1).build().asVector().filter() + 
blockFactory.newDoubleArrayVector(new double[] {}, 0), + blockFactory.newDoubleArrayVector(new double[] { 0 }, 0), + blockFactory.newConstantDoubleVector(0, 0), + blockFactory.newConstantDoubleBlockWith(0, 0).filter().asVector(), + blockFactory.newDoubleBlockBuilder(0).build().asVector(), + blockFactory.newDoubleBlockBuilder(0).appendDouble(1).build().asVector().filter() ); assertAllEquals(vectors); } @@ -30,41 +33,42 @@ public void testEmptyVector() { public void testEmptyBlock() { // all these "empty" vectors should be equivalent List blocks = List.of( - new DoubleArrayBlock( + blockFactory.newDoubleArrayBlock( new double[] {}, 0, new int[] {}, BitSet.valueOf(new byte[] { 0b00 }), randomFrom(Block.MvOrdering.values()) ), - new DoubleArrayBlock( + blockFactory.newDoubleArrayBlock( new double[] { 0 }, 0, new int[] {}, BitSet.valueOf(new byte[] { 0b00 }), randomFrom(Block.MvOrdering.values()) ), - DoubleBlock.newConstantBlockWith(0, 0), - DoubleBlock.newBlockBuilder(0).build(), - DoubleBlock.newBlockBuilder(0).appendDouble(1).build().filter(), - DoubleBlock.newBlockBuilder(0).appendNull().build().filter() + blockFactory.newConstantDoubleBlockWith(0, 0), + blockFactory.newDoubleBlockBuilder(0).build(), + blockFactory.newDoubleBlockBuilder(0).appendDouble(1).build().filter(), + blockFactory.newDoubleBlockBuilder(0).appendNull().build().filter() ); assertAllEquals(blocks); + Releasables.close(blocks); } public void testVectorEquality() { // all these vectors should be equivalent List vectors = List.of( - new DoubleArrayVector(new double[] { 1, 2, 3 }, 3), - new DoubleArrayVector(new double[] { 1, 2, 3 }, 3).asBlock().asVector(), - new DoubleArrayVector(new double[] { 1, 2, 3, 4 }, 3), - new DoubleArrayVector(new double[] { 1, 2, 3 }, 3).filter(0, 1, 2), - new DoubleArrayVector(new double[] { 1, 2, 3, 4 }, 4).filter(0, 1, 2), - new DoubleArrayVector(new double[] { 0, 1, 2, 3 }, 4).filter(1, 2, 3), - new DoubleArrayVector(new double[] { 1, 4, 2, 3 }, 4).filter(0, 2, 3), - DoubleBlock.newBlockBuilder(3).appendDouble(1).appendDouble(2).appendDouble(3).build().asVector(), - DoubleBlock.newBlockBuilder(3).appendDouble(1).appendDouble(2).appendDouble(3).build().asVector().filter(0, 1, 2), - DoubleBlock.newBlockBuilder(3) + blockFactory.newDoubleArrayVector(new double[] { 1, 2, 3 }, 3), + blockFactory.newDoubleArrayVector(new double[] { 1, 2, 3 }, 3).asBlock().asVector(), + blockFactory.newDoubleArrayVector(new double[] { 1, 2, 3, 4 }, 3), + blockFactory.newDoubleArrayVector(new double[] { 1, 2, 3 }, 3).filter(0, 1, 2), + blockFactory.newDoubleArrayVector(new double[] { 1, 2, 3, 4 }, 4).filter(0, 1, 2), + blockFactory.newDoubleArrayVector(new double[] { 0, 1, 2, 3 }, 4).filter(1, 2, 3), + blockFactory.newDoubleArrayVector(new double[] { 1, 4, 2, 3 }, 4).filter(0, 2, 3), + blockFactory.newDoubleBlockBuilder(3).appendDouble(1).appendDouble(2).appendDouble(3).build().asVector(), + blockFactory.newDoubleBlockBuilder(3).appendDouble(1).appendDouble(2).appendDouble(3).build().asVector().filter(0, 1, 2), + blockFactory.newDoubleBlockBuilder(3) .appendDouble(1) .appendDouble(4) .appendDouble(2) @@ -72,7 +76,7 @@ public void testVectorEquality() { .build() .filter(0, 2, 3) .asVector(), - DoubleBlock.newBlockBuilder(3) + blockFactory.newDoubleBlockBuilder(3) .appendDouble(1) .appendDouble(4) .appendDouble(2) @@ -85,17 +89,17 @@ public void testVectorEquality() { // all these constant-like vectors should be equivalent List moreVectors = List.of( - new DoubleArrayVector(new double[] { 1, 1, 1 }, 3), - new 
DoubleArrayVector(new double[] { 1, 1, 1 }, 3).asBlock().asVector(), - new DoubleArrayVector(new double[] { 1, 1, 1, 1 }, 3), - new DoubleArrayVector(new double[] { 1, 1, 1 }, 3).filter(0, 1, 2), - new DoubleArrayVector(new double[] { 1, 1, 1, 4 }, 4).filter(0, 1, 2), - new DoubleArrayVector(new double[] { 3, 1, 1, 1 }, 4).filter(1, 2, 3), - new DoubleArrayVector(new double[] { 1, 4, 1, 1 }, 4).filter(0, 2, 3), - DoubleBlock.newConstantBlockWith(1, 3).asVector(), - DoubleBlock.newBlockBuilder(3).appendDouble(1).appendDouble(1).appendDouble(1).build().asVector(), - DoubleBlock.newBlockBuilder(3).appendDouble(1).appendDouble(1).appendDouble(1).build().asVector().filter(0, 1, 2), - DoubleBlock.newBlockBuilder(3) + blockFactory.newDoubleArrayVector(new double[] { 1, 1, 1 }, 3), + blockFactory.newDoubleArrayVector(new double[] { 1, 1, 1 }, 3).asBlock().asVector(), + blockFactory.newDoubleArrayVector(new double[] { 1, 1, 1, 1 }, 3), + blockFactory.newDoubleArrayVector(new double[] { 1, 1, 1 }, 3).filter(0, 1, 2), + blockFactory.newDoubleArrayVector(new double[] { 1, 1, 1, 4 }, 4).filter(0, 1, 2), + blockFactory.newDoubleArrayVector(new double[] { 3, 1, 1, 1 }, 4).filter(1, 2, 3), + blockFactory.newDoubleArrayVector(new double[] { 1, 4, 1, 1 }, 4).filter(0, 2, 3), + blockFactory.newConstantDoubleBlockWith(1, 3).asVector(), + blockFactory.newDoubleBlockBuilder(3).appendDouble(1).appendDouble(1).appendDouble(1).build().asVector(), + blockFactory.newDoubleBlockBuilder(3).appendDouble(1).appendDouble(1).appendDouble(1).build().asVector().filter(0, 1, 2), + blockFactory.newDoubleBlockBuilder(3) .appendDouble(1) .appendDouble(4) .appendDouble(1) @@ -103,7 +107,7 @@ public void testVectorEquality() { .build() .filter(0, 2, 3) .asVector(), - DoubleBlock.newBlockBuilder(3) + blockFactory.newDoubleBlockBuilder(3) .appendDouble(1) .appendDouble(4) .appendDouble(1) @@ -118,58 +122,62 @@ public void testVectorEquality() { public void testBlockEquality() { // all these blocks should be equivalent List blocks = List.of( - new DoubleArrayVector(new double[] { 1, 2, 3 }, 3).asBlock(), + blockFactory.newDoubleArrayVector(new double[] { 1, 2, 3 }, 3).asBlock(), new DoubleArrayBlock( new double[] { 1, 2, 3 }, 3, new int[] { 0, 1, 2, 3 }, BitSet.valueOf(new byte[] { 0b000 }), - randomFrom(Block.MvOrdering.values()) + randomFrom(Block.MvOrdering.values()), + blockFactory ), new DoubleArrayBlock( new double[] { 1, 2, 3, 4 }, 3, new int[] { 0, 1, 2, 3 }, BitSet.valueOf(new byte[] { 0b1000 }), - randomFrom(Block.MvOrdering.values()) + randomFrom(Block.MvOrdering.values()), + blockFactory ), - new DoubleArrayVector(new double[] { 1, 2, 3 }, 3).filter(0, 1, 2).asBlock(), - new DoubleArrayVector(new double[] { 1, 2, 3, 4 }, 3).filter(0, 1, 2).asBlock(), - new DoubleArrayVector(new double[] { 1, 2, 3, 4 }, 4).filter(0, 1, 2).asBlock(), - new DoubleArrayVector(new double[] { 1, 2, 4, 3 }, 4).filter(0, 1, 3).asBlock(), - DoubleBlock.newBlockBuilder(3).appendDouble(1).appendDouble(2).appendDouble(3).build(), - DoubleBlock.newBlockBuilder(3).appendDouble(1).appendDouble(2).appendDouble(3).build().filter(0, 1, 2), - DoubleBlock.newBlockBuilder(3).appendDouble(1).appendDouble(4).appendDouble(2).appendDouble(3).build().filter(0, 2, 3), - DoubleBlock.newBlockBuilder(3).appendDouble(1).appendNull().appendDouble(2).appendDouble(3).build().filter(0, 2, 3) + blockFactory.newDoubleArrayVector(new double[] { 1, 2, 3 }, 3).filter(0, 1, 2).asBlock(), + blockFactory.newDoubleArrayVector(new double[] { 1, 2, 3, 4 }, 3).filter(0, 1, 
2).asBlock(), + blockFactory.newDoubleArrayVector(new double[] { 1, 2, 3, 4 }, 4).filter(0, 1, 2).asBlock(), + blockFactory.newDoubleArrayVector(new double[] { 1, 2, 4, 3 }, 4).filter(0, 1, 3).asBlock(), + blockFactory.newDoubleBlockBuilder(3).appendDouble(1).appendDouble(2).appendDouble(3).build(), + blockFactory.newDoubleBlockBuilder(3).appendDouble(1).appendDouble(2).appendDouble(3).build().filter(0, 1, 2), + blockFactory.newDoubleBlockBuilder(3).appendDouble(1).appendDouble(4).appendDouble(2).appendDouble(3).build().filter(0, 2, 3), + blockFactory.newDoubleBlockBuilder(3).appendDouble(1).appendNull().appendDouble(2).appendDouble(3).build().filter(0, 2, 3) ); assertAllEquals(blocks); // all these constant-like blocks should be equivalent List moreBlocks = List.of( - new DoubleArrayVector(new double[] { 9, 9 }, 2).asBlock(), + blockFactory.newDoubleArrayVector(new double[] { 9, 9 }, 2).asBlock(), new DoubleArrayBlock( new double[] { 9, 9 }, 2, new int[] { 0, 1, 2 }, BitSet.valueOf(new byte[] { 0b000 }), - randomFrom(Block.MvOrdering.values()) + randomFrom(Block.MvOrdering.values()), + blockFactory ), new DoubleArrayBlock( new double[] { 9, 9, 4 }, 2, new int[] { 0, 1, 2 }, BitSet.valueOf(new byte[] { 0b100 }), - randomFrom(Block.MvOrdering.values()) + randomFrom(Block.MvOrdering.values()), + blockFactory ), - new DoubleArrayVector(new double[] { 9, 9 }, 2).filter(0, 1).asBlock(), - new DoubleArrayVector(new double[] { 9, 9, 4 }, 2).filter(0, 1).asBlock(), - new DoubleArrayVector(new double[] { 9, 9, 4 }, 3).filter(0, 1).asBlock(), - new DoubleArrayVector(new double[] { 9, 4, 9 }, 3).filter(0, 2).asBlock(), - DoubleBlock.newConstantBlockWith(9, 2), - DoubleBlock.newBlockBuilder(2).appendDouble(9).appendDouble(9).build(), - DoubleBlock.newBlockBuilder(2).appendDouble(9).appendDouble(9).build().filter(0, 1), - DoubleBlock.newBlockBuilder(2).appendDouble(9).appendDouble(4).appendDouble(9).build().filter(0, 2), - DoubleBlock.newBlockBuilder(2).appendDouble(9).appendNull().appendDouble(9).build().filter(0, 2) + blockFactory.newDoubleArrayVector(new double[] { 9, 9 }, 2).filter(0, 1).asBlock(), + blockFactory.newDoubleArrayVector(new double[] { 9, 9, 4 }, 2).filter(0, 1).asBlock(), + blockFactory.newDoubleArrayVector(new double[] { 9, 9, 4 }, 3).filter(0, 1).asBlock(), + blockFactory.newDoubleArrayVector(new double[] { 9, 4, 9 }, 3).filter(0, 2).asBlock(), + blockFactory.newConstantDoubleBlockWith(9, 2), + blockFactory.newDoubleBlockBuilder(2).appendDouble(9).appendDouble(9).build(), + blockFactory.newDoubleBlockBuilder(2).appendDouble(9).appendDouble(9).build().filter(0, 1), + blockFactory.newDoubleBlockBuilder(2).appendDouble(9).appendDouble(4).appendDouble(9).build().filter(0, 2), + blockFactory.newDoubleBlockBuilder(2).appendDouble(9).appendNull().appendDouble(9).build().filter(0, 2) ); assertAllEquals(moreBlocks); } @@ -177,15 +185,15 @@ public void testBlockEquality() { public void testVectorInequality() { // all these vectors should NOT be equivalent List notEqualVectors = List.of( - new DoubleArrayVector(new double[] { 1 }, 1), - new DoubleArrayVector(new double[] { 9 }, 1), - new DoubleArrayVector(new double[] { 1, 2 }, 2), - new DoubleArrayVector(new double[] { 1, 2, 3 }, 3), - new DoubleArrayVector(new double[] { 1, 2, 4 }, 3), - DoubleBlock.newConstantBlockWith(9, 2).asVector(), - DoubleBlock.newBlockBuilder(2).appendDouble(1).appendDouble(2).build().asVector().filter(1), - DoubleBlock.newBlockBuilder(3).appendDouble(1).appendDouble(2).appendDouble(5).build().asVector(), - 
DoubleBlock.newBlockBuilder(1).appendDouble(1).appendDouble(2).appendDouble(3).appendDouble(4).build().asVector() + blockFactory.newDoubleArrayVector(new double[] { 1 }, 1), + blockFactory.newDoubleArrayVector(new double[] { 9 }, 1), + blockFactory.newDoubleArrayVector(new double[] { 1, 2 }, 2), + blockFactory.newDoubleArrayVector(new double[] { 1, 2, 3 }, 3), + blockFactory.newDoubleArrayVector(new double[] { 1, 2, 4 }, 3), + blockFactory.newConstantDoubleBlockWith(9, 2).asVector(), + blockFactory.newDoubleBlockBuilder(2).appendDouble(1).appendDouble(2).build().asVector().filter(1), + blockFactory.newDoubleBlockBuilder(3).appendDouble(1).appendDouble(2).appendDouble(5).build().asVector(), + blockFactory.newDoubleBlockBuilder(1).appendDouble(1).appendDouble(2).appendDouble(3).appendDouble(4).build().asVector() ); assertAllNotEquals(notEqualVectors); } @@ -193,27 +201,27 @@ public void testVectorInequality() { public void testBlockInequality() { // all these blocks should NOT be equivalent List notEqualBlocks = List.of( - new DoubleArrayVector(new double[] { 1 }, 1).asBlock(), - new DoubleArrayVector(new double[] { 9 }, 1).asBlock(), - new DoubleArrayVector(new double[] { 1, 2 }, 2).asBlock(), - new DoubleArrayVector(new double[] { 1, 2, 3 }, 3).asBlock(), - new DoubleArrayVector(new double[] { 1, 2, 4 }, 3).asBlock(), - DoubleBlock.newConstantBlockWith(9, 2), - DoubleBlock.newBlockBuilder(2).appendDouble(1).appendDouble(2).build().filter(1), - DoubleBlock.newBlockBuilder(3).appendDouble(1).appendDouble(2).appendDouble(5).build(), - DoubleBlock.newBlockBuilder(1).appendDouble(1).appendDouble(2).appendDouble(3).appendDouble(4).build(), - DoubleBlock.newBlockBuilder(1).appendDouble(1).appendNull().build(), - DoubleBlock.newBlockBuilder(1).appendDouble(1).appendNull().appendDouble(3).build(), - DoubleBlock.newBlockBuilder(1).appendDouble(1).appendDouble(3).build(), - DoubleBlock.newBlockBuilder(3).appendDouble(1).beginPositionEntry().appendDouble(2).appendDouble(3).build() + blockFactory.newDoubleArrayVector(new double[] { 1 }, 1).asBlock(), + blockFactory.newDoubleArrayVector(new double[] { 9 }, 1).asBlock(), + blockFactory.newDoubleArrayVector(new double[] { 1, 2 }, 2).asBlock(), + blockFactory.newDoubleArrayVector(new double[] { 1, 2, 3 }, 3).asBlock(), + blockFactory.newDoubleArrayVector(new double[] { 1, 2, 4 }, 3).asBlock(), + blockFactory.newConstantDoubleBlockWith(9, 2), + blockFactory.newDoubleBlockBuilder(2).appendDouble(1).appendDouble(2).build().filter(1), + blockFactory.newDoubleBlockBuilder(3).appendDouble(1).appendDouble(2).appendDouble(5).build(), + blockFactory.newDoubleBlockBuilder(1).appendDouble(1).appendDouble(2).appendDouble(3).appendDouble(4).build(), + blockFactory.newDoubleBlockBuilder(1).appendDouble(1).appendNull().build(), + blockFactory.newDoubleBlockBuilder(1).appendDouble(1).appendNull().appendDouble(3).build(), + blockFactory.newDoubleBlockBuilder(1).appendDouble(1).appendDouble(3).build(), + blockFactory.newDoubleBlockBuilder(3).appendDouble(1).beginPositionEntry().appendDouble(2).appendDouble(3).build() ); assertAllNotEquals(notEqualBlocks); } public void testSimpleBlockWithSingleNull() { List blocks = List.of( - DoubleBlock.newBlockBuilder(3).appendDouble(1.1).appendNull().appendDouble(3.1).build(), - DoubleBlock.newBlockBuilder(3).appendDouble(1.1).appendNull().appendDouble(3.1).build() + blockFactory.newDoubleBlockBuilder(3).appendDouble(1.1).appendNull().appendDouble(3.1).build(), + 
blockFactory.newDoubleBlockBuilder(3).appendDouble(1.1).appendNull().appendDouble(3.1).build() ); assertEquals(3, blocks.get(0).getPositionCount()); assertTrue(blocks.get(0).isNull(1)); @@ -223,8 +231,8 @@ public void testSimpleBlockWithSingleNull() { public void testSimpleBlockWithManyNulls() { int positions = randomIntBetween(1, 256); boolean grow = randomBoolean(); - DoubleBlock.Builder builder1 = DoubleBlock.newBlockBuilder(grow ? 0 : positions); - DoubleBlock.Builder builder2 = DoubleBlock.newBlockBuilder(grow ? 0 : positions); + DoubleBlock.Builder builder1 = blockFactory.newDoubleBlockBuilder(grow ? 0 : positions); + DoubleBlock.Builder builder2 = blockFactory.newDoubleBlockBuilder(grow ? 0 : positions); for (int p = 0; p < positions; p++) { builder1.appendNull(); builder2.appendNull(); @@ -241,8 +249,8 @@ public void testSimpleBlockWithManyNulls() { public void testSimpleBlockWithSingleMultiValue() { List blocks = List.of( - DoubleBlock.newBlockBuilder(1).beginPositionEntry().appendDouble(1.1).appendDouble(2.2).build(), - DoubleBlock.newBlockBuilder(1).beginPositionEntry().appendDouble(1.1).appendDouble(2.2).build() + blockFactory.newDoubleBlockBuilder(1).beginPositionEntry().appendDouble(1.1).appendDouble(2.2).build(), + blockFactory.newDoubleBlockBuilder(1).beginPositionEntry().appendDouble(1.1).appendDouble(2.2).build() ); assert blocks.get(0).getPositionCount() == 1 && blocks.get(0).getValueCount(0) == 2; assertAllEquals(blocks); @@ -251,9 +259,9 @@ public void testSimpleBlockWithSingleMultiValue() { public void testSimpleBlockWithManyMultiValues() { int positions = randomIntBetween(1, 256); boolean grow = randomBoolean(); - DoubleBlock.Builder builder1 = DoubleBlock.newBlockBuilder(grow ? 0 : positions); - DoubleBlock.Builder builder2 = DoubleBlock.newBlockBuilder(grow ? 0 : positions); - DoubleBlock.Builder builder3 = DoubleBlock.newBlockBuilder(grow ? 0 : positions); + DoubleBlock.Builder builder1 = blockFactory.newDoubleBlockBuilder(grow ? 0 : positions); + DoubleBlock.Builder builder2 = blockFactory.newDoubleBlockBuilder(grow ? 0 : positions); + DoubleBlock.Builder builder3 = blockFactory.newDoubleBlockBuilder(grow ? 
0 : positions); for (int pos = 0; pos < positions; pos++) { builder1.beginPositionEntry(); builder2.beginPositionEntry(); diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/FilteredBlockTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/FilteredBlockTests.java index f43159b7ce9bd..dc78b3715d12a 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/FilteredBlockTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/FilteredBlockTests.java @@ -14,6 +14,7 @@ import org.elasticsearch.common.util.BytesRefArray; import org.elasticsearch.common.util.MockBigArrays; import org.elasticsearch.common.util.PageCacheRecycler; +import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Releasables; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.test.ESTestCase; @@ -196,57 +197,85 @@ public void testFilterOnNoNullsBlock() { public void testFilterToStringSimple() { BitSet nulls = BitSet.valueOf(new byte[] { 0x08 }); // any non-empty bitset, that does not affect the filter, should suffice - var boolVector = new BooleanArrayVector(new boolean[] { true, false, false, true }, 4); - var boolBlock = new BooleanArrayBlock( + var boolVector = blockFactory.newBooleanArrayVector(new boolean[] { true, false, false, true }, 4); + var boolBlock = blockFactory.newBooleanArrayBlock( new boolean[] { true, false, false, true }, 4, null, nulls, randomFrom(Block.MvOrdering.values()) ); - for (Object obj : List.of(boolVector.filter(0, 2), boolVector.asBlock().filter(0, 2), boolBlock.filter(0, 2))) { + for (Releasable obj : List.of(boolVector.filter(0, 2), boolVector.asBlock().filter(0, 2), boolBlock.filter(0, 2))) { String s = obj.toString(); assertThat(s, containsString("[true, false]")); assertThat(s, containsString("positions=2")); + Releasables.close(obj); } + Releasables.close(boolVector, boolBlock); - var intVector = new IntArrayVector(new int[] { 10, 20, 30, 40 }, 4); - var intBlock = new IntArrayBlock(new int[] { 10, 20, 30, 40 }, 4, null, nulls, randomFrom(Block.MvOrdering.values())); - for (Object obj : List.of(intVector.filter(0, 2), intVector.asBlock().filter(0, 2), intBlock.filter(0, 2))) { + var intVector = blockFactory.newIntArrayVector(new int[] { 10, 20, 30, 40 }, 4); + var intBlock = blockFactory.newIntArrayBlock(new int[] { 10, 20, 30, 40 }, 4, null, nulls, randomFrom(Block.MvOrdering.values())); + for (Releasable obj : List.of(intVector.filter(0, 2), intVector.asBlock().filter(0, 2), intBlock.filter(0, 2))) { String s = obj.toString(); assertThat(s, containsString("[10, 30]")); assertThat(s, containsString("positions=2")); + Releasables.close(obj); } + Releasables.close(intVector, intBlock); - var longVector = new LongArrayVector(new long[] { 100L, 200L, 300L, 400L }, 4); - var longBlock = new LongArrayBlock(new long[] { 100L, 200L, 300L, 400L }, 4, null, nulls, randomFrom(Block.MvOrdering.values())); - for (Object obj : List.of(longVector.filter(0, 2), longVector.asBlock().filter(0, 2), longBlock.filter(0, 2))) { + var longVector = blockFactory.newLongArrayVector(new long[] { 100L, 200L, 300L, 400L }, 4); + var longBlock = blockFactory.newLongArrayBlock( + new long[] { 100L, 200L, 300L, 400L }, + 4, + null, + nulls, + randomFrom(Block.MvOrdering.values()) + ); + for (Releasable obj : List.of(longVector.filter(0, 2), longVector.asBlock().filter(0, 2), longBlock.filter(0, 2))) { String s = obj.toString(); assertThat(s, 
containsString("[100, 300]")); assertThat(s, containsString("positions=2")); + Releasables.close(obj); } - var doubleVector = new DoubleArrayVector(new double[] { 1.1, 2.2, 3.3, 4.4 }, 4); - var doubleBlock = new DoubleArrayBlock(new double[] { 1.1, 2.2, 3.3, 4.4 }, 4, null, nulls, randomFrom(Block.MvOrdering.values())); - for (Object obj : List.of(doubleVector.filter(0, 2), doubleVector.asBlock().filter(0, 2), doubleBlock.filter(0, 2))) { + Releasables.close(longVector, longBlock); + + var doubleVector = blockFactory.newDoubleArrayVector(new double[] { 1.1, 2.2, 3.3, 4.4 }, 4); + var doubleBlock = blockFactory.newDoubleArrayBlock( + new double[] { 1.1, 2.2, 3.3, 4.4 }, + 4, + null, + nulls, + randomFrom(Block.MvOrdering.values()) + ); + for (Releasable obj : List.of(doubleVector.filter(0, 2), doubleVector.asBlock().filter(0, 2), doubleBlock.filter(0, 2))) { String s = obj.toString(); assertThat(s, containsString("[1.1, 3.3]")); assertThat(s, containsString("positions=2")); + Releasables.close(obj); } + Releasables.close(doubleVector, doubleBlock); + assert new BytesRef("1a").toString().equals("[31 61]") && new BytesRef("3c").toString().equals("[33 63]"); - try (var bytesRefArray = arrayOf("1a", "2b", "3c", "4d")) { - var bytesRefVector = new BytesRefArrayVector(bytesRefArray, 4); - var bytesRefBlock = new BytesRefArrayBlock(bytesRefArray, 4, null, nulls, randomFrom(Block.MvOrdering.values())); - for (Object obj : List.of(bytesRefVector.filter(0, 2), bytesRefVector.asBlock().filter(0, 2), bytesRefBlock.filter(0, 2))) { - assertThat( - obj.toString(), - either(equalTo("BytesRefArrayVector[positions=2]")).or( - equalTo("BytesRefVectorBlock[vector=BytesRefArrayVector[positions=2]]") - ) - ); - } + var bytesRefVector = blockFactory.newBytesRefArrayVector(arrayOf("1a", "2b", "3c", "4d"), 4); + var bytesRefBlock = blockFactory.newBytesRefArrayBlock( + arrayOf("1a", "2b", "3c", "4d"), + 4, + null, + nulls, + randomFrom(Block.MvOrdering.values()) + ); + for (Releasable obj : List.of(bytesRefVector.filter(0, 2), bytesRefVector.asBlock().filter(0, 2), bytesRefBlock.filter(0, 2))) { + assertThat( + obj.toString(), + either(equalTo("BytesRefArrayVector[positions=2]")).or( + equalTo("BytesRefVectorBlock[vector=BytesRefArrayVector[positions=2]]") + ) + ); + Releasables.close(obj); } + Releasables.close(bytesRefVector, bytesRefBlock); } public void testFilterToStringMultiValue() { @@ -259,7 +288,10 @@ public void testFilterToStringMultiValue() { var filter = block.filter(0, 1); assertThat( filter.toString(), - containsString("BooleanArrayBlock[positions=2, mvOrdering=UNORDERED, values=[true, true, false, false]]") + containsString( + "BooleanArrayBlock[positions=2, mvOrdering=UNORDERED, " + + "vector=BooleanArrayVector[positions=4, values=[true, true, false, false]]]" + ) ); Releasables.close(builder, block); releaseAndAssertBreaker(filter); @@ -271,7 +303,12 @@ public void testFilterToStringMultiValue() { builder.beginPositionEntry().appendInt(90).appendInt(1000).endPositionEntry(); var block = builder.build(); var filter = block.filter(0, 1); - assertThat(filter.toString(), containsString("IntArrayBlock[positions=2, mvOrdering=UNORDERED, values=[0, 10, 20, 50]]")); + assertThat( + filter.toString(), + containsString( + "IntArrayBlock[positions=2, mvOrdering=UNORDERED, vector=IntArrayVector[positions=4, values=[0, 10, 20, 50]]]" + ) + ); Releasables.close(builder, block); releaseAndAssertBreaker(filter); } @@ -282,7 +319,12 @@ public void testFilterToStringMultiValue() { 
builder.beginPositionEntry().appendLong(90).appendLong(1000).endPositionEntry(); var block = builder.build(); var filter = block.filter(0, 1); - assertThat(filter.toString(), containsString("LongArrayBlock[positions=2, mvOrdering=UNORDERED, values=[0, 10, 20, 50]]")); + assertThat( + filter.toString(), + containsString( + "LongArrayBlock[positions=2, mvOrdering=UNORDERED, vector=LongArrayVector[positions=4, values=[0, 10, 20, 50]]]" + ) + ); Releasables.close(builder, block); releaseAndAssertBreaker(filter); } @@ -295,7 +337,10 @@ public void testFilterToStringMultiValue() { var filter = block.filter(0, 1); assertThat( filter.toString(), - containsString("DoubleArrayBlock[positions=2, mvOrdering=UNORDERED, values=[0.0, 10.0, 0.002, 1.0E9]]") + containsString( + "DoubleArrayBlock[positions=2, mvOrdering=UNORDERED, " + + "vector=DoubleArrayVector[positions=4, values=[0.0, 10.0, 0.002, 1.0E9]]]" + ) ); Releasables.close(builder, block); releaseAndAssertBreaker(filter); @@ -309,7 +354,10 @@ public void testFilterToStringMultiValue() { builder.beginPositionEntry().appendBytesRef(new BytesRef("pig")).appendBytesRef(new BytesRef("chicken")).endPositionEntry(); var block = builder.build(); var filter = block.filter(0, 1); - assertThat(filter.toString(), containsString("BytesRefArrayBlock[positions=2, mvOrdering=UNORDERED, values=4]")); + assertThat( + filter.toString(), + containsString("BytesRefArrayBlock[positions=2, mvOrdering=UNORDERED, vector=BytesRefArrayVector[positions=4]]") + ); assertThat(filter.getPositionCount(), equalTo(2)); Releasables.close(builder, block); releaseAndAssertBreaker(filter); diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/IntBlockEqualityTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/IntBlockEqualityTests.java index 40c84324f13d2..6c1be6231e82c 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/IntBlockEqualityTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/IntBlockEqualityTests.java @@ -7,22 +7,24 @@ package org.elasticsearch.compute.data; -import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.compute.operator.ComputeTestCase; import java.util.BitSet; import java.util.List; -public class IntBlockEqualityTests extends ESTestCase { +public class IntBlockEqualityTests extends ComputeTestCase { + + static final BlockFactory blockFactory = TestBlockFactory.getNonBreakingInstance(); public void testEmptyVector() { // all these "empty" vectors should be equivalent List vectors = List.of( - new IntArrayVector(new int[] {}, 0), - new IntArrayVector(new int[] { 0 }, 0), - IntBlock.newConstantBlockWith(0, 0).asVector(), - IntBlock.newConstantBlockWith(0, 0).filter().asVector(), - IntBlock.newBlockBuilder(0).build().asVector(), - IntBlock.newBlockBuilder(0).appendInt(1).build().asVector().filter() + blockFactory.newIntArrayVector(new int[] {}, 0), + blockFactory.newIntArrayVector(new int[] { 0 }, 0), + blockFactory.newConstantIntVector(0, 0), + blockFactory.newConstantIntVector(0, 0).filter(), + blockFactory.newIntBlockBuilder(0).build().asVector(), + blockFactory.newIntBlockBuilder(0).appendInt(1).build().asVector().filter() ); assertAllEquals(vectors); } @@ -30,12 +32,24 @@ public void testEmptyVector() { public void testEmptyBlock() { // all these "empty" vectors should be equivalent List blocks = List.of( - new IntArrayBlock(new int[] {}, 0, new int[] {}, BitSet.valueOf(new byte[] { 0b00 }), 
randomFrom(Block.MvOrdering.values())), - new IntArrayBlock(new int[] { 0 }, 0, new int[] {}, BitSet.valueOf(new byte[] { 0b00 }), randomFrom(Block.MvOrdering.values())), - IntBlock.newConstantBlockWith(0, 0), - IntBlock.newBlockBuilder(0).build(), - IntBlock.newBlockBuilder(0).appendInt(1).build().filter(), - IntBlock.newBlockBuilder(0).appendNull().build().filter() + blockFactory.newIntArrayBlock( + new int[] {}, + 0, + new int[] {}, + BitSet.valueOf(new byte[] { 0b00 }), + randomFrom(Block.MvOrdering.values()) + ), + blockFactory.newIntArrayBlock( + new int[] { 0 }, + 0, + new int[] {}, + BitSet.valueOf(new byte[] { 0b00 }), + randomFrom(Block.MvOrdering.values()) + ), + blockFactory.newConstantIntBlockWith(0, 0), + blockFactory.newIntBlockBuilder(0).build(), + blockFactory.newIntBlockBuilder(0).appendInt(1).build().filter(), + blockFactory.newIntBlockBuilder(0).appendNull().build().filter() ); assertAllEquals(blocks); } @@ -43,34 +57,34 @@ public void testEmptyBlock() { public void testVectorEquality() { // all these vectors should be equivalent List vectors = List.of( - new IntArrayVector(new int[] { 1, 2, 3 }, 3), - new IntArrayVector(new int[] { 1, 2, 3 }, 3).asBlock().asVector(), - new IntArrayVector(new int[] { 1, 2, 3, 4 }, 3), - new IntArrayVector(new int[] { 1, 2, 3 }, 3).filter(0, 1, 2), - new IntArrayVector(new int[] { 1, 2, 3, 4 }, 4).filter(0, 1, 2), - new IntArrayVector(new int[] { 0, 1, 2, 3 }, 4).filter(1, 2, 3), - new IntArrayVector(new int[] { 1, 4, 2, 3 }, 4).filter(0, 2, 3), - IntBlock.newBlockBuilder(3).appendInt(1).appendInt(2).appendInt(3).build().asVector(), - IntBlock.newBlockBuilder(3).appendInt(1).appendInt(2).appendInt(3).build().asVector().filter(0, 1, 2), - IntBlock.newBlockBuilder(3).appendInt(1).appendInt(4).appendInt(2).appendInt(3).build().filter(0, 2, 3).asVector(), - IntBlock.newBlockBuilder(3).appendInt(1).appendInt(4).appendInt(2).appendInt(3).build().asVector().filter(0, 2, 3) + blockFactory.newIntArrayVector(new int[] { 1, 2, 3 }, 3), + blockFactory.newIntArrayVector(new int[] { 1, 2, 3 }, 3).asBlock().asVector(), + blockFactory.newIntArrayVector(new int[] { 1, 2, 3, 4 }, 3), + blockFactory.newIntArrayVector(new int[] { 1, 2, 3 }, 3).filter(0, 1, 2), + blockFactory.newIntArrayVector(new int[] { 1, 2, 3, 4 }, 4).filter(0, 1, 2), + blockFactory.newIntArrayVector(new int[] { 0, 1, 2, 3 }, 4).filter(1, 2, 3), + blockFactory.newIntArrayVector(new int[] { 1, 4, 2, 3 }, 4).filter(0, 2, 3), + blockFactory.newIntBlockBuilder(3).appendInt(1).appendInt(2).appendInt(3).build().asVector(), + blockFactory.newIntBlockBuilder(3).appendInt(1).appendInt(2).appendInt(3).build().asVector().filter(0, 1, 2), + blockFactory.newIntBlockBuilder(3).appendInt(1).appendInt(4).appendInt(2).appendInt(3).build().filter(0, 2, 3).asVector(), + blockFactory.newIntBlockBuilder(3).appendInt(1).appendInt(4).appendInt(2).appendInt(3).build().asVector().filter(0, 2, 3) ); assertAllEquals(vectors); // all these constant-like vectors should be equivalent List moreVectors = List.of( - new IntArrayVector(new int[] { 1, 1, 1 }, 3), - new IntArrayVector(new int[] { 1, 1, 1 }, 3).asBlock().asVector(), - new IntArrayVector(new int[] { 1, 1, 1, 1 }, 3), - new IntArrayVector(new int[] { 1, 1, 1 }, 3).filter(0, 1, 2), - new IntArrayVector(new int[] { 1, 1, 1, 4 }, 4).filter(0, 1, 2), - new IntArrayVector(new int[] { 3, 1, 1, 1 }, 4).filter(1, 2, 3), - new IntArrayVector(new int[] { 1, 4, 1, 1 }, 4).filter(0, 2, 3), - IntBlock.newConstantBlockWith(1, 3).asVector(), - 
IntBlock.newBlockBuilder(3).appendInt(1).appendInt(1).appendInt(1).build().asVector(), - IntBlock.newBlockBuilder(3).appendInt(1).appendInt(1).appendInt(1).build().asVector().filter(0, 1, 2), - IntBlock.newBlockBuilder(3).appendInt(1).appendInt(4).appendInt(1).appendInt(1).build().filter(0, 2, 3).asVector(), - IntBlock.newBlockBuilder(3).appendInt(1).appendInt(4).appendInt(1).appendInt(1).build().asVector().filter(0, 2, 3) + blockFactory.newIntArrayVector(new int[] { 1, 1, 1 }, 3), + blockFactory.newIntArrayVector(new int[] { 1, 1, 1 }, 3).asBlock().asVector(), + blockFactory.newIntArrayVector(new int[] { 1, 1, 1, 1 }, 3), + blockFactory.newIntArrayVector(new int[] { 1, 1, 1 }, 3).filter(0, 1, 2), + blockFactory.newIntArrayVector(new int[] { 1, 1, 1, 4 }, 4).filter(0, 1, 2), + blockFactory.newIntArrayVector(new int[] { 3, 1, 1, 1 }, 4).filter(1, 2, 3), + blockFactory.newIntArrayVector(new int[] { 1, 4, 1, 1 }, 4).filter(0, 2, 3), + blockFactory.newConstantIntBlockWith(1, 3).asVector(), + blockFactory.newIntBlockBuilder(3).appendInt(1).appendInt(1).appendInt(1).build().asVector(), + blockFactory.newIntBlockBuilder(3).appendInt(1).appendInt(1).appendInt(1).build().asVector().filter(0, 1, 2), + blockFactory.newIntBlockBuilder(3).appendInt(1).appendInt(4).appendInt(1).appendInt(1).build().filter(0, 2, 3).asVector(), + blockFactory.newIntBlockBuilder(3).appendInt(1).appendInt(4).appendInt(1).appendInt(1).build().asVector().filter(0, 2, 3) ); assertAllEquals(moreVectors); } @@ -78,58 +92,60 @@ public void testVectorEquality() { public void testBlockEquality() { // all these blocks should be equivalent List blocks = List.of( - new IntArrayVector(new int[] { 1, 2, 3 }, 3).asBlock(), + new IntArrayVector(new int[] { 1, 2, 3 }, 3, blockFactory).asBlock(), new IntArrayBlock( new int[] { 1, 2, 3 }, 3, new int[] { 0, 1, 2, 3 }, BitSet.valueOf(new byte[] { 0b000 }), - randomFrom(Block.MvOrdering.values()) + randomFrom(Block.MvOrdering.values()), + blockFactory ), new IntArrayBlock( new int[] { 1, 2, 3, 4 }, 3, new int[] { 0, 1, 2, 3 }, BitSet.valueOf(new byte[] { 0b1000 }), - randomFrom(Block.MvOrdering.values()) + randomFrom(Block.MvOrdering.values()), + blockFactory ), - new IntArrayVector(new int[] { 1, 2, 3 }, 3).filter(0, 1, 2).asBlock(), - new IntArrayVector(new int[] { 1, 2, 3, 4 }, 3).filter(0, 1, 2).asBlock(), - new IntArrayVector(new int[] { 1, 2, 3, 4 }, 4).filter(0, 1, 2).asBlock(), - new IntArrayVector(new int[] { 1, 2, 4, 3 }, 4).filter(0, 1, 3).asBlock(), - IntBlock.newBlockBuilder(3).appendInt(1).appendInt(2).appendInt(3).build(), - IntBlock.newBlockBuilder(3).appendInt(1).appendInt(2).appendInt(3).build().filter(0, 1, 2), - IntBlock.newBlockBuilder(3).appendInt(1).appendInt(4).appendInt(2).appendInt(3).build().filter(0, 2, 3), - IntBlock.newBlockBuilder(3).appendInt(1).appendNull().appendInt(2).appendInt(3).build().filter(0, 2, 3) + new IntArrayVector(new int[] { 1, 2, 3 }, 3, blockFactory).filter(0, 1, 2).asBlock(), + new IntArrayVector(new int[] { 1, 2, 3, 4 }, 3, blockFactory).filter(0, 1, 2).asBlock(), + new IntArrayVector(new int[] { 1, 2, 3, 4 }, 4, blockFactory).filter(0, 1, 2).asBlock(), + new IntArrayVector(new int[] { 1, 2, 4, 3 }, 4, blockFactory).filter(0, 1, 3).asBlock(), + blockFactory.newIntBlockBuilder(3).appendInt(1).appendInt(2).appendInt(3).build(), + blockFactory.newIntBlockBuilder(3).appendInt(1).appendInt(2).appendInt(3).build().filter(0, 1, 2), + blockFactory.newIntBlockBuilder(3).appendInt(1).appendInt(4).appendInt(2).appendInt(3).build().filter(0, 2, 3), + 
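All of the equality lists above encode one invariant: vectors (and blocks) with the same element type, position count, and values compare equal no matter which construction path produced them. A hedged sketch of that invariant using the factory methods exercised here:

```java
import org.elasticsearch.common.breaker.NoopCircuitBreaker;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.compute.data.BlockFactory;
import org.elasticsearch.compute.data.IntVector;

class VectorEqualitySketch {
    public static void main(String[] args) {
        BlockFactory factory = BlockFactory.getInstance(new NoopCircuitBreaker("noop"), BigArrays.NON_RECYCLING_INSTANCE);
        // Three routes to the logical vector [1, 2, 3]:
        IntVector fromArray = factory.newIntArrayVector(new int[] { 1, 2, 3 }, 3);
        IntVector fromBuilder = factory.newIntBlockBuilder(3).appendInt(1).appendInt(2).appendInt(3).build().asVector();
        IntVector fromFilter = factory.newIntArrayVector(new int[] { 1, 9, 2, 3 }, 4).filter(0, 2, 3);
        // assertAllEquals in these tests checks exactly this, pairwise.
        assert fromArray.equals(fromBuilder) && fromBuilder.equals(fromFilter);
    }
}
```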
blockFactory.newIntBlockBuilder(3).appendInt(1).appendNull().appendInt(2).appendInt(3).build().filter(0, 2, 3) ); assertAllEquals(blocks); // all these constant-like blocks should be equivalent List moreBlocks = List.of( - new IntArrayVector(new int[] { 9, 9 }, 2).asBlock(), - new IntArrayBlock( + blockFactory.newIntArrayVector(new int[] { 9, 9 }, 2).asBlock(), + blockFactory.newIntArrayBlock( new int[] { 9, 9 }, 2, new int[] { 0, 1, 2 }, BitSet.valueOf(new byte[] { 0b000 }), randomFrom(Block.MvOrdering.values()) ), - new IntArrayBlock( + blockFactory.newIntArrayBlock( new int[] { 9, 9, 4 }, 2, new int[] { 0, 1, 2 }, BitSet.valueOf(new byte[] { 0b100 }), randomFrom(Block.MvOrdering.values()) ), - new IntArrayVector(new int[] { 9, 9 }, 2).filter(0, 1).asBlock(), - new IntArrayVector(new int[] { 9, 9, 4 }, 2).filter(0, 1).asBlock(), - new IntArrayVector(new int[] { 9, 9, 4 }, 3).filter(0, 1).asBlock(), - new IntArrayVector(new int[] { 9, 4, 9 }, 3).filter(0, 2).asBlock(), - IntBlock.newConstantBlockWith(9, 2), - IntBlock.newBlockBuilder(2).appendInt(9).appendInt(9).build(), - IntBlock.newBlockBuilder(2).appendInt(9).appendInt(9).build().filter(0, 1), - IntBlock.newBlockBuilder(2).appendInt(9).appendInt(4).appendInt(9).build().filter(0, 2), - IntBlock.newBlockBuilder(2).appendInt(9).appendNull().appendInt(9).build().filter(0, 2) + blockFactory.newIntArrayVector(new int[] { 9, 9 }, 2).filter(0, 1).asBlock(), + blockFactory.newIntArrayVector(new int[] { 9, 9, 4 }, 2).filter(0, 1).asBlock(), + blockFactory.newIntArrayVector(new int[] { 9, 9, 4 }, 3).filter(0, 1).asBlock(), + blockFactory.newIntArrayVector(new int[] { 9, 4, 9 }, 3).filter(0, 2).asBlock(), + blockFactory.newConstantIntBlockWith(9, 2), + blockFactory.newIntBlockBuilder(2).appendInt(9).appendInt(9).build(), + blockFactory.newIntBlockBuilder(2).appendInt(9).appendInt(9).build().filter(0, 1), + blockFactory.newIntBlockBuilder(2).appendInt(9).appendInt(4).appendInt(9).build().filter(0, 2), + blockFactory.newIntBlockBuilder(2).appendInt(9).appendNull().appendInt(9).build().filter(0, 2) ); assertAllEquals(moreBlocks); } @@ -137,15 +153,15 @@ public void testBlockEquality() { public void testVectorInequality() { // all these vectors should NOT be equivalent List notEqualVectors = List.of( - new IntArrayVector(new int[] { 1 }, 1), - new IntArrayVector(new int[] { 9 }, 1), - new IntArrayVector(new int[] { 1, 2 }, 2), - new IntArrayVector(new int[] { 1, 2, 3 }, 3), - new IntArrayVector(new int[] { 1, 2, 4 }, 3), - IntBlock.newConstantBlockWith(9, 2).asVector(), - IntBlock.newBlockBuilder(2).appendInt(1).appendInt(2).build().asVector().filter(1), - IntBlock.newBlockBuilder(3).appendInt(1).appendInt(2).appendInt(5).build().asVector(), - IntBlock.newBlockBuilder(1).appendInt(1).appendInt(2).appendInt(3).appendInt(4).build().asVector() + blockFactory.newIntArrayVector(new int[] { 1 }, 1), + blockFactory.newIntArrayVector(new int[] { 9 }, 1), + blockFactory.newIntArrayVector(new int[] { 1, 2 }, 2), + blockFactory.newIntArrayVector(new int[] { 1, 2, 3 }, 3), + blockFactory.newIntArrayVector(new int[] { 1, 2, 4 }, 3), + blockFactory.newConstantIntBlockWith(9, 2).asVector(), + blockFactory.newIntBlockBuilder(2).appendInt(1).appendInt(2).build().asVector().filter(1), + blockFactory.newIntBlockBuilder(3).appendInt(1).appendInt(2).appendInt(5).build().asVector(), + blockFactory.newIntBlockBuilder(1).appendInt(1).appendInt(2).appendInt(3).appendInt(4).build().asVector() ); assertAllNotEquals(notEqualVectors); } @@ -153,27 +169,27 @@ public void 
testVectorInequality() { public void testBlockInequality() { // all these blocks should NOT be equivalent List notEqualBlocks = List.of( - new IntArrayVector(new int[] { 1 }, 1).asBlock(), - new IntArrayVector(new int[] { 9 }, 1).asBlock(), - new IntArrayVector(new int[] { 1, 2 }, 2).asBlock(), - new IntArrayVector(new int[] { 1, 2, 3 }, 3).asBlock(), - new IntArrayVector(new int[] { 1, 2, 4 }, 3).asBlock(), - IntBlock.newConstantBlockWith(9, 2), - IntBlock.newBlockBuilder(2).appendInt(1).appendInt(2).build().filter(1), - IntBlock.newBlockBuilder(3).appendInt(1).appendInt(2).appendInt(5).build(), - IntBlock.newBlockBuilder(1).appendInt(1).appendInt(2).appendInt(3).appendInt(4).build(), - IntBlock.newBlockBuilder(1).appendInt(1).appendNull().build(), - IntBlock.newBlockBuilder(1).appendInt(1).appendNull().appendInt(3).build(), - IntBlock.newBlockBuilder(1).appendInt(1).appendInt(3).build(), - IntBlock.newBlockBuilder(3).appendInt(1).beginPositionEntry().appendInt(2).appendInt(3).build() + blockFactory.newIntArrayVector(new int[] { 1 }, 1).asBlock(), + blockFactory.newIntArrayVector(new int[] { 9 }, 1).asBlock(), + blockFactory.newIntArrayVector(new int[] { 1, 2 }, 2).asBlock(), + blockFactory.newIntArrayVector(new int[] { 1, 2, 3 }, 3).asBlock(), + blockFactory.newIntArrayVector(new int[] { 1, 2, 4 }, 3).asBlock(), + blockFactory.newConstantIntBlockWith(9, 2), + blockFactory.newIntBlockBuilder(2).appendInt(1).appendInt(2).build().filter(1), + blockFactory.newIntBlockBuilder(3).appendInt(1).appendInt(2).appendInt(5).build(), + blockFactory.newIntBlockBuilder(1).appendInt(1).appendInt(2).appendInt(3).appendInt(4).build(), + blockFactory.newIntBlockBuilder(1).appendInt(1).appendNull().build(), + blockFactory.newIntBlockBuilder(1).appendInt(1).appendNull().appendInt(3).build(), + blockFactory.newIntBlockBuilder(1).appendInt(1).appendInt(3).build(), + blockFactory.newIntBlockBuilder(3).appendInt(1).beginPositionEntry().appendInt(2).appendInt(3).build() ); assertAllNotEquals(notEqualBlocks); } public void testSimpleBlockWithSingleNull() { List blocks = List.of( - IntBlock.newBlockBuilder(1).appendInt(1).appendNull().appendInt(3).build(), - IntBlock.newBlockBuilder(1).appendInt(1).appendNull().appendInt(3).build() + blockFactory.newIntBlockBuilder(1).appendInt(1).appendNull().appendInt(3).build(), + blockFactory.newIntBlockBuilder(1).appendInt(1).appendNull().appendInt(3).build() ); assertEquals(3, blocks.get(0).getPositionCount()); assertTrue(blocks.get(0).isNull(1)); @@ -184,8 +200,8 @@ public void testSimpleBlockWithSingleNull() { public void testSimpleBlockWithManyNulls() { int positions = randomIntBetween(1, 256); boolean grow = randomBoolean(); - IntBlock.Builder builder1 = IntBlock.newBlockBuilder(grow ? 0 : positions); - IntBlock.Builder builder2 = IntBlock.newBlockBuilder(grow ? 0 : positions); + IntBlock.Builder builder1 = blockFactory.newIntBlockBuilder(grow ? 0 : positions); + IntBlock.Builder builder2 = blockFactory.newIntBlockBuilder(grow ? 
0 : positions); for (int p = 0; p < positions; p++) { builder1.appendNull(); builder2.appendNull(); @@ -202,8 +218,8 @@ public void testSimpleBlockWithManyNulls() { public void testSimpleBlockWithSingleMultiValue() { List blocks = List.of( - IntBlock.newBlockBuilder(1).beginPositionEntry().appendInt(1).appendInt(2).build(), - IntBlock.newBlockBuilder(1).beginPositionEntry().appendInt(1).appendInt(2).build() + blockFactory.newIntBlockBuilder(1).beginPositionEntry().appendInt(1).appendInt(2).build(), + blockFactory.newIntBlockBuilder(1).beginPositionEntry().appendInt(1).appendInt(2).build() ); assertEquals(1, blocks.get(0).getPositionCount()); assertEquals(2, blocks.get(0).getValueCount(0)); @@ -213,9 +229,9 @@ public void testSimpleBlockWithSingleMultiValue() { public void testSimpleBlockWithManyMultiValues() { int positions = randomIntBetween(1, 256); boolean grow = randomBoolean(); - IntBlock.Builder builder1 = IntBlock.newBlockBuilder(grow ? 0 : positions); - IntBlock.Builder builder2 = IntBlock.newBlockBuilder(grow ? 0 : positions); - IntBlock.Builder builder3 = IntBlock.newBlockBuilder(grow ? 0 : positions); + IntBlock.Builder builder1 = blockFactory.newIntBlockBuilder(grow ? 0 : positions); + IntBlock.Builder builder2 = blockFactory.newIntBlockBuilder(grow ? 0 : positions); + IntBlock.Builder builder3 = blockFactory.newIntBlockBuilder(grow ? 0 : positions); for (int pos = 0; pos < positions; pos++) { builder1.beginPositionEntry(); builder2.beginPositionEntry(); diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/LongBlockEqualityTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/LongBlockEqualityTests.java index a24b4a4dd6fa6..27a2f9702a0ae 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/LongBlockEqualityTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/LongBlockEqualityTests.java @@ -7,22 +7,24 @@ package org.elasticsearch.compute.data; -import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.compute.operator.ComputeTestCase; import java.util.BitSet; import java.util.List; -public class LongBlockEqualityTests extends ESTestCase { +public class LongBlockEqualityTests extends ComputeTestCase { + + static final BlockFactory blockFactory = TestBlockFactory.getNonBreakingInstance(); public void testEmptyVector() { // all these "empty" vectors should be equivalent List vectors = List.of( - new LongArrayVector(new long[] {}, 0), - new LongArrayVector(new long[] { 0 }, 0), - LongBlock.newConstantBlockWith(0, 0).asVector(), - LongBlock.newConstantBlockWith(0, 0).filter().asVector(), - LongBlock.newBlockBuilder(0).build().asVector(), - LongBlock.newBlockBuilder(0).appendLong(1).build().asVector().filter() + blockFactory.newLongArrayVector(new long[] {}, 0), + blockFactory.newLongArrayVector(new long[] { 0 }, 0), + blockFactory.newConstantLongBlockWith(0, 0).asVector(), + blockFactory.newConstantLongBlockWith(0, 0).filter().asVector(), + blockFactory.newLongBlockBuilder(0).build().asVector(), + blockFactory.newLongBlockBuilder(0).appendLong(1).build().asVector().filter() ); assertAllEquals(vectors); } @@ -30,18 +32,24 @@ public void testEmptyVector() { public void testEmptyBlock() { // all these "empty" vectors should be equivalent List blocks = List.of( - new LongArrayBlock(new long[] {}, 0, new int[] {}, BitSet.valueOf(new byte[] { 0b00 }), randomFrom(Block.MvOrdering.values())), - new LongArrayBlock( + blockFactory.newLongArrayBlock( + new 
long[] {}, + 0, + new int[] {}, + BitSet.valueOf(new byte[] { 0b00 }), + randomFrom(Block.MvOrdering.values()) + ), + blockFactory.newLongArrayBlock( new long[] { 0 }, 0, new int[] {}, BitSet.valueOf(new byte[] { 0b00 }), randomFrom(Block.MvOrdering.values()) ), - LongBlock.newConstantBlockWith(0, 0), - LongBlock.newBlockBuilder(0).build(), - LongBlock.newBlockBuilder(0).appendLong(1).build().filter(), - LongBlock.newBlockBuilder(0).appendNull().build().filter() + blockFactory.newConstantLongBlockWith(0, 0), + blockFactory.newLongBlockBuilder(0).build(), + blockFactory.newLongBlockBuilder(0).appendLong(1).build().filter(), + blockFactory.newLongBlockBuilder(0).appendNull().build().filter() ); assertAllEquals(blocks); } @@ -49,34 +57,34 @@ public void testEmptyBlock() { public void testVectorEquality() { // all these vectors should be equivalent List vectors = List.of( - new LongArrayVector(new long[] { 1, 2, 3 }, 3), - new LongArrayVector(new long[] { 1, 2, 3 }, 3).asBlock().asVector(), - new LongArrayVector(new long[] { 1, 2, 3, 4 }, 3), - new LongArrayVector(new long[] { 1, 2, 3 }, 3).filter(0, 1, 2), - new LongArrayVector(new long[] { 1, 2, 3, 4 }, 4).filter(0, 1, 2), - new LongArrayVector(new long[] { 0, 1, 2, 3 }, 4).filter(1, 2, 3), - new LongArrayVector(new long[] { 1, 4, 2, 3 }, 4).filter(0, 2, 3), - LongBlock.newBlockBuilder(3).appendLong(1).appendLong(2).appendLong(3).build().asVector(), - LongBlock.newBlockBuilder(3).appendLong(1).appendLong(2).appendLong(3).build().asVector().filter(0, 1, 2), - LongBlock.newBlockBuilder(3).appendLong(1).appendLong(4).appendLong(2).appendLong(3).build().filter(0, 2, 3).asVector(), - LongBlock.newBlockBuilder(3).appendLong(1).appendLong(4).appendLong(2).appendLong(3).build().asVector().filter(0, 2, 3) + blockFactory.newLongArrayVector(new long[] { 1, 2, 3 }, 3), + blockFactory.newLongArrayVector(new long[] { 1, 2, 3 }, 3).asBlock().asVector(), + blockFactory.newLongArrayVector(new long[] { 1, 2, 3, 4 }, 3), + blockFactory.newLongArrayVector(new long[] { 1, 2, 3 }, 3).filter(0, 1, 2), + blockFactory.newLongArrayVector(new long[] { 1, 2, 3, 4 }, 4).filter(0, 1, 2), + blockFactory.newLongArrayVector(new long[] { 0, 1, 2, 3 }, 4).filter(1, 2, 3), + blockFactory.newLongArrayVector(new long[] { 1, 4, 2, 3 }, 4).filter(0, 2, 3), + blockFactory.newLongBlockBuilder(3).appendLong(1).appendLong(2).appendLong(3).build().asVector(), + blockFactory.newLongBlockBuilder(3).appendLong(1).appendLong(2).appendLong(3).build().asVector().filter(0, 1, 2), + blockFactory.newLongBlockBuilder(3).appendLong(1).appendLong(4).appendLong(2).appendLong(3).build().filter(0, 2, 3).asVector(), + blockFactory.newLongBlockBuilder(3).appendLong(1).appendLong(4).appendLong(2).appendLong(3).build().asVector().filter(0, 2, 3) ); assertAllEquals(vectors); // all these constant-like vectors should be equivalent List moreVectors = List.of( - new LongArrayVector(new long[] { 1, 1, 1 }, 3), - new LongArrayVector(new long[] { 1, 1, 1 }, 3).asBlock().asVector(), - new LongArrayVector(new long[] { 1, 1, 1, 1 }, 3), - new LongArrayVector(new long[] { 1, 1, 1 }, 3).filter(0, 1, 2), - new LongArrayVector(new long[] { 1, 1, 1, 4 }, 4).filter(0, 1, 2), - new LongArrayVector(new long[] { 3, 1, 1, 1 }, 4).filter(1, 2, 3), - new LongArrayVector(new long[] { 1, 4, 1, 1 }, 4).filter(0, 2, 3), - LongBlock.newConstantBlockWith(1, 3).asVector(), - LongBlock.newBlockBuilder(3).appendLong(1).appendLong(1).appendLong(1).build().asVector(), - 
LongBlock.newBlockBuilder(3).appendLong(1).appendLong(1).appendLong(1).build().asVector().filter(0, 1, 2), - LongBlock.newBlockBuilder(3).appendLong(1).appendLong(4).appendLong(1).appendLong(1).build().filter(0, 2, 3).asVector(), - LongBlock.newBlockBuilder(3).appendLong(1).appendLong(4).appendLong(1).appendLong(1).build().asVector().filter(0, 2, 3) + blockFactory.newLongArrayVector(new long[] { 1, 1, 1 }, 3), + blockFactory.newLongArrayVector(new long[] { 1, 1, 1 }, 3).asBlock().asVector(), + blockFactory.newLongArrayVector(new long[] { 1, 1, 1, 1 }, 3), + blockFactory.newLongArrayVector(new long[] { 1, 1, 1 }, 3).filter(0, 1, 2), + blockFactory.newLongArrayVector(new long[] { 1, 1, 1, 4 }, 4).filter(0, 1, 2), + blockFactory.newLongArrayVector(new long[] { 3, 1, 1, 1 }, 4).filter(1, 2, 3), + blockFactory.newLongArrayVector(new long[] { 1, 4, 1, 1 }, 4).filter(0, 2, 3), + blockFactory.newConstantLongBlockWith(1, 3).asVector(), + blockFactory.newLongBlockBuilder(3).appendLong(1).appendLong(1).appendLong(1).build().asVector(), + blockFactory.newLongBlockBuilder(3).appendLong(1).appendLong(1).appendLong(1).build().asVector().filter(0, 1, 2), + blockFactory.newLongBlockBuilder(3).appendLong(1).appendLong(4).appendLong(1).appendLong(1).build().filter(0, 2, 3).asVector(), + blockFactory.newLongBlockBuilder(3).appendLong(1).appendLong(4).appendLong(1).appendLong(1).build().asVector().filter(0, 2, 3) ); assertAllEquals(moreVectors); } @@ -84,58 +92,58 @@ public void testVectorEquality() { public void testBlockEquality() { // all these blocks should be equivalent List blocks = List.of( - new LongArrayVector(new long[] { 1, 2, 3 }, 3).asBlock(), - new LongArrayBlock( + blockFactory.newLongArrayVector(new long[] { 1, 2, 3 }, 3).asBlock(), + blockFactory.newLongArrayBlock( new long[] { 1, 2, 3 }, 3, new int[] { 0, 1, 2, 3 }, BitSet.valueOf(new byte[] { 0b000 }), randomFrom(Block.MvOrdering.values()) ), - new LongArrayBlock( + blockFactory.newLongArrayBlock( new long[] { 1, 2, 3, 4 }, 3, new int[] { 0, 1, 2, 3 }, BitSet.valueOf(new byte[] { 0b1000 }), randomFrom(Block.MvOrdering.values()) ), - new LongArrayVector(new long[] { 1, 2, 3 }, 3).filter(0, 1, 2).asBlock(), - new LongArrayVector(new long[] { 1, 2, 3, 4 }, 3).filter(0, 1, 2).asBlock(), - new LongArrayVector(new long[] { 1, 2, 3, 4 }, 4).filter(0, 1, 2).asBlock(), - new LongArrayVector(new long[] { 1, 2, 4, 3 }, 4).filter(0, 1, 3).asBlock(), - LongBlock.newBlockBuilder(3).appendLong(1).appendLong(2).appendLong(3).build(), - LongBlock.newBlockBuilder(3).appendLong(1).appendLong(2).appendLong(3).build().filter(0, 1, 2), - LongBlock.newBlockBuilder(3).appendLong(1).appendLong(4).appendLong(2).appendLong(3).build().filter(0, 2, 3), - LongBlock.newBlockBuilder(3).appendLong(1).appendNull().appendLong(2).appendLong(3).build().filter(0, 2, 3) + blockFactory.newLongArrayVector(new long[] { 1, 2, 3 }, 3).filter(0, 1, 2).asBlock(), + blockFactory.newLongArrayVector(new long[] { 1, 2, 3, 4 }, 3).filter(0, 1, 2).asBlock(), + blockFactory.newLongArrayVector(new long[] { 1, 2, 3, 4 }, 4).filter(0, 1, 2).asBlock(), + blockFactory.newLongArrayVector(new long[] { 1, 2, 4, 3 }, 4).filter(0, 1, 3).asBlock(), + blockFactory.newLongBlockBuilder(3).appendLong(1).appendLong(2).appendLong(3).build(), + blockFactory.newLongBlockBuilder(3).appendLong(1).appendLong(2).appendLong(3).build().filter(0, 1, 2), + blockFactory.newLongBlockBuilder(3).appendLong(1).appendLong(4).appendLong(2).appendLong(3).build().filter(0, 2, 3), + 
blockFactory.newLongBlockBuilder(3).appendLong(1).appendNull().appendLong(2).appendLong(3).build().filter(0, 2, 3) ); assertAllEquals(blocks); // all these constant-like blocks should be equivalent List moreBlocks = List.of( - new LongArrayVector(new long[] { 9, 9 }, 2).asBlock(), - new LongArrayBlock( + blockFactory.newLongArrayVector(new long[] { 9, 9 }, 2).asBlock(), + blockFactory.newLongArrayBlock( new long[] { 9, 9 }, 2, new int[] { 0, 1, 2 }, BitSet.valueOf(new byte[] { 0b000 }), randomFrom(Block.MvOrdering.values()) ), - new LongArrayBlock( + blockFactory.newLongArrayBlock( new long[] { 9, 9, 4 }, 2, new int[] { 0, 1, 2 }, BitSet.valueOf(new byte[] { 0b100 }), randomFrom(Block.MvOrdering.values()) ), - new LongArrayVector(new long[] { 9, 9 }, 2).filter(0, 1).asBlock(), - new LongArrayVector(new long[] { 9, 9, 4 }, 2).filter(0, 1).asBlock(), - new LongArrayVector(new long[] { 9, 9, 4 }, 3).filter(0, 1).asBlock(), - new LongArrayVector(new long[] { 9, 4, 9 }, 3).filter(0, 2).asBlock(), - LongBlock.newConstantBlockWith(9, 2), - LongBlock.newBlockBuilder(2).appendLong(9).appendLong(9).build(), - LongBlock.newBlockBuilder(2).appendLong(9).appendLong(9).build().filter(0, 1), - LongBlock.newBlockBuilder(2).appendLong(9).appendLong(4).appendLong(9).build().filter(0, 2), - LongBlock.newBlockBuilder(2).appendLong(9).appendNull().appendLong(9).build().filter(0, 2) + blockFactory.newLongArrayVector(new long[] { 9, 9 }, 2).filter(0, 1).asBlock(), + blockFactory.newLongArrayVector(new long[] { 9, 9, 4 }, 2).filter(0, 1).asBlock(), + blockFactory.newLongArrayVector(new long[] { 9, 9, 4 }, 3).filter(0, 1).asBlock(), + blockFactory.newLongArrayVector(new long[] { 9, 4, 9 }, 3).filter(0, 2).asBlock(), + blockFactory.newConstantLongBlockWith(9, 2), + blockFactory.newLongBlockBuilder(2).appendLong(9).appendLong(9).build(), + blockFactory.newLongBlockBuilder(2).appendLong(9).appendLong(9).build().filter(0, 1), + blockFactory.newLongBlockBuilder(2).appendLong(9).appendLong(4).appendLong(9).build().filter(0, 2), + blockFactory.newLongBlockBuilder(2).appendLong(9).appendNull().appendLong(9).build().filter(0, 2) ); assertAllEquals(moreBlocks); } @@ -143,15 +151,15 @@ public void testBlockEquality() { public void testVectorInequality() { // all these vectors should NOT be equivalent List notEqualVectors = List.of( - new LongArrayVector(new long[] { 1 }, 1), - new LongArrayVector(new long[] { 9 }, 1), - new LongArrayVector(new long[] { 1, 2 }, 2), - new LongArrayVector(new long[] { 1, 2, 3 }, 3), - new LongArrayVector(new long[] { 1, 2, 4 }, 3), - LongBlock.newConstantBlockWith(9, 2).asVector(), - LongBlock.newBlockBuilder(2).appendLong(1).appendLong(2).build().asVector().filter(1), - LongBlock.newBlockBuilder(3).appendLong(1).appendLong(2).appendLong(5).build().asVector(), - LongBlock.newBlockBuilder(1).appendLong(1).appendLong(2).appendLong(3).appendLong(4).build().asVector() + blockFactory.newLongArrayVector(new long[] { 1 }, 1), + blockFactory.newLongArrayVector(new long[] { 9 }, 1), + blockFactory.newLongArrayVector(new long[] { 1, 2 }, 2), + blockFactory.newLongArrayVector(new long[] { 1, 2, 3 }, 3), + blockFactory.newLongArrayVector(new long[] { 1, 2, 4 }, 3), + blockFactory.newConstantLongBlockWith(9, 2).asVector(), + blockFactory.newLongBlockBuilder(2).appendLong(1).appendLong(2).build().asVector().filter(1), + blockFactory.newLongBlockBuilder(3).appendLong(1).appendLong(2).appendLong(5).build().asVector(), + 
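One subtlety in the block lists above is worth spelling out: a block built with a null can still equal a block built without one, because filter() narrows equality to the selected positions. A short sketch under the same non-breaking-factory assumption:

```java
import org.elasticsearch.common.breaker.NoopCircuitBreaker;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.compute.data.BlockFactory;
import org.elasticsearch.compute.data.LongBlock;

class NullFilterEqualitySketch {
    public static void main(String[] args) {
        BlockFactory factory = BlockFactory.getInstance(new NoopCircuitBreaker("noop"), BigArrays.NON_RECYCLING_INSTANCE);
        // [9, null, 9] filtered to positions 0 and 2 leaves the values [9, 9] ...
        LongBlock filtered = factory.newLongBlockBuilder(2).appendLong(9).appendNull().appendLong(9).build().filter(0, 2);
        // ... which equals a constant block of two 9s, as the moreBlocks list asserts.
        LongBlock constant = factory.newConstantLongBlockWith(9, 2);
        assert filtered.equals(constant);
    }
}
```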
blockFactory.newLongBlockBuilder(1).appendLong(1).appendLong(2).appendLong(3).appendLong(4).build().asVector() ); assertAllNotEquals(notEqualVectors); } @@ -159,27 +167,27 @@ public void testVectorInequality() { public void testBlockInequality() { // all these blocks should NOT be equivalent List notEqualBlocks = List.of( - new LongArrayVector(new long[] { 1 }, 1).asBlock(), - new LongArrayVector(new long[] { 9 }, 1).asBlock(), - new LongArrayVector(new long[] { 1, 2 }, 2).asBlock(), - new LongArrayVector(new long[] { 1, 2, 3 }, 3).asBlock(), - new LongArrayVector(new long[] { 1, 2, 4 }, 3).asBlock(), - LongBlock.newConstantBlockWith(9, 2), - LongBlock.newBlockBuilder(2).appendLong(1).appendLong(2).build().filter(1), - LongBlock.newBlockBuilder(3).appendLong(1).appendLong(2).appendLong(5).build(), - LongBlock.newBlockBuilder(1).appendLong(1).appendLong(2).appendLong(3).appendLong(4).build(), - LongBlock.newBlockBuilder(1).appendLong(1).appendNull().build(), - LongBlock.newBlockBuilder(1).appendLong(1).appendNull().appendLong(3).build(), - LongBlock.newBlockBuilder(1).appendLong(1).appendLong(3).build(), - LongBlock.newBlockBuilder(3).appendLong(1).beginPositionEntry().appendLong(2).appendLong(3).build() + blockFactory.newLongArrayVector(new long[] { 1 }, 1).asBlock(), + blockFactory.newLongArrayVector(new long[] { 9 }, 1).asBlock(), + blockFactory.newLongArrayVector(new long[] { 1, 2 }, 2).asBlock(), + blockFactory.newLongArrayVector(new long[] { 1, 2, 3 }, 3).asBlock(), + blockFactory.newLongArrayVector(new long[] { 1, 2, 4 }, 3).asBlock(), + blockFactory.newConstantLongBlockWith(9, 2), + blockFactory.newLongBlockBuilder(2).appendLong(1).appendLong(2).build().filter(1), + blockFactory.newLongBlockBuilder(3).appendLong(1).appendLong(2).appendLong(5).build(), + blockFactory.newLongBlockBuilder(1).appendLong(1).appendLong(2).appendLong(3).appendLong(4).build(), + blockFactory.newLongBlockBuilder(1).appendLong(1).appendNull().build(), + blockFactory.newLongBlockBuilder(1).appendLong(1).appendNull().appendLong(3).build(), + blockFactory.newLongBlockBuilder(1).appendLong(1).appendLong(3).build(), + blockFactory.newLongBlockBuilder(3).appendLong(1).beginPositionEntry().appendLong(2).appendLong(3).build() ); assertAllNotEquals(notEqualBlocks); } public void testSimpleBlockWithSingleNull() { List blocks = List.of( - LongBlock.newBlockBuilder(1).appendLong(1).appendNull().appendLong(3).build(), - LongBlock.newBlockBuilder(1).appendLong(1).appendNull().appendLong(3).build() + blockFactory.newLongBlockBuilder(1).appendLong(1).appendNull().appendLong(3).build(), + blockFactory.newLongBlockBuilder(1).appendLong(1).appendNull().appendLong(3).build() ); assertEquals(3, blocks.get(0).getPositionCount()); assertTrue(blocks.get(0).isNull(1)); @@ -190,8 +198,8 @@ public void testSimpleBlockWithSingleNull() { public void testSimpleBlockWithManyNulls() { int positions = randomIntBetween(1, 256); boolean grow = randomBoolean(); - LongBlock.Builder builder1 = LongBlock.newBlockBuilder(grow ? 0 : positions); - LongBlock.Builder builder2 = LongBlock.newBlockBuilder(grow ? 0 : positions); + LongBlock.Builder builder1 = blockFactory.newLongBlockBuilder(grow ? 0 : positions); + LongBlock.Builder builder2 = blockFactory.newLongBlockBuilder(grow ? 
0 : positions); for (int p = 0; p < positions; p++) { builder1.appendNull(); builder2.appendNull(); @@ -208,8 +216,8 @@ public void testSimpleBlockWithManyNulls() { public void testSimpleBlockWithSingleMultiValue() { List blocks = List.of( - LongBlock.newBlockBuilder(1).beginPositionEntry().appendLong(1).appendLong(2).build(), - LongBlock.newBlockBuilder(1).beginPositionEntry().appendLong(1).appendLong(2).build() + blockFactory.newLongBlockBuilder(1).beginPositionEntry().appendLong(1).appendLong(2).build(), + blockFactory.newLongBlockBuilder(1).beginPositionEntry().appendLong(1).appendLong(2).build() ); assertEquals(1, blocks.get(0).getPositionCount()); assertEquals(2, blocks.get(0).getValueCount(0)); @@ -219,9 +227,9 @@ public void testSimpleBlockWithSingleMultiValue() { public void testSimpleBlockWithManyMultiValues() { int positions = randomIntBetween(1, 256); boolean grow = randomBoolean(); - LongBlock.Builder builder1 = LongBlock.newBlockBuilder(grow ? 0 : positions); - LongBlock.Builder builder2 = LongBlock.newBlockBuilder(grow ? 0 : positions); - LongBlock.Builder builder3 = LongBlock.newBlockBuilder(grow ? 0 : positions); + LongBlock.Builder builder1 = blockFactory.newLongBlockBuilder(grow ? 0 : positions); + LongBlock.Builder builder2 = blockFactory.newLongBlockBuilder(grow ? 0 : positions); + LongBlock.Builder builder3 = blockFactory.newLongBlockBuilder(grow ? 0 : positions); for (int pos = 0; pos < positions; pos++) { builder1.beginPositionEntry(); builder2.beginPositionEntry(); diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/MockBlockFactory.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/MockBlockFactory.java index 35623b93357df..2457f97e12792 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/MockBlockFactory.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/MockBlockFactory.java @@ -10,6 +10,7 @@ import org.apache.lucene.tests.util.LuceneTestCase; import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.breaker.CircuitBreaker; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.BytesRefArray; import org.elasticsearch.compute.data.Block.MvOrdering; @@ -31,7 +32,9 @@ public class MockBlockFactory extends BlockFactory { static final boolean TRACK_ALLOCATIONS = true; static Object trackDetail() { - return TRACK_ALLOCATIONS ? new RuntimeException("Block allocated from test: " + LuceneTestCase.getTestClass().getName()) : true; + return TRACK_ALLOCATIONS + ? new RuntimeException("Releasable allocated from test: " + LuceneTestCase.getTestClass().getName()) + : true; } final ConcurrentMap TRACKED_BLOCKS = new ConcurrentHashMap<>(); @@ -49,7 +52,7 @@ public void ensureAllBlocksAreReleased() { Iterator causes = copy.values().iterator(); Object firstCause = causes.next(); RuntimeException exception = new RuntimeException( - copy.size() + " blocks have not been released", + copy.size() + " releasables have not been released", firstCause instanceof Throwable ? 
(Throwable) firstCause : null ); while (causes.hasNext()) { @@ -63,11 +66,15 @@ public void ensureAllBlocksAreReleased() { } public MockBlockFactory(CircuitBreaker breaker, BigArrays bigArrays) { - this(breaker, bigArrays, null); + this(breaker, bigArrays, BlockFactory.DEFAULT_MAX_BLOCK_PRIMITIVE_ARRAY_SIZE); } - protected MockBlockFactory(CircuitBreaker breaker, BigArrays bigArrays, BlockFactory parent) { - super(breaker, bigArrays, parent); + public MockBlockFactory(CircuitBreaker breaker, BigArrays bigArrays, ByteSizeValue maxPrimitiveArraySize) { + this(breaker, bigArrays, maxPrimitiveArraySize, null); + } + + private MockBlockFactory(CircuitBreaker breaker, BigArrays bigArrays, ByteSizeValue maxPrimitiveArraySize, BlockFactory parent) { + super(breaker, bigArrays, maxPrimitiveArraySize, parent); } @Override @@ -75,7 +82,7 @@ public BlockFactory newChildFactory(LocalCircuitBreaker childBreaker) { if (childBreaker.parentBreaker() != breaker()) { throw new IllegalStateException("Different parent breaker"); } - return new MockBlockFactory(childBreaker, bigArrays(), this); + return new MockBlockFactory(childBreaker, bigArrays(), ByteSizeValue.ofBytes(maxPrimitiveArrayBytes()), this); } @Override @@ -116,7 +123,7 @@ void purgeTrackBlocks() { TRACKED_BLOCKS.remove(vecBuilder); } } else if (b instanceof Vector vector) { - if (vector.asBlock().isReleased()) { + if (vector.isReleased()) { TRACKED_BLOCKS.remove(vector); } } else { diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/MultiValueBlockTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/MultiValueBlockTests.java index f067999a04ff1..d3572377912ac 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/MultiValueBlockTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/MultiValueBlockTests.java @@ -23,7 +23,7 @@ public class MultiValueBlockTests extends SerializationTestCase { public void testIntBlockTrivial1() { - var blockBuilder = IntBlock.newBlockBuilder(4); + var blockBuilder = blockFactory.newIntBlockBuilder(4); blockBuilder.appendInt(10); blockBuilder.beginPositionEntry(); blockBuilder.appendInt(21); @@ -54,10 +54,11 @@ public void testIntBlockTrivial1() { // cannot get a Vector view assertNull(block.asVector()); EqualsHashCodeTestUtils.checkEqualsAndHashCode(block, this::serializeDeserializeBlock, null, Releasable::close); + block.close(); } public void testIntBlockTrivial() { - var blockBuilder = IntBlock.newBlockBuilder(10); + var blockBuilder = blockFactory.newIntBlockBuilder(10); blockBuilder.appendInt(1); blockBuilder.beginPositionEntry(); blockBuilder.appendInt(21); @@ -79,57 +80,66 @@ public void testIntBlockTrivial() { assertThat(block.getInt(block.getFirstValueIndex(0)), is(1)); assertNull(block.asVector()); EqualsHashCodeTestUtils.checkEqualsAndHashCode(block, this::serializeDeserializeBlock, null, Releasable::close); + block.close(); } public void testEmpty() { for (int initialSize : new int[] { 0, 10, 100, randomInt(512) }) { - IntBlock intBlock = IntBlock.newBlockBuilder(initialSize).build(); + IntBlock intBlock = blockFactory.newIntBlockBuilder(initialSize).build(); assertThat(intBlock.getPositionCount(), is(0)); assertThat(intBlock.asVector(), is(notNullValue())); EqualsHashCodeTestUtils.checkEqualsAndHashCode(intBlock, this::serializeDeserializeBlock, null, Releasable::close); + intBlock.close(); - LongBlock longBlock = LongBlock.newBlockBuilder(initialSize).build(); + LongBlock 
longBlock = blockFactory.newLongBlockBuilder(initialSize).build(); assertThat(longBlock.getPositionCount(), is(0)); assertThat(longBlock.asVector(), is(notNullValue())); EqualsHashCodeTestUtils.checkEqualsAndHashCode(longBlock, this::serializeDeserializeBlock, null, Releasable::close); + longBlock.close(); - DoubleBlock doubleBlock = DoubleBlock.newBlockBuilder(initialSize).build(); + DoubleBlock doubleBlock = blockFactory.newDoubleBlockBuilder(initialSize).build(); assertThat(doubleBlock.getPositionCount(), is(0)); assertThat(doubleBlock.asVector(), is(notNullValue())); EqualsHashCodeTestUtils.checkEqualsAndHashCode(doubleBlock, this::serializeDeserializeBlock, null, Releasable::close); + doubleBlock.close(); - BytesRefBlock bytesRefBlock = BytesRefBlock.newBlockBuilder(initialSize).build(); + BytesRefBlock bytesRefBlock = blockFactory.newBytesRefBlockBuilder(initialSize).build(); assertThat(bytesRefBlock.getPositionCount(), is(0)); assertThat(bytesRefBlock.asVector(), is(notNullValue())); EqualsHashCodeTestUtils.checkEqualsAndHashCode(bytesRefBlock, this::serializeDeserializeBlock, null, Releasable::close); + bytesRefBlock.close(); } } public void testNullOnly() throws IOException { for (int initialSize : new int[] { 0, 10, 100, randomInt(512) }) { - IntBlock intBlock = IntBlock.newBlockBuilder(initialSize).appendNull().build(); + IntBlock intBlock = blockFactory.newIntBlockBuilder(initialSize).appendNull().build(); assertThat(intBlock.getPositionCount(), is(1)); assertThat(intBlock.getValueCount(0), is(0)); assertNull(intBlock.asVector()); EqualsHashCodeTestUtils.checkEqualsAndHashCode(intBlock, this::serializeDeserializeBlock, null, Releasable::close); + intBlock.close(); - LongBlock longBlock = LongBlock.newBlockBuilder(initialSize).appendNull().build(); + LongBlock longBlock = blockFactory.newLongBlockBuilder(initialSize).appendNull().build(); assertThat(longBlock.getPositionCount(), is(1)); assertThat(longBlock.getValueCount(0), is(0)); assertNull(longBlock.asVector()); EqualsHashCodeTestUtils.checkEqualsAndHashCode(longBlock, this::serializeDeserializeBlock, null, Releasable::close); + longBlock.close(); - DoubleBlock doubleBlock = DoubleBlock.newBlockBuilder(initialSize).appendNull().build(); + DoubleBlock doubleBlock = blockFactory.newDoubleBlockBuilder(initialSize).appendNull().build(); assertThat(doubleBlock.getPositionCount(), is(1)); assertThat(doubleBlock.getValueCount(0), is(0)); assertNull(doubleBlock.asVector()); EqualsHashCodeTestUtils.checkEqualsAndHashCode(doubleBlock, this::serializeDeserializeBlock, null, Releasable::close); + doubleBlock.close(); - BytesRefBlock bytesRefBlock = BytesRefBlock.newBlockBuilder(initialSize).appendNull().build(); + BytesRefBlock bytesRefBlock = blockFactory.newBytesRefBlockBuilder(initialSize).appendNull().build(); assertThat(bytesRefBlock.getPositionCount(), is(1)); assertThat(bytesRefBlock.getValueCount(0), is(0)); assertNull(bytesRefBlock.asVector()); EqualsHashCodeTestUtils.checkEqualsAndHashCode(bytesRefBlock, this::serializeDeserializeBlock, null, Releasable::close); + bytesRefBlock.close(); } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/SerializationTestCase.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/SerializationTestCase.java index b0666e89cf79e..7d3e00845284a 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/SerializationTestCase.java +++ 
b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/SerializationTestCase.java @@ -28,7 +28,7 @@ public abstract class SerializationTestCase extends ESTestCase { BigArrays bigArrays; - private BlockFactory blockFactory; + protected BlockFactory blockFactory; NamedWriteableRegistry registry = new NamedWriteableRegistry(Block.getNamedWriteables()); @Before diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/TestBlockBuilder.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/TestBlockBuilder.java index d9377a490368d..a2b074c1403a0 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/TestBlockBuilder.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/TestBlockBuilder.java @@ -29,7 +29,7 @@ public abstract class TestBlockBuilder implements Block.Builder { public abstract TestBlockBuilder endPositionEntry(); public static Block blockFromValues(List> blockValues, ElementType elementType) { - TestBlockBuilder builder = builderOf(elementType); + TestBlockBuilder builder = builderOf(TestBlockFactory.getNonBreakingInstance(), elementType); for (List rowValues : blockValues) { if (rowValues.isEmpty()) { builder.appendNull(); @@ -47,7 +47,7 @@ public static Block blockFromValues(List> blockValues, ElementType // Builds a block of single values. Each value can be null or non-null. // Differs from blockFromValues, as it does not use begin/endPositionEntry public static Block blockFromSingleValues(List blockValues, ElementType elementType) { - TestBlockBuilder builder = builderOf(elementType); + TestBlockBuilder builder = builderOf(TestBlockFactory.getNonBreakingInstance(), elementType); for (Object rowValue : blockValues) { if (rowValue == null) { builder.appendNull(); @@ -58,39 +58,23 @@ public static Block blockFromSingleValues(List blockValues, ElementType return builder.build(); } - static TestBlockBuilder builderOf(ElementType type) { + static TestBlockBuilder builderOf(BlockFactory blockFactory, ElementType type) { return switch (type) { - case INT -> new TestIntBlockBuilder(0); - case LONG -> new TestLongBlockBuilder(0); - case DOUBLE -> new TestDoubleBlockBuilder(0); - case BYTES_REF -> new TestBytesRefBlockBuilder(0); - case BOOLEAN -> new TestBooleanBlockBuilder(0); + case INT -> new TestIntBlockBuilder(blockFactory, 0); + case LONG -> new TestLongBlockBuilder(blockFactory, 0); + case DOUBLE -> new TestDoubleBlockBuilder(blockFactory, 0); + case BYTES_REF -> new TestBytesRefBlockBuilder(blockFactory, 0); + case BOOLEAN -> new TestBooleanBlockBuilder(blockFactory, 0); default -> throw new AssertionError(type); }; } - static TestBlockBuilder ofInt(int estimatedSize) { - return new TestIntBlockBuilder(estimatedSize); - } - - static TestBlockBuilder ofLong(int estimatedSize) { - return new TestLongBlockBuilder(estimatedSize); - } - - static TestBlockBuilder ofDouble(int estimatedSize) { - return new TestDoubleBlockBuilder(estimatedSize); - } - - static TestBlockBuilder ofBytesRef(int estimatedSize) { - return new TestBytesRefBlockBuilder(estimatedSize); - } - private static class TestIntBlockBuilder extends TestBlockBuilder { private final IntBlock.Builder builder; - TestIntBlockBuilder(int estimatedSize) { - builder = IntBlock.newBlockBuilder(estimatedSize); + TestIntBlockBuilder(BlockFactory blockFactory, int estimatedSize) { + builder = blockFactory.newIntBlockBuilder(estimatedSize); } @Override @@ -150,8 +134,8 @@ private static class 
TestLongBlockBuilder extends TestBlockBuilder { private final LongBlock.Builder builder; - TestLongBlockBuilder(int estimatedSize) { - builder = LongBlock.newBlockBuilder(estimatedSize); + TestLongBlockBuilder(BlockFactory blockFactory, int estimatedSize) { + builder = blockFactory.newLongBlockBuilder(estimatedSize); } @Override @@ -211,8 +195,8 @@ private static class TestDoubleBlockBuilder extends TestBlockBuilder { private final DoubleBlock.Builder builder; - TestDoubleBlockBuilder(int estimatedSize) { - builder = DoubleBlock.newBlockBuilder(estimatedSize); + TestDoubleBlockBuilder(BlockFactory blockFactory, int estimatedSize) { + builder = blockFactory.newDoubleBlockBuilder(estimatedSize); } @Override @@ -272,8 +256,8 @@ private static class TestBytesRefBlockBuilder extends TestBlockBuilder { private final BytesRefBlock.Builder builder; - TestBytesRefBlockBuilder(int estimatedSize) { - builder = BytesRefBlock.newBlockBuilder(estimatedSize); + TestBytesRefBlockBuilder(BlockFactory blockFactory, int estimatedSize) { + builder = blockFactory.newBytesRefBlockBuilder(estimatedSize); } @Override @@ -333,8 +317,8 @@ private static class TestBooleanBlockBuilder extends TestBlockBuilder { private final BooleanBlock.Builder builder; - TestBooleanBlockBuilder(int estimatedSize) { - builder = BooleanBlock.newBlockBuilder(estimatedSize); + TestBooleanBlockBuilder(BlockFactory blockFactory, int estimatedSize) { + builder = blockFactory.newBooleanBlockBuilder(estimatedSize); } @Override diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/TestBlockFactory.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/TestBlockFactory.java new file mode 100644 index 0000000000000..5b7072ab6476d --- /dev/null +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/TestBlockFactory.java @@ -0,0 +1,26 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.data; + +import org.elasticsearch.common.breaker.NoopCircuitBreaker; +import org.elasticsearch.common.util.BigArrays; + +public class TestBlockFactory { + + private static final BlockFactory NON_BREAKING = BlockFactory.getInstance( + new NoopCircuitBreaker("test-noop"), + BigArrays.NON_RECYCLING_INSTANCE + ); + + /** + * Returns the Non-Breaking block factory. 
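With the ofInt/ofLong/ofDouble/ofBytesRef shortcuts removed, test code reaches these builders through the value-list helpers, which now allocate from the shared non-breaking factory. A hedged usage sketch (the API is package-private, so this lives in the same test package):

```java
import java.util.Arrays;

import org.elasticsearch.compute.data.Block;
import org.elasticsearch.compute.data.ElementType;

// Inside org.elasticsearch.compute.data test code:
Block block = TestBlockBuilder.blockFromSingleValues(Arrays.<Object>asList(1, null, 3), ElementType.INT);
assert block.isNull(1); // the middle position is a real null, not a zero
block.close();          // blocks are Releasable even when no breaker is attached
```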
+ */ + public static BlockFactory getNonBreakingInstance() { + return NON_BREAKING; + } +} diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/VectorBuilderTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/VectorBuilderTests.java index 04ccf47ea6122..096db174a2580 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/VectorBuilderTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/VectorBuilderTests.java @@ -114,11 +114,11 @@ public void testCranky() { private Vector.Builder vectorBuilder(int estimatedSize, BlockFactory blockFactory) { return switch (elementType) { case NULL, DOC, UNKNOWN -> throw new UnsupportedOperationException(); - case BOOLEAN -> BooleanVector.newVectorBuilder(estimatedSize, blockFactory); - case BYTES_REF -> BytesRefVector.newVectorBuilder(estimatedSize, blockFactory); - case DOUBLE -> DoubleVector.newVectorBuilder(estimatedSize, blockFactory); - case INT -> IntVector.newVectorBuilder(estimatedSize, blockFactory); - case LONG -> LongVector.newVectorBuilder(estimatedSize, blockFactory); + case BOOLEAN -> blockFactory.newBooleanVectorBuilder(estimatedSize); + case BYTES_REF -> blockFactory.newBytesRefVectorBuilder(estimatedSize); + case DOUBLE -> blockFactory.newDoubleVectorBuilder(estimatedSize); + case INT -> blockFactory.newIntVectorBuilder(estimatedSize); + case LONG -> blockFactory.newLongVectorBuilder(estimatedSize); }; } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/VectorFixedBuilderTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/VectorFixedBuilderTests.java index 3c46fef7e5257..cdfc7611ec678 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/VectorFixedBuilderTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/VectorFixedBuilderTests.java @@ -116,10 +116,10 @@ public void testCranky() { private Vector.Builder vectorBuilder(int size, BlockFactory blockFactory) { return switch (elementType) { case NULL, BYTES_REF, DOC, UNKNOWN -> throw new UnsupportedOperationException(); - case BOOLEAN -> BooleanVector.newVectorFixedBuilder(size, blockFactory); - case DOUBLE -> DoubleVector.newVectorFixedBuilder(size, blockFactory); - case INT -> IntVector.newVectorFixedBuilder(size, blockFactory); - case LONG -> LongVector.newVectorFixedBuilder(size, blockFactory); + case BOOLEAN -> blockFactory.newBooleanVectorFixedBuilder(size); + case DOUBLE -> blockFactory.newDoubleVectorFixedBuilder(size); + case INT -> blockFactory.newIntVectorFixedBuilder(size); + case LONG -> blockFactory.newLongVectorFixedBuilder(size); }; } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneCountOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneCountOperatorTests.java index d6edc903607cc..9acf188a4010d 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneCountOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneCountOperatorTests.java @@ -11,7 +11,6 @@ import org.apache.lucene.document.LongPoint; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.NoMergePolicy; -import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; import 
org.apache.lucene.store.Directory; @@ -27,15 +26,11 @@ import org.elasticsearch.compute.operator.OperatorTestCase; import org.elasticsearch.compute.operator.TestResultPageSinkOperator; import org.elasticsearch.core.IOUtils; -import org.elasticsearch.index.cache.query.TrivialQueryCachingPolicy; -import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.indices.CrankyCircuitBreakerService; -import org.elasticsearch.search.internal.ContextIndexSearcher; import org.elasticsearch.search.internal.SearchContext; import org.junit.After; import java.io.IOException; -import java.io.UncheckedIOException; import java.util.ArrayList; import java.util.List; import java.util.concurrent.CopyOnWriteArrayList; @@ -44,7 +39,6 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.lessThanOrEqualTo; -import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; public class LuceneCountOperatorTests extends AnyOperatorTestCase { @@ -89,10 +83,8 @@ private LuceneCountOperator.Factory simple(BigArrays bigArrays, DataPartitioning throw new RuntimeException(e); } - SearchContext ctx = mockSearchContext(reader); - SearchExecutionContext ectx = mock(SearchExecutionContext.class); - when(ctx.getSearchExecutionContext()).thenReturn(ectx); - when(ectx.getIndexReader()).thenReturn(reader); + SearchContext ctx = LuceneSourceOperatorTests.mockSearchContext(reader); + when(ctx.getSearchExecutionContext().getIndexReader()).thenReturn(reader); final Query query; if (enableShortcut && randomBoolean()) { query = new MatchAllDocsQuery(); @@ -185,25 +177,4 @@ private void testCount(Supplier contexts, int size, int limit) { assertThat(totalCount, equalTo((long) size)); } } - - /** - * Creates a mock search context with the given index reader. - * The returned mock search context can be used to test with {@link LuceneOperator}. 
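The helper being deleted below duplicated one in LuceneSourceOperatorTests; the surviving copy (see its hunk further down) pre-stubs getSearchExecutionContext() to return a SearchExecutionContext mock, which is what makes the chained when(ctx.getSearchExecutionContext().getIndexReader()) stubbing in these tests safe. A hedged sketch of that Mockito pattern:

```java
import org.apache.lucene.index.IndexReader;
import org.elasticsearch.index.query.SearchExecutionContext;
import org.elasticsearch.search.internal.SearchContext;

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

class ChainedStubSketch {
    static SearchContext mockContext() {
        SearchContext ctx = mock(SearchContext.class);
        SearchExecutionContext sec = mock(SearchExecutionContext.class);
        when(ctx.getSearchExecutionContext()).thenReturn(sec); // stub the intermediate link once
        return ctx;
    }

    static void stubReader(SearchContext ctx, IndexReader reader) {
        // Safe only because getSearchExecutionContext() already returns a mock:
        // the when() here ends up stubbing sec.getIndexReader().
        when(ctx.getSearchExecutionContext().getIndexReader()).thenReturn(reader);
    }
}
```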
- */ - public static SearchContext mockSearchContext(IndexReader reader) { - try { - ContextIndexSearcher searcher = new ContextIndexSearcher( - reader, - IndexSearcher.getDefaultSimilarity(), - IndexSearcher.getDefaultQueryCache(), - TrivialQueryCachingPolicy.NEVER, - true - ); - SearchContext searchContext = mock(SearchContext.class); - when(searchContext.searcher()).thenReturn(searcher); - return searchContext; - } catch (IOException e) { - throw new UncheckedIOException(e); - } - } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneSourceOperatorStatusTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneSourceOperatorStatusTests.java index fad1f793122d8..eab3e855d01ab 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneSourceOperatorStatusTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneSourceOperatorStatusTests.java @@ -12,20 +12,39 @@ import org.elasticsearch.test.AbstractWireSerializingTestCase; import org.elasticsearch.test.ESTestCase; +import java.util.List; +import java.util.Set; +import java.util.TreeSet; + import static org.hamcrest.Matchers.equalTo; public class LuceneSourceOperatorStatusTests extends AbstractWireSerializingTestCase { public static LuceneSourceOperator.Status simple() { - return new LuceneSourceOperator.Status(0, 0, 1, 5, 123, 99990, 8000); + return new LuceneSourceOperator.Status(2, Set.of("*:*"), new TreeSet<>(List.of("a:0", "a:1")), 0, 1, 5, 123, 99990, 8000); } public static String simpleToJson() { return """ - {"processed_slices":0,"slice_index":0,"total_slices":1,"pages_emitted":5,"slice_min":123,"slice_max":99990,"current":8000}"""; + { + "processed_slices" : 2, + "processed_queries" : [ + "*:*" + ], + "processed_shards" : [ + "a:0", + "a:1" + ], + "slice_index" : 0, + "total_slices" : 1, + "pages_emitted" : 5, + "slice_min" : 123, + "slice_max" : 99990, + "current" : 8000 + }"""; } public void testToXContent() { - assertThat(Strings.toString(simple()), equalTo(simpleToJson())); + assertThat(Strings.toString(simple(), true, true), equalTo(simpleToJson())); } @Override @@ -37,6 +56,8 @@ protected Writeable.Reader instanceReader() { public LuceneSourceOperator.Status createTestInstance() { return new LuceneSourceOperator.Status( randomNonNegativeInt(), + randomProcessedQueries(), + randomProcessedShards(), randomNonNegativeInt(), randomNonNegativeInt(), randomNonNegativeInt(), @@ -46,26 +67,58 @@ public LuceneSourceOperator.Status createTestInstance() { ); } + private static Set randomProcessedQueries() { + int size = between(0, 10); + Set set = new TreeSet<>(); + while (set.size() < size) { + set.add(randomAlphaOfLength(5)); + } + return set; + } + + private static Set randomProcessedShards() { + int size = between(0, 10); + Set set = new TreeSet<>(); + while (set.size() < size) { + set.add(randomAlphaOfLength(3) + ":" + between(0, 10)); + } + return set; + } + @Override protected LuceneSourceOperator.Status mutateInstance(LuceneSourceOperator.Status instance) { int processedSlices = instance.processedSlices(); + Set processedQueries = instance.processedQueries(); + Set processedShards = instance.processedShards(); int sliceIndex = instance.sliceIndex(); int totalSlices = instance.totalSlices(); int pagesEmitted = instance.pagesEmitted(); int sliceMin = instance.sliceMin(); int sliceMax = instance.sliceMax(); int current = instance.current(); - switch (between(0, 6)) { + switch 
(between(0, 8)) { case 0 -> processedSlices = randomValueOtherThan(processedSlices, ESTestCase::randomNonNegativeInt); - case 1 -> sliceIndex = randomValueOtherThan(sliceIndex, ESTestCase::randomNonNegativeInt); - case 2 -> totalSlices = randomValueOtherThan(totalSlices, ESTestCase::randomNonNegativeInt); - case 3 -> pagesEmitted = randomValueOtherThan(pagesEmitted, ESTestCase::randomNonNegativeInt); - case 4 -> sliceMin = randomValueOtherThan(sliceMin, ESTestCase::randomNonNegativeInt); - case 5 -> sliceMax = randomValueOtherThan(sliceMax, ESTestCase::randomNonNegativeInt); - case 6 -> current = randomValueOtherThan(current, ESTestCase::randomNonNegativeInt); + case 1 -> processedQueries = randomValueOtherThan(processedQueries, LuceneSourceOperatorStatusTests::randomProcessedQueries); + case 2 -> processedShards = randomValueOtherThan(processedShards, LuceneSourceOperatorStatusTests::randomProcessedShards); + case 3 -> sliceIndex = randomValueOtherThan(sliceIndex, ESTestCase::randomNonNegativeInt); + case 4 -> totalSlices = randomValueOtherThan(totalSlices, ESTestCase::randomNonNegativeInt); + case 5 -> pagesEmitted = randomValueOtherThan(pagesEmitted, ESTestCase::randomNonNegativeInt); + case 6 -> sliceMin = randomValueOtherThan(sliceMin, ESTestCase::randomNonNegativeInt); + case 7 -> sliceMax = randomValueOtherThan(sliceMax, ESTestCase::randomNonNegativeInt); + case 8 -> current = randomValueOtherThan(current, ESTestCase::randomNonNegativeInt); default -> throw new UnsupportedOperationException(); } ; - return new LuceneSourceOperator.Status(processedSlices, sliceIndex, totalSlices, pagesEmitted, sliceMin, sliceMax, current); + return new LuceneSourceOperator.Status( + processedSlices, + processedQueries, + processedShards, + sliceIndex, + totalSlices, + pagesEmitted, + sliceMin, + sliceMax, + current + ); } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneSourceOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneSourceOperatorTests.java index 41fe1a93d9c8b..74e9d7b122718 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneSourceOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneSourceOperatorTests.java @@ -27,6 +27,7 @@ import org.elasticsearch.compute.operator.OperatorTestCase; import org.elasticsearch.compute.operator.TestResultPageSinkOperator; import org.elasticsearch.core.IOUtils; +import org.elasticsearch.index.Index; import org.elasticsearch.index.cache.query.TrivialQueryCachingPolicy; import org.elasticsearch.index.fielddata.FieldDataContext; import org.elasticsearch.index.fielddata.IndexFieldData; @@ -96,23 +97,21 @@ private LuceneSourceOperator.Factory simple(BigArrays bigArrays, DataPartitionin } SearchContext ctx = mockSearchContext(reader); - SearchExecutionContext ectx = mock(SearchExecutionContext.class); - when(ctx.getSearchExecutionContext()).thenReturn(ectx); - when(ectx.getFieldType(anyString())).thenAnswer(inv -> { + when(ctx.getSearchExecutionContext().getFieldType(anyString())).thenAnswer(inv -> { String name = inv.getArgument(0); return switch (name) { case "s" -> S_FIELD; default -> throw new IllegalArgumentException("don't support [" + name + "]"); }; }); - when(ectx.getForField(any(), any())).thenAnswer(inv -> { + when(ctx.getSearchExecutionContext().getForField(any(), any())).thenAnswer(inv -> { MappedFieldType ft = inv.getArgument(0); IndexFieldData.Builder builder =
ft.fielddataBuilder(FieldDataContext.noRuntimeFields("test")); return builder.build(new IndexFieldDataCache.None(), bigArrays.breakerService()); }); - when(ectx.nestedScope()).thenReturn(new NestedScope()); - when(ectx.nestedLookup()).thenReturn(NestedLookup.EMPTY); - when(ectx.getIndexReader()).thenReturn(reader); + when(ctx.getSearchExecutionContext().nestedScope()).thenReturn(new NestedScope()); + when(ctx.getSearchExecutionContext().nestedLookup()).thenReturn(NestedLookup.EMPTY); + when(ctx.getSearchExecutionContext().getIndexReader()).thenReturn(reader); Function queryFunction = c -> new MatchAllDocsQuery(); int maxPageSize = between(10, Math.max(10, numDocs)); return new LuceneSourceOperator.Factory(List.of(ctx), queryFunction, dataPartitioning, 1, maxPageSize, limit); @@ -216,6 +215,10 @@ public static SearchContext mockSearchContext(IndexReader reader) { ); SearchContext searchContext = mock(SearchContext.class); when(searchContext.searcher()).thenReturn(searcher); + SearchExecutionContext searchExecutionContext = mock(SearchExecutionContext.class); + when(searchContext.getSearchExecutionContext()).thenReturn(searchExecutionContext); + when(searchExecutionContext.getFullyQualifiedIndex()).thenReturn(new Index("test", "uid")); + when(searchExecutionContext.getShardId()).thenReturn(0); return searchContext; } catch (IOException e) { throw new UncheckedIOException(e); diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneTopNSourceOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneTopNSourceOperatorTests.java index d1b9e706750df..445e3e0f80264 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneTopNSourceOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneTopNSourceOperatorTests.java @@ -32,7 +32,6 @@ import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.NestedLookup; import org.elasticsearch.index.mapper.NumberFieldMapper; -import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.index.query.support.NestedScope; import org.elasticsearch.indices.CrankyCircuitBreakerService; import org.elasticsearch.search.internal.SearchContext; @@ -49,7 +48,6 @@ import static org.hamcrest.Matchers.hasSize; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; public class LuceneTopNSourceOperatorTests extends AnyOperatorTestCase { @@ -90,23 +88,21 @@ private LuceneTopNSourceOperator.Factory simple(BigArrays bigArrays, DataPartiti } SearchContext ctx = LuceneSourceOperatorTests.mockSearchContext(reader); - SearchExecutionContext ectx = mock(SearchExecutionContext.class); - when(ctx.getSearchExecutionContext()).thenReturn(ectx); - when(ectx.getFieldType(anyString())).thenAnswer(inv -> { + when(ctx.getSearchExecutionContext().getFieldType(anyString())).thenAnswer(inv -> { String name = inv.getArgument(0); return switch (name) { case "s" -> S_FIELD; default -> throw new IllegalArgumentException("don't support [" + name + "]"); }; }); - when(ectx.getForField(any(), any())).thenAnswer(inv -> { + when(ctx.getSearchExecutionContext().getForField(any(), any())).thenAnswer(inv -> { MappedFieldType ft = inv.getArgument(0); IndexFieldData.Builder builder = ft.fielddataBuilder(FieldDataContext.noRuntimeFields("test")); return 
builder.build(new IndexFieldDataCache.None(), bigArrays.breakerService()); }); - when(ectx.nestedScope()).thenReturn(new NestedScope()); - when(ectx.nestedLookup()).thenReturn(NestedLookup.EMPTY); - when(ectx.getIndexReader()).thenReturn(reader); + when(ctx.getSearchExecutionContext().nestedScope()).thenReturn(new NestedScope()); + when(ctx.getSearchExecutionContext().nestedLookup()).thenReturn(NestedLookup.EMPTY); + when(ctx.getSearchExecutionContext().getIndexReader()).thenReturn(reader); Function queryFunction = c -> new MatchAllDocsQuery(); int taskConcurrency = 0; int maxPageSize = between(10, Math.max(10, size)); diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperatorStatusTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperatorStatusTests.java index 6f0317b509e3b..1851f7ac948cc 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperatorStatusTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperatorStatusTests.java @@ -24,11 +24,16 @@ public static ValuesSourceReaderOperator.Status simple() { public static String simpleToJson() { return """ - {"readers_built":{"ReaderType":3},"pages_processed":123}"""; + { + "readers_built" : { + "ReaderType" : 3 + }, + "pages_processed" : 123 + }"""; } public void testToXContent() { - assertThat(Strings.toString(simple()), equalTo(simpleToJson())); + assertThat(Strings.toString(simple(), true, true), equalTo(simpleToJson())); } @Override diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperatorTests.java index f6310d826c989..f917056a03026 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperatorTests.java @@ -272,9 +272,9 @@ protected void assertSimpleOutput(List input, List results) { } @Override - protected ByteSizeValue smallEnoughToCircuitBreak() { - assumeTrue("doesn't use big arrays so can't break", false); - return null; + protected ByteSizeValue memoryLimitForSimple() { + assumeFalse("strange exception in the test, fix soon", true); + return ByteSizeValue.ofKb(1); } public void testLoadAll() { diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AggregationOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AggregationOperatorTests.java index 784d5134e9608..58169cb4cfda6 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AggregationOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AggregationOperatorTests.java @@ -83,8 +83,7 @@ protected void assertSimpleOutput(List input, List results) { } @Override - protected ByteSizeValue smallEnoughToCircuitBreak() { - assumeTrue("doesn't use big array so never breaks", false); - return null; + protected ByteSizeValue memoryLimitForSimple() { + return ByteSizeValue.ofBytes(50); } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AnyOperatorTestCase.java 
b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AnyOperatorTestCase.java index 290756e81cfae..3495300f1bc9b 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AnyOperatorTestCase.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AnyOperatorTestCase.java @@ -7,22 +7,9 @@ package org.elasticsearch.compute.operator; -import org.elasticsearch.common.breaker.CircuitBreaker; -import org.elasticsearch.common.breaker.CircuitBreakingException; -import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.BigArrays; -import org.elasticsearch.common.util.MockBigArrays; -import org.elasticsearch.common.util.PageCacheRecycler; import org.elasticsearch.compute.aggregation.GroupingAggregatorFunction; import org.elasticsearch.compute.data.BlockFactory; -import org.elasticsearch.compute.data.MockBlockFactory; -import org.elasticsearch.indices.CrankyCircuitBreakerService; -import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; -import org.elasticsearch.test.ESTestCase; -import org.junit.After; - -import java.util.ArrayList; -import java.util.List; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.matchesPattern; @@ -30,7 +17,7 @@ /** * Superclass for testing any {@link Operator}, including {@link SourceOperator}s. */ -public abstract class AnyOperatorTestCase extends ESTestCase { +public abstract class AnyOperatorTestCase extends ComputeTestCase { /** * The operator configured a "simple" or basic way, used for smoke testing * descriptions and {@link BigArrays} and scatter/gather. @@ -87,58 +74,16 @@ public final void testSimpleToString() { } } - /** - * A {@link BigArrays} that won't throw {@link CircuitBreakingException}. - *
- * Rather than using the {@link NoneCircuitBreakerService} we use a - * very large limit so tests can call {@link CircuitBreaker#getUsed()}. - *
- */ - protected final BigArrays nonBreakingBigArrays() { - return new MockBigArrays(PageCacheRecycler.NON_RECYCLING_INSTANCE, ByteSizeValue.ofBytes(Integer.MAX_VALUE)).withCircuitBreaking(); - } - /** * A {@link DriverContext} with a nonBreakingBigArrays. */ protected DriverContext driverContext() { // TODO make this final once all operators support memory tracking - BigArrays bigArrays = new MockBigArrays(PageCacheRecycler.NON_RECYCLING_INSTANCE, ByteSizeValue.ofGb(1)).withCircuitBreaking(); - CircuitBreaker breaker = bigArrays.breakerService().getBreaker(CircuitBreaker.REQUEST); - breakers.add(breaker); - BlockFactory factory = new MockBlockFactory(breaker, bigArrays); - blockFactories.add(factory); - return new DriverContext(bigArrays, factory); - } - - protected final DriverContext nonBreakingDriverContext() { // TODO drop this once the driverContext method isn't overrideable - return new DriverContext(nonBreakingBigArrays(), BlockFactory.getNonBreakingInstance()); + BlockFactory blockFactory = blockFactory(); + return new DriverContext(blockFactory.bigArrays(), blockFactory); } - private final List breakers = new ArrayList<>(); - private final List blockFactories = new ArrayList<>(); - protected final DriverContext crankyDriverContext() { - CrankyCircuitBreakerService cranky = new CrankyCircuitBreakerService(); - BigArrays bigArrays = new MockBigArrays(PageCacheRecycler.NON_RECYCLING_INSTANCE, cranky).withCircuitBreaking(); - CircuitBreaker breaker = bigArrays.breakerService().getBreaker(CircuitBreaker.REQUEST); - breakers.add(breaker); - BlockFactory blockFactory = new MockBlockFactory(breaker, bigArrays); - blockFactories.add(blockFactory); - return new DriverContext(bigArrays, blockFactory); - } - - @After - public void allBreakersEmpty() throws Exception { - // first check that all big arrays are released, which can affect breakers - MockBigArrays.ensureAllArraysAreReleased(); - - for (CircuitBreaker breaker : breakers) { - for (var factory : blockFactories) { - if (factory instanceof MockBlockFactory mockBlockFactory) { - mockBlockFactory.ensureAllBlocksAreReleased(); - } - } - assertThat("Unexpected used in breaker: " + breaker, breaker.getUsed(), equalTo(0L)); - } + BlockFactory blockFactory = crankyBlockFactory(); + return new DriverContext(blockFactory.bigArrays(), blockFactory); } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AsyncOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AsyncOperatorTests.java index 8cd7116677fd0..c127ac2cf9507 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AsyncOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AsyncOperatorTests.java @@ -76,7 +76,7 @@ public void testBasic() { final DriverContext driverContext; if (randomBoolean()) { localBreaker = new LocalCircuitBreaker(globalBlockFactory.breaker(), between(0, 1024), between(0, 4096)); - BlockFactory localFactory = new BlockFactory(localBreaker, globalBlockFactory.bigArrays()); + BlockFactory localFactory = globalBlockFactory.newChildFactory(localBreaker); driverContext = new DriverContext(globalBlockFactory.bigArrays(), localFactory); } else { driverContext = new DriverContext(globalBlockFactory.bigArrays(), globalBlockFactory); @@ -213,7 +213,7 @@ public void testFailure() throws Exception { final DriverContext driverContext; if (randomBoolean()) { localBreaker = new 
LocalCircuitBreaker(globalBlockFactory.breaker(), between(0, 1024), between(0, 4096)); - BlockFactory localFactory = new BlockFactory(localBreaker, globalBlockFactory.bigArrays()); + BlockFactory localFactory = globalBlockFactory.newChildFactory(localBreaker); driverContext = new DriverContext(globalBlockFactory.bigArrays(), localFactory); } else { driverContext = new DriverContext(globalBlockFactory.bigArrays(), globalBlockFactory); diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/CannedSourceOperator.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/CannedSourceOperator.java index 47febc09e45f5..01f51b32edb1d 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/CannedSourceOperator.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/CannedSourceOperator.java @@ -9,7 +9,9 @@ import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.data.TestBlockFactory; import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Releasables; @@ -47,7 +49,7 @@ public static Page mergePages(List pages) { Block.Builder[] builders = new Block.Builder[first.getBlockCount()]; try { for (int b = 0; b < builders.length; b++) { - builders[b] = first.getBlock(b).elementType().newBlockBuilder(totalPositions); + builders[b] = first.getBlock(b).elementType().newBlockBuilder(totalPositions, TestBlockFactory.getNonBreakingInstance()); } for (Page p : pages) { for (int b = 0; b < builders.length; b++) { @@ -79,11 +81,12 @@ public static Page mergePages(List pages) { */ public static List deepCopyOf(List pages) { List out = new ArrayList<>(pages.size()); + BlockFactory blockFactory = TestBlockFactory.getNonBreakingInstance(); for (Page p : pages) { Block[] blocks = new Block[p.getBlockCount()]; for (int b = 0; b < blocks.length; b++) { Block orig = p.getBlock(b); - Block.Builder builder = orig.elementType().newBlockBuilder(p.getPositionCount()); + Block.Builder builder = orig.elementType().newBlockBuilder(p.getPositionCount(), blockFactory); builder.copyFrom(orig, 0, p.getPositionCount()); blocks[b] = builder.build(); } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/ColumnExtractOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/ColumnExtractOperatorTests.java index 485610f5842bb..f28a982824afa 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/ColumnExtractOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/ColumnExtractOperatorTests.java @@ -58,7 +58,7 @@ public Block eval(Page page) { BytesRefBlock input = page.getBlock(0); for (int i = 0; i < input.getPositionCount(); i++) { if (input.getBytesRef(i, new BytesRef()).utf8ToString().startsWith("no_")) { - return Block.constantNullBlock(input.getPositionCount(), input.blockFactory()); + return input.blockFactory().newConstantNullBlock(input.getPositionCount()); } } input.incRef(); @@ -97,15 +97,15 @@ protected void assertSimpleOutput(List input, List results) { } @Override - protected ByteSizeValue smallEnoughToCircuitBreak() { - return ByteSizeValue.ofBytes(between(1, 32)); + protected ByteSizeValue memoryLimitForSimple() { + return ByteSizeValue.ofKb(15); } public void 
testAllNullValues() { DriverContext driverContext = driverContext(); BytesRef scratch = new BytesRef(); - Block input1 = BytesRefBlock.newBlockBuilder(1, driverContext.blockFactory()).appendBytesRef(new BytesRef("can_match")).build(); - Block input2 = BytesRefBlock.newBlockBuilder(1, driverContext.blockFactory()).appendBytesRef(new BytesRef("no_match")).build(); + Block input1 = driverContext.blockFactory().newBytesRefBlockBuilder(1).appendBytesRef(new BytesRef("can_match")).build(); + Block input2 = driverContext.blockFactory().newBytesRefBlockBuilder(1).appendBytesRef(new BytesRef("no_match")).build(); List inputPages = List.of(new Page(input1), new Page(input2)); List outputPages = drive(simple(driverContext.bigArrays()).get(driverContext), inputPages.iterator(), driverContext); BytesRefBlock output1 = outputPages.get(0).getBlock(1); diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/ComputeTestCase.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/ComputeTestCase.java new file mode 100644 index 0000000000000..ce62fb9896eba --- /dev/null +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/ComputeTestCase.java @@ -0,0 +1,92 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.operator; + +import org.elasticsearch.common.breaker.CircuitBreaker; +import org.elasticsearch.common.breaker.CircuitBreakingException; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.MockBigArrays; +import org.elasticsearch.common.util.PageCacheRecycler; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.MockBlockFactory; +import org.elasticsearch.indices.CrankyCircuitBreakerService; +import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; +import org.elasticsearch.test.ESTestCase; +import org.junit.After; + +import java.util.ArrayList; +import java.util.List; + +import static org.hamcrest.Matchers.equalTo; + +/** + * Superclass for testing with blocks and operators + */ +public abstract class ComputeTestCase extends ESTestCase { + + private final List breakers = new ArrayList<>(); + private final List blockFactories = new ArrayList<>(); + + /** + * A {@link BigArrays} that won't throw {@link CircuitBreakingException}. + *
+ * Rather than using the {@link NoneCircuitBreakerService} we use a + * very large limit so tests can call {@link CircuitBreaker#getUsed()}. + *
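+ * For example, a test can read
+ * {@code bigArrays.breakerService().getBreaker(CircuitBreaker.REQUEST).getUsed()}
+ * and assert on the result without a large allocation ever tripping the breaker.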
+ */ + protected final BigArrays nonBreakingBigArrays() { + return new MockBigArrays(PageCacheRecycler.NON_RECYCLING_INSTANCE, ByteSizeValue.ofBytes(Integer.MAX_VALUE)).withCircuitBreaking(); + } + + /** + * Build a {@link BlockFactory} with a huge limit. + */ + protected final BlockFactory blockFactory() { + return blockFactory(ByteSizeValue.ofGb(1)); + } + + /** + * Build a {@link BlockFactory} with a configured limit. + */ + protected final BlockFactory blockFactory(ByteSizeValue limit) { + BigArrays bigArrays = new MockBigArrays(PageCacheRecycler.NON_RECYCLING_INSTANCE, limit).withCircuitBreaking(); + CircuitBreaker breaker = bigArrays.breakerService().getBreaker(CircuitBreaker.REQUEST); + breakers.add(breaker); + BlockFactory factory = new MockBlockFactory(breaker, bigArrays); + blockFactories.add(factory); + return factory; + } + + /** + * Build a {@link BlockFactory} that randomly fails. + */ + protected final BlockFactory crankyBlockFactory() { + CrankyCircuitBreakerService cranky = new CrankyCircuitBreakerService(); + BigArrays bigArrays = new MockBigArrays(PageCacheRecycler.NON_RECYCLING_INSTANCE, cranky).withCircuitBreaking(); + CircuitBreaker breaker = bigArrays.breakerService().getBreaker(CircuitBreaker.REQUEST); + breakers.add(breaker); + BlockFactory blockFactory = new MockBlockFactory(breaker, bigArrays); + blockFactories.add(blockFactory); + return blockFactory; + } + + @After + public final void allBreakersEmpty() throws Exception { + // first check that all big arrays are released, which can affect breakers + MockBigArrays.ensureAllArraysAreReleased(); + for (var factory : blockFactories) { + if (factory instanceof MockBlockFactory mockBlockFactory) { + mockBlockFactory.ensureAllBlocksAreReleased(); + } + } + for (CircuitBreaker breaker : breakers) { + assertThat("Unexpected used in breaker: " + breaker, breaker.getUsed(), equalTo(0L)); + } + } +} diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverContextTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverContextTests.java index 27076c2adf2d2..a3af5aafcbee3 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverContextTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverContextTests.java @@ -13,7 +13,7 @@ import org.elasticsearch.common.util.MockBigArrays; import org.elasticsearch.common.util.PageCacheRecycler; import org.elasticsearch.common.util.concurrent.EsExecutors; -import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.TestBlockFactory; import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Releasables; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; @@ -167,7 +167,7 @@ static class AssertingDriverContext extends DriverContext { AssertingDriverContext() { super( new MockBigArrays(PageCacheRecycler.NON_RECYCLING_INSTANCE, new NoneCircuitBreakerService()), - BlockFactory.getNonBreakingInstance() + TestBlockFactory.getNonBreakingInstance() ); } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverProfileTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverProfileTests.java index f6b4fbc817940..ec9952cdce022 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverProfileTests.java +++ 
b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverProfileTests.java @@ -30,19 +30,23 @@ public void testToXContent() { new DriverStatus.OperatorStatus("ValuesSourceReader", ValuesSourceReaderOperatorStatusTests.simple()) ) ); - assertThat( - Strings.toString(status), - equalTo( - """ - {"operators":[""" - + """ - {"operator":"LuceneSource","status":""" - + LuceneSourceOperatorStatusTests.simpleToJson() - + "},{\"operator\":\"ValuesSourceReader\",\"status\":" - + ValuesSourceReaderOperatorStatusTests.simpleToJson() - + "}]}" - ) - ); + assertThat(Strings.toString(status, true, true), equalTo(""" + { + "operators" : [ + { + "operator" : "LuceneSource", + "status" : + """.stripTrailing() + " " + LuceneSourceOperatorStatusTests.simpleToJson().replace("\n", "\n ") + """ + + }, + { + "operator" : "ValuesSourceReader", + "status" : + """.stripTrailing() + " " + ValuesSourceReaderOperatorStatusTests.simpleToJson().replace("\n", "\n ") + """ + + } + ] + }""")); } @Override diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverStatusTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverStatusTests.java index cdae4283540c4..c10bcf8d49ca4 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverStatusTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/DriverStatusTests.java @@ -39,15 +39,34 @@ public void testToXContent() { ), List.of(new DriverStatus.OperatorStatus("ExchangeSink", ExchangeSinkOperatorStatusTests.simple())) ); - assertThat(Strings.toString(status), equalTo(""" - {"sessionId":"ABC:123","last_updated":"1973-11-29T09:27:23.214Z","status":"running", - """.trim() + """ - "completed_operators":[{"operator":"LuceneSource","status": - """.trim() + LuceneSourceOperatorStatusTests.simpleToJson() + """ - },{"operator":"ValuesSourceReader","status": - """.trim() + ValuesSourceReaderOperatorStatusTests.simpleToJson() + """ - }],"active_operators":[{"operator":"ExchangeSink","status": - """.trim() + ExchangeSinkOperatorStatusTests.simpleToJson() + "}]}")); + assertThat(Strings.toString(status, true, true), equalTo(""" + { + "sessionId" : "ABC:123", + "last_updated" : "1973-11-29T09:27:23.214Z", + "status" : "running", + "completed_operators" : [ + { + "operator" : "LuceneSource", + "status" : + """.trim() + " " + LuceneSourceOperatorStatusTests.simpleToJson().replace("\n", "\n ") + """ + + }, + { + "operator" : "ValuesSourceReader", + "status" : + """.stripTrailing() + " " + ValuesSourceReaderOperatorStatusTests.simpleToJson().replace("\n", "\n ") + """ + + } + ], + "active_operators" : [ + { + "operator" : "ExchangeSink", + "status" : + """.stripTrailing() + " " + ExchangeSinkOperatorStatusTests.simpleToJson().replace("\n", "\n ") + """ + + } + ] + }""")); } @Override diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/EvalOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/EvalOperatorTests.java index c755c5eafe08d..29e4404d43482 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/EvalOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/EvalOperatorTests.java @@ -37,7 +37,7 @@ record Addition(DriverContext driverContext, int lhs, int rhs) implements EvalOp public Block eval(Page page) { LongVector lhsVector = page.getBlock(0).asVector(); 
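// Addition sums the two incoming long vectors position by position; the
// FixedBuilder below comes from the driver's BlockFactory, so the result
// vector is tracked by the circuit breaker like any other allocation.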
LongVector rhsVector = page.getBlock(1).asVector(); - try (LongVector.FixedBuilder result = LongVector.newVectorFixedBuilder(page.getPositionCount(), driverContext.blockFactory())) { + try (LongVector.FixedBuilder result = driverContext.blockFactory().newLongVectorFixedBuilder(page.getPositionCount())) { for (int p = 0; p < page.getPositionCount(); p++) { result.appendLong(lhsVector.getLong(p) + rhsVector.getLong(p)); } @@ -117,7 +117,7 @@ public void testReadFromBlock() { } @Override - protected ByteSizeValue smallEnoughToCircuitBreak() { - return ByteSizeValue.ofBytes(between(1, 8000)); + protected ByteSizeValue memoryLimitForSimple() { + return ByteSizeValue.ofKb(4); } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/FilterOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/FilterOperatorTests.java index d067435ba9aaa..3c09d0100b366 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/FilterOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/FilterOperatorTests.java @@ -36,7 +36,7 @@ record SameLastDigit(DriverContext context, int lhs, int rhs) implements EvalOpe public Block eval(Page page) { LongVector lhsVector = page.getBlock(0).asVector(); LongVector rhsVector = page.getBlock(1).asVector(); - BooleanVector.FixedBuilder result = BooleanVector.newVectorFixedBuilder(page.getPositionCount(), context.blockFactory()); + BooleanVector.FixedBuilder result = context.blockFactory().newBooleanVectorFixedBuilder(page.getPositionCount()); for (int p = 0; p < page.getPositionCount(); p++) { result.appendBoolean(lhsVector.getLong(p) % 10 == rhsVector.getLong(p) % 10); } @@ -116,7 +116,7 @@ public void testReadFromBlock() { } @Override - protected ByteSizeValue smallEnoughToCircuitBreak() { - return ByteSizeValue.ofBytes(between(1, 600)); + protected ByteSizeValue memoryLimitForSimple() { + return ByteSizeValue.ofKb(1); } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/ForkingOperatorTestCase.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/ForkingOperatorTestCase.java index 9403d22f2b4c4..3986c4b337e03 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/ForkingOperatorTestCase.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/ForkingOperatorTestCase.java @@ -14,9 +14,9 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.compute.aggregation.AggregatorMode; -import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.BlockTestUtils; import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.data.TestBlockFactory; import org.elasticsearch.compute.operator.exchange.ExchangeSinkHandler; import org.elasticsearch.compute.operator.exchange.ExchangeSinkOperator; import org.elasticsearch.compute.operator.exchange.ExchangeSourceHandler; @@ -61,7 +61,7 @@ public final void testInitialFinal() { BigArrays bigArrays = nonBreakingBigArrays(); DriverContext driverContext = driverContext(); List input = CannedSourceOperator.collectPages(simpleInput(driverContext.blockFactory(), between(1_000, 100_000))); - List origInput = BlockTestUtils.deepCopyOf(input, BlockFactory.getNonBreakingInstance()); + List origInput = BlockTestUtils.deepCopyOf(input, 
TestBlockFactory.getNonBreakingInstance()); List results = new ArrayList<>(); try ( Driver d = new Driver( @@ -85,7 +85,7 @@ public final void testManyInitialFinal() { BigArrays bigArrays = nonBreakingBigArrays(); DriverContext driverContext = driverContext(); List input = CannedSourceOperator.collectPages(simpleInput(driverContext.blockFactory(), between(1_000, 100_000))); - List origInput = BlockTestUtils.deepCopyOf(input, BlockFactory.getNonBreakingInstance()); + List origInput = BlockTestUtils.deepCopyOf(input, TestBlockFactory.getNonBreakingInstance()); List partials = oneDriverPerPage(input, () -> List.of(simpleWithMode(bigArrays, AggregatorMode.INITIAL).get(driverContext))); List results = new ArrayList<>(); try ( @@ -107,7 +107,7 @@ public final void testInitialIntermediateFinal() { BigArrays bigArrays = nonBreakingBigArrays(); DriverContext driverContext = driverContext(); List input = CannedSourceOperator.collectPages(simpleInput(driverContext.blockFactory(), between(1_000, 100_000))); - List origInput = BlockTestUtils.deepCopyOf(input, BlockFactory.getNonBreakingInstance()); + List origInput = BlockTestUtils.deepCopyOf(input, TestBlockFactory.getNonBreakingInstance()); List results = new ArrayList<>(); try ( @@ -133,7 +133,7 @@ public final void testManyInitialManyPartialFinal() { BigArrays bigArrays = nonBreakingBigArrays(); DriverContext driverContext = driverContext(); List input = CannedSourceOperator.collectPages(simpleInput(driverContext.blockFactory(), between(1_000, 100_000))); - List origInput = BlockTestUtils.deepCopyOf(input, BlockFactory.getNonBreakingInstance()); + List origInput = BlockTestUtils.deepCopyOf(input, TestBlockFactory.getNonBreakingInstance()); List partials = oneDriverPerPage(input, () -> List.of(simpleWithMode(bigArrays, AggregatorMode.INITIAL).get(driverContext))); Collections.shuffle(partials, random()); @@ -163,7 +163,7 @@ public final void testManyInitialManyPartialFinal() { public final void testManyInitialManyPartialFinalRunner() { BigArrays bigArrays = nonBreakingBigArrays(); List input = CannedSourceOperator.collectPages(simpleInput(driverContext().blockFactory(), between(1_000, 100_000))); - List origInput = BlockTestUtils.deepCopyOf(input, BlockFactory.getNonBreakingInstance()); + List origInput = BlockTestUtils.deepCopyOf(input, TestBlockFactory.getNonBreakingInstance()); List results = new ArrayList<>(); List drivers = createDriversForInput(bigArrays, input, results, false /* no throwing ops */); var runner = new DriverRunner(threadPool.getThreadContext()) { diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/HashAggregationOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/HashAggregationOperatorTests.java index b1ef784ca339c..120f6e2b6e6bd 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/HashAggregationOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/HashAggregationOperatorTests.java @@ -95,8 +95,7 @@ protected void assertSimpleOutput(List input, List results) { } @Override - protected ByteSizeValue smallEnoughToCircuitBreak() { - return ByteSizeValue.ofBytes(between(1, 32)); + protected ByteSizeValue memoryLimitForSimple() { + return ByteSizeValue.ofKb(1); } - } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/LimitOperatorTests.java 
b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/LimitOperatorTests.java index 8c85f5927196f..fdceb6ead36dd 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/LimitOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/LimitOperatorTests.java @@ -51,8 +51,8 @@ protected void assertSimpleOutput(List input, List results) { } @Override - protected ByteSizeValue smallEnoughToCircuitBreak() { - assumeFalse("doesn't use big arrays", true); + protected ByteSizeValue memoryLimitForSimple() { + assumeFalse("doesn't allocate, just filters", true); return null; } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/MultivalueDedupeTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/MultivalueDedupeTests.java index 50b20a2ffdcff..b82ded7cb812f 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/MultivalueDedupeTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/MultivalueDedupeTests.java @@ -28,6 +28,7 @@ import org.elasticsearch.compute.data.ElementType; import org.elasticsearch.compute.data.IntBlock; import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.data.TestBlockFactory; import org.elasticsearch.core.Releasables; import org.elasticsearch.test.ESTestCase; import org.hamcrest.Matcher; @@ -374,7 +375,7 @@ private int assertEncodedPosition(BasicBlockTests.RandomBlock b, BatchEncoder en * This produces a block with a single value per position, but it's good enough * for comparison. */ - Block.Builder builder = elementType.newBlockBuilder(encoder.valueCount(offset)); + Block.Builder builder = elementType.newBlockBuilder(encoder.valueCount(offset), TestBlockFactory.getNonBreakingInstance()); BytesRef[] toDecode = new BytesRef[encoder.valueCount(offset)]; for (int i = 0; i < toDecode.length; i++) { BytesRefBuilder dest = new BytesRefBuilder(); diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/MvExpandOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/MvExpandOperatorTests.java index 3572dc620287d..ff84f1fc7ae3e 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/MvExpandOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/MvExpandOperatorTests.java @@ -12,9 +12,8 @@ import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.ElementType; -import org.elasticsearch.compute.data.IntBlock; -import org.elasticsearch.compute.data.IntVector; import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.data.TestBlockFactory; import java.util.Iterator; import java.util.List; @@ -202,16 +201,17 @@ protected void assertSimpleOutput(List input, List results) { } @Override - protected ByteSizeValue smallEnoughToCircuitBreak() { - assumeTrue("doesn't use big arrays so can't break", false); - return null; + protected ByteSizeValue memoryLimitForSimple() { + assumeFalse("doesn't throw in tests but probably should", true); + return ByteSizeValue.ofBytes(1); } public void testNoopStatus() { + BlockFactory blockFactory = blockFactory(); MvExpandOperator op = new MvExpandOperator(0, randomIntBetween(1, 1000)); List result = drive( op, - List.of(new 
Page(IntVector.newVectorBuilder(2).appendInt(1).appendInt(2).build().asBlock())).iterator(), + List.of(new Page(blockFactory.newIntVectorBuilder(2).appendInt(1).appendInt(2).build().asBlock())).iterator(), driverContext() ); assertThat(result, hasSize(1)); @@ -224,7 +224,8 @@ public void testNoopStatus() { public void testExpandStatus() { MvExpandOperator op = new MvExpandOperator(0, randomIntBetween(1, 1)); - var builder = IntBlock.newBlockBuilder(2).beginPositionEntry().appendInt(1).appendInt(2).endPositionEntry(); + BlockFactory blockFactory = blockFactory(); + var builder = blockFactory.newIntBlockBuilder(2).beginPositionEntry().appendInt(1).appendInt(2).endPositionEntry(); List result = drive(op, List.of(new Page(builder.build())).iterator(), driverContext()); assertThat(result, hasSize(1)); assertThat(valuesAtPositions(result.get(0).getBlock(0), 0, 2), equalTo(List.of(List.of(1), List.of(2)))); @@ -232,6 +233,7 @@ public void testExpandStatus() { assertThat(status.pagesIn(), equalTo(1)); assertThat(status.pagesOut(), equalTo(1)); assertThat(status.noops(), equalTo(0)); + result.forEach(Page::releaseBlocks); } public void testExpandWithBytesRefs() { @@ -253,7 +255,7 @@ protected Page createPage(int positionOffset, int length) { ); } }); - List origInput = deepCopyOf(input, BlockFactory.getNonBreakingInstance()); + List origInput = deepCopyOf(input, TestBlockFactory.getNonBreakingInstance()); List results = drive(new MvExpandOperator(0, randomIntBetween(1, 1000)), input.iterator(), context); assertSimpleOutput(origInput, results); } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/OperatorTestCase.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/OperatorTestCase.java index 2f1cc2981766e..de2c94f9f0a3a 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/OperatorTestCase.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/OperatorTestCase.java @@ -24,6 +24,7 @@ import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.BlockTestUtils; import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.data.TestBlockFactory; import org.elasticsearch.core.Releasables; import org.elasticsearch.core.TimeValue; import org.elasticsearch.indices.CrankyCircuitBreakerService; @@ -56,13 +57,6 @@ public abstract class OperatorTestCase extends AnyOperatorTestCase { */ protected abstract void assertSimpleOutput(List input, List results); - /** - * A {@link ByteSizeValue} that is so small any input to the operator - * will cause it to circuit break. If the operator can't break then - * throw an {@link AssumptionViolatedException}. - */ - protected abstract ByteSizeValue smallEnoughToCircuitBreak(); - /** * Test a small input set against {@link #simple}. Smaller input sets * are more likely to discover accidental behavior for clumped inputs. @@ -79,27 +73,55 @@ public final void testSimpleLargeInput() { } /** - * Run {@link #simple} with a circuit breaker configured by - * {@link #smallEnoughToCircuitBreak} and assert that it breaks - * in a sane way. + * A {@link ByteSizeValue} that is small enough that running {@link #simple} + * on {@link #simpleInput} will exhaust the breaker and throw a + * {@link CircuitBreakingException}. We should make an effort to make this + * number as large as possible and still cause a break consistently so we get + * good test coverage. 
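+ * For example, the filter and hash aggregation tests in this change settle on
+ * {@code ByteSizeValue.ofKb(1)}, while the column and string extract tests use
+ * {@code ByteSizeValue.ofKb(15)}.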
If the operator can't break then throw an + * {@link AssumptionViolatedException}. + */ + protected abstract ByteSizeValue memoryLimitForSimple(); + + /** + * Run {@link #simple} with a circuit breaker limited to somewhere + * between 0 bytes and {@link #memoryLimitForSimple} and assert that + * it breaks in a sane way. */ - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/101824") public final void testSimpleCircuitBreaking() { + testSimpleCircuitBreaking(ByteSizeValue.ofBytes(randomLongBetween(0, memoryLimitForSimple().getBytes()))); + } + + /** + * Run {@link #simple} with a circuit breaker limited to + * {@link #memoryLimitForSimple} and assert that it breaks in a sane way. + *
+ * This test helps to make sure that the limits set by + * {@link #memoryLimitForSimple} aren't too large. + * {@link #testSimpleCircuitBreaking}, with its randomly configured + * limit, will use the actual maximum very rarely. + *
+ */ + public final void testSimpleCircuitBreakingAtLimit() { + testSimpleCircuitBreaking(memoryLimitForSimple()); + } + + private void testSimpleCircuitBreaking(ByteSizeValue limit) { /* * We build two CircuitBreakers - one for the input blocks and one for the operation itself. * The input blocks don't count against the memory usage for the limited operator that we * build. */ DriverContext inputFactoryContext = driverContext(); - BigArrays bigArrays = new MockBigArrays(PageCacheRecycler.NON_RECYCLING_INSTANCE, smallEnoughToCircuitBreak()) - .withCircuitBreaking(); + BigArrays bigArrays = new MockBigArrays(PageCacheRecycler.NON_RECYCLING_INSTANCE, limit).withCircuitBreaking(); + Operator.OperatorFactory simple = simple(bigArrays); + logger.info("running {} with {}", simple, bigArrays.breakerService().getBreaker(CircuitBreaker.REQUEST)); List input = CannedSourceOperator.collectPages(simpleInput(inputFactoryContext.blockFactory(), between(1_000, 10_000))); CircuitBreaker breaker = bigArrays.breakerService().getBreaker(CircuitBreaker.REQUEST); BlockFactory blockFactory = BlockFactory.getInstance(breaker, bigArrays); DriverContext driverContext = new DriverContext(bigArrays, blockFactory); boolean[] driverStarted = new boolean[1]; Exception e = expectThrows(CircuitBreakingException.class, () -> { - var operator = simple(bigArrays).get(driverContext); + var operator = simple.get(driverContext); driverStarted[0] = true; drive(operator, input.iterator(), driverContext); }); @@ -186,7 +208,7 @@ protected final void assertSimple(DriverContext context, int size) { } // Clone the input so that the operator can close it, then, later, we can read it again to build the assertion. - List origInput = BlockTestUtils.deepCopyOf(input, BlockFactory.getNonBreakingInstance()); + List origInput = BlockTestUtils.deepCopyOf(input, TestBlockFactory.getNonBreakingInstance()); BigArrays bigArrays = context.bigArrays().withCircuitBreaking(); List results = drive(simple(bigArrays).get(context), input.iterator(), context); @@ -270,10 +292,10 @@ public static void runDriver(List drivers) { drivers.add( new Driver( "dummy-session", - new DriverContext(BigArrays.NON_RECYCLING_INSTANCE, BlockFactory.getNonBreakingInstance()), + new DriverContext(BigArrays.NON_RECYCLING_INSTANCE, TestBlockFactory.getNonBreakingInstance()), () -> "dummy-driver", new SequenceLongBlockSourceOperator( - BlockFactory.getNonBreakingInstance(), + TestBlockFactory.getNonBreakingInstance(), LongStream.range(0, between(1, 100)), between(1, 100) ), diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/ProjectOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/ProjectOperatorTests.java index 30f3bfda27d5e..15166ac525435 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/ProjectOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/ProjectOperatorTests.java @@ -97,8 +97,8 @@ protected void assertSimpleOutput(List input, List results) { } @Override - protected ByteSizeValue smallEnoughToCircuitBreak() { - assumeTrue("doesn't use big arrays so can't break", false); + protected ByteSizeValue memoryLimitForSimple() { + assumeTrue("doesn't allocate", false); return null; } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/RowOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/RowOperatorTests.java index 
c8250eba5703a..cd8a49939fbb5 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/RowOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/RowOperatorTests.java @@ -11,12 +11,12 @@ import org.elasticsearch.common.util.MockBigArrays; import org.elasticsearch.common.util.PageCacheRecycler; import org.elasticsearch.compute.data.Block; -import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.BooleanBlock; import org.elasticsearch.compute.data.BytesRefBlock; import org.elasticsearch.compute.data.DoubleBlock; import org.elasticsearch.compute.data.IntBlock; import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.data.TestBlockFactory; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.test.ESTestCase; @@ -28,7 +28,7 @@ public class RowOperatorTests extends ESTestCase { final DriverContext driverContext = new DriverContext( new MockBigArrays(PageCacheRecycler.NON_RECYCLING_INSTANCE, new NoneCircuitBreakerService()).withCircuitBreaking(), - BlockFactory.getNonBreakingInstance() + TestBlockFactory.getNonBreakingInstance() ); public void testBoolean() { diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/SequenceBooleanBlockSourceOperator.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/SequenceBooleanBlockSourceOperator.java index 7c1c62aea6ab9..b92c6d01e5077 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/SequenceBooleanBlockSourceOperator.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/SequenceBooleanBlockSourceOperator.java @@ -37,12 +37,13 @@ public SequenceBooleanBlockSourceOperator(BlockFactory blockFactory, List @Override protected Page createPage(int positionOffset, int length) { - DoubleVector.FixedBuilder builder = DoubleVector.newVectorFixedBuilder(length, blockFactory); + DoubleVector.FixedBuilder builder = blockFactory.newDoubleVectorFixedBuilder(length); for (int i = 0; i < length; i++) { builder.appendDouble(values[positionOffset + i]); } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/StringExtractOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/StringExtractOperatorTests.java index 70ef2118fcef0..a90d6e71633e6 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/StringExtractOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/StringExtractOperatorTests.java @@ -85,8 +85,8 @@ protected void assertSimpleOutput(List input, List results) { } @Override - protected ByteSizeValue smallEnoughToCircuitBreak() { - return ByteSizeValue.ofBytes(between(1, 32)); + protected ByteSizeValue memoryLimitForSimple() { + return ByteSizeValue.ofKb(15); } public void testMultivalueDissectInput() { @@ -103,8 +103,9 @@ public Block eval(Page page) { public void close() {} }, new FirstWord("test"), driverContext()); - Page result = null; - try (BytesRefBlock.Builder builder = BytesRefBlock.newBlockBuilder(1)) { + BlockFactory blockFactory = blockFactory(); + final Page result; + try (BytesRefBlock.Builder builder = blockFactory.newBytesRefBlockBuilder(1)) { builder.beginPositionEntry(); builder.appendBytesRef(new BytesRef("foo1 bar1")); builder.appendBytesRef(new BytesRef("foo2 bar2")); diff --git 
a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/TestResultPageSinkOperator.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/TestResultPageSinkOperator.java index aaa3a6ac8a3c8..e2cb0e21938e2 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/TestResultPageSinkOperator.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/TestResultPageSinkOperator.java @@ -7,9 +7,9 @@ package org.elasticsearch.compute.operator; -import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.BlockTestUtils; import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.data.TestBlockFactory; import java.util.function.Consumer; @@ -21,7 +21,7 @@ public class TestResultPageSinkOperator extends PageConsumerOperator { public TestResultPageSinkOperator(Consumer pageConsumer) { super(page -> { - Page copy = BlockTestUtils.deepCopyOf(page, BlockFactory.getNonBreakingInstance()); + Page copy = BlockTestUtils.deepCopyOf(page, TestBlockFactory.getNonBreakingInstance()); page.releaseBlocks(); pageConsumer.accept(copy); }); diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/exchange/ExchangeServiceTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/exchange/ExchangeServiceTests.java index 74e83017e03bf..8be6cdebad538 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/exchange/ExchangeServiceTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/exchange/ExchangeServiceTests.java @@ -24,8 +24,8 @@ import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; -import org.elasticsearch.compute.data.ConstantIntVector; import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.MockBlockFactory; import org.elasticsearch.compute.data.Page; import org.elasticsearch.compute.operator.Driver; import org.elasticsearch.compute.operator.DriverContext; @@ -41,6 +41,7 @@ import org.elasticsearch.threadpool.FixedExecutorBuilder; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.transport.AbstractSimpleTransportTestCase; +import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportChannel; import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.transport.TransportRequestHandler; @@ -85,9 +86,10 @@ public void shutdownThreadPool() { } public void testBasic() throws Exception { + BlockFactory blockFactory = blockFactory(); Page[] pages = new Page[7]; for (int i = 0; i < pages.length; i++) { - pages[i] = new Page(new ConstantIntVector(i, 2).asBlock()); + pages[i] = new Page(blockFactory.newConstantIntBlockWith(i, 2)); } ExchangeSinkHandler sinkExchanger = new ExchangeSinkHandler(2, threadPool::relativeTimeInMillis); ExchangeSink sink1 = sinkExchanger.createExchangeSink(); @@ -143,6 +145,9 @@ public void testBasic() throws Exception { sourceExchanger.decRef(); assertTrue(latch.await(1, TimeUnit.SECONDS)); ESTestCase.terminate(threadPool); + for (Page page : pages) { + page.releaseBlocks(); + } } /** @@ -180,14 +185,15 @@ public Page getOutput() { return null; } int size = randomIntBetween(1, 10); - IntBlock.Builder builder = IntBlock.newBlockBuilder(size); - for (int i = 0; i < size; i++) { - int seqNo = 
nextSeqNo.incrementAndGet(); - if (seqNo < maxInputSeqNo) { - builder.appendInt(seqNo); + try (IntBlock.Builder builder = driverContext.blockFactory().newIntBlockBuilder(size)) { + for (int i = 0; i < size; i++) { + int seqNo = nextSeqNo.incrementAndGet(); + if (seqNo < maxInputSeqNo) { + builder.appendInt(seqNo); + } } + return new Page(builder.build()); } - return new Page(builder.build()); } @Override @@ -338,8 +344,9 @@ public void testConcurrentWithHandlers() { } public void testEarlyTerminate() { - IntBlock block1 = new ConstantIntVector(1, 2).asBlock(); - IntBlock block2 = new ConstantIntVector(1, 2).asBlock(); + BlockFactory blockFactory = blockFactory(); + IntBlock block1 = blockFactory.newConstantIntBlockWith(1, 2); + IntBlock block2 = blockFactory.newConstantIntBlockWith(1, 2); Page p1 = new Page(block1); Page p2 = new Page(block2); ExchangeSinkHandler sinkExchanger = new ExchangeSinkHandler(2, threadPool::relativeTimeInMillis); @@ -368,9 +375,10 @@ public void testConcurrentWithTransportActions() throws Exception { try (exchange0; exchange1; node0; node1) { String exchangeId = "exchange"; Task task = new Task(1, "", "", "", null, Collections.emptyMap()); - ExchangeSourceHandler sourceHandler = exchange0.createSourceHandler(exchangeId, randomExchangeBuffer(), ESQL_TEST_EXECUTOR); + var sourceHandler = new ExchangeSourceHandler(randomExchangeBuffer(), threadPool.executor(ESQL_TEST_EXECUTOR)); ExchangeSinkHandler sinkHandler = exchange1.createSinkHandler(exchangeId, randomExchangeBuffer()); - sourceHandler.addRemoteSink(exchange0.newRemoteSink(task, exchangeId, node0, node1.getLocalNode()), randomIntBetween(1, 5)); + Transport.Connection connection = node0.getConnection(node1.getLocalNode()); + sourceHandler.addRemoteSink(exchange0.newRemoteSink(task, exchangeId, node0, connection), randomIntBetween(1, 5)); final int maxInputSeqNo = rarely() ? -1 : randomIntBetween(0, 50_000); final int maxOutputSeqNo = rarely() ? 
-1 : randomIntBetween(0, 50_000); runConcurrentTest(maxInputSeqNo, maxOutputSeqNo, sourceHandler::createExchangeSource, sinkHandler::createExchangeSink); @@ -410,8 +418,8 @@ public void sendResponse(TransportResponse transportResponse) throws IOException } } } - ExchangeResponse newResp = new ExchangeResponse(page, origResp.finished()); origResp.decRef(); + ExchangeResponse newResp = new ExchangeResponse(page, origResp.finished()); super.sendResponse(newResp); } }; @@ -421,9 +429,10 @@ public void sendResponse(TransportResponse transportResponse) throws IOException try (exchange0; exchange1; node0; node1) { String exchangeId = "exchange"; Task task = new Task(1, "", "", "", null, Collections.emptyMap()); - ExchangeSourceHandler sourceHandler = exchange0.createSourceHandler(exchangeId, randomIntBetween(1, 128), ESQL_TEST_EXECUTOR); + var sourceHandler = new ExchangeSourceHandler(randomIntBetween(1, 128), threadPool.executor(ESQL_TEST_EXECUTOR)); ExchangeSinkHandler sinkHandler = exchange1.createSinkHandler(exchangeId, randomIntBetween(1, 128)); - sourceHandler.addRemoteSink(exchange0.newRemoteSink(task, exchangeId, node0, node1.getLocalNode()), randomIntBetween(1, 5)); + Transport.Connection connection = node0.getConnection(node1.getLocalDiscoNode()); + sourceHandler.addRemoteSink(exchange0.newRemoteSink(task, exchangeId, node0, connection), randomIntBetween(1, 5)); Exception err = expectThrows( Exception.class, () -> runConcurrentTest(maxSeqNo, maxSeqNo, sourceHandler::createExchangeSource, sinkHandler::createExchangeSink) @@ -431,6 +440,7 @@ public void sendResponse(TransportResponse transportResponse) throws IOException Throwable cause = ExceptionsHelper.unwrap(err, IOException.class); assertNotNull(cause); assertThat(cause.getMessage(), equalTo("page is too large")); + sinkHandler.onFailure(new RuntimeException(cause)); } } @@ -495,11 +505,18 @@ private BlockFactory blockFactory() { MockBigArrays bigArrays = new MockBigArrays(PageCacheRecycler.NON_RECYCLING_INSTANCE, ByteSizeValue.ofGb(1)); CircuitBreaker breaker = bigArrays.breakerService().getBreaker(CircuitBreaker.REQUEST); breakers.add(breaker); - return new BlockFactory(breaker, bigArrays); + MockBlockFactory factory = new MockBlockFactory(breaker, bigArrays); + blockFactories.add(factory); + return factory; } + private final List blockFactories = new ArrayList<>(); + @After public void allMemoryReleased() { + for (MockBlockFactory blockFactory : blockFactories) { + blockFactory.ensureAllBlocksAreReleased(); + } for (CircuitBreaker breaker : breakers) { assertThat(breaker.getUsed(), equalTo(0L)); } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/exchange/ExchangeSinkOperatorStatusTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/exchange/ExchangeSinkOperatorStatusTests.java index 7438055284b14..369913c7d152c 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/exchange/ExchangeSinkOperatorStatusTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/exchange/ExchangeSinkOperatorStatusTests.java @@ -17,7 +17,7 @@ public class ExchangeSinkOperatorStatusTests extends AbstractWireSerializingTestCase { public void testToXContent() { - assertThat(Strings.toString(simple()), equalTo(simpleToJson())); + assertThat(Strings.toString(simple(), true, true), equalTo(simpleToJson())); } public static ExchangeSinkOperator.Status simple() { @@ -26,7 +26,9 @@ public static 
ExchangeSinkOperator.Status simple() { public static String simpleToJson() { return """ - {"pages_accepted":10}"""; + { + "pages_accepted" : 10 + }"""; } @Override diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/topn/ExtractorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/topn/ExtractorTests.java index 7c8f68549c8a4..b2d0f288c900e 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/topn/ExtractorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/topn/ExtractorTests.java @@ -19,7 +19,7 @@ import org.elasticsearch.compute.data.BlockUtils; import org.elasticsearch.compute.data.DocVector; import org.elasticsearch.compute.data.ElementType; -import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.TestBlockFactory; import org.elasticsearch.compute.operator.BreakingBytesRefBuilder; import org.elasticsearch.test.ESTestCase; @@ -34,6 +34,7 @@ public class ExtractorTests extends ESTestCase { @ParametersFactory public static Iterable<Object[]> parameters() { + BlockFactory blockFactory = TestBlockFactory.getNonBreakingInstance(); List<Object[]> cases = new ArrayList<>(); for (ElementType e : ElementType.values()) { switch (e) { @@ -79,9 +80,9 @@ public static Iterable<Object[]> parameters() { e, TopNEncoder.DEFAULT_UNSORTABLE, () -> new DocVector( - IntBlock.newConstantBlockWith(randomInt(), 1).asVector(), - IntBlock.newConstantBlockWith(randomInt(), 1).asVector(), - IntBlock.newConstantBlockWith(randomInt(), 1).asVector(), + blockFactory.newConstantIntBlockWith(randomInt(), 1).asVector(), + blockFactory.newConstantIntBlockWith(randomInt(), 1).asVector(), + blockFactory.newConstantIntBlockWith(randomInt(), 1).asVector(), randomBoolean() ?
null : randomBoolean() ).asBlock() ) } @@ -109,7 +110,7 @@ static Object[] valueTestCase(String name, ElementType type, TopNEncoder encoder name, type, encoder, - () -> BlockUtils.fromListRow(BlockFactory.getNonBreakingInstance(), Arrays.asList(value.get()))[0] + () -> BlockUtils.fromListRow(TestBlockFactory.getNonBreakingInstance(), Arrays.asList(value.get()))[0] ) }; } @@ -150,7 +151,7 @@ public void testNotInKey() { assertThat(valuesBuilder.length(), greaterThan(0)); ResultBuilder result = ResultBuilder.resultBuilderFor( - BlockFactory.getNonBreakingInstance(), + TestBlockFactory.getNonBreakingInstance(), testCase.type, testCase.encoder.toUnsortable(), false, @@ -177,7 +178,7 @@ public void testInKey() { assertThat(valuesBuilder.length(), greaterThan(0)); ResultBuilder result = ResultBuilder.resultBuilderFor( - BlockFactory.getNonBreakingInstance(), + TestBlockFactory.getNonBreakingInstance(), testCase.type, testCase.encoder.toUnsortable(), true, diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/topn/TopNOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/topn/TopNOperatorTests.java index be3e75fcce2a2..2715e8a5d1d83 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/topn/TopNOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/topn/TopNOperatorTests.java @@ -18,15 +18,12 @@ import org.elasticsearch.common.util.MockBigArrays; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; -import org.elasticsearch.compute.data.BooleanBlock; -import org.elasticsearch.compute.data.BytesRefBlock; -import org.elasticsearch.compute.data.DoubleBlock; import org.elasticsearch.compute.data.ElementType; -import org.elasticsearch.compute.data.IntArrayVector; import org.elasticsearch.compute.data.IntBlock; import org.elasticsearch.compute.data.LongBlock; import org.elasticsearch.compute.data.Page; import org.elasticsearch.compute.data.TestBlockBuilder; +import org.elasticsearch.compute.data.TestBlockFactory; import org.elasticsearch.compute.operator.CannedSourceOperator; import org.elasticsearch.compute.operator.Driver; import org.elasticsearch.compute.operator.DriverContext; @@ -181,12 +178,12 @@ protected void assertSimpleOutput(List input, List results) { } @Override - protected ByteSizeValue smallEnoughToCircuitBreak() { + protected ByteSizeValue memoryLimitForSimple() { /* * 775 causes us to blow up while collecting values and 780 doesn't - * trip the breaker. So 775 is the max on this range. + * trip the breaker. 
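The limit above works through a circuit breaker rather than page sizing: the value returned by memoryLimitForSimple() caps the breaker, and any block allocation past the cap must raise a CircuitBreakingException. A minimal sketch of that mechanism, assuming a plain ESTestCase (MockBigArrays, the REQUEST breaker, BlockFactory and newIntBlockBuilder all appear elsewhere in this diff; the test class, loop size, and exact trip point are invented for illustration):

import org.elasticsearch.common.breaker.CircuitBreaker;
import org.elasticsearch.common.breaker.CircuitBreakingException;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.util.MockBigArrays;
import org.elasticsearch.common.util.PageCacheRecycler;
import org.elasticsearch.compute.data.BlockFactory;
import org.elasticsearch.compute.data.IntBlock;
import org.elasticsearch.test.ESTestCase;

public class BreakerLimitSketch extends ESTestCase {
    public void testBlockAllocationTripsBreaker() {
        // Cap the REQUEST breaker at the 775-byte limit discussed above; every
        // allocation made through the factory is charged against this breaker.
        MockBigArrays bigArrays = new MockBigArrays(PageCacheRecycler.NON_RECYCLING_INSTANCE, ByteSizeValue.ofBytes(775));
        CircuitBreaker breaker = bigArrays.breakerService().getBreaker(CircuitBreaker.REQUEST);
        BlockFactory blockFactory = new BlockFactory(breaker, bigArrays);
        // Collecting far more ints than 775 bytes can hold should trip the
        // breaker while the block is being built.
        expectThrows(CircuitBreakingException.class, () -> {
            try (IntBlock.Builder builder = blockFactory.newIntBlockBuilder(10_000)) {
                for (int i = 0; i < 10_000; i++) {
                    builder.appendInt(i);
                }
                builder.build().close();
            }
        });
    }
}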
*/ - return ByteSizeValue.ofBytes(between(1, 775)); + return ByteSizeValue.ofBytes(775); } public void testRamBytesUsed() { @@ -305,14 +302,14 @@ private List topNLong(List inputValues, int limit, boolean ascending } public void testCompareInts() { + BlockFactory blockFactory = blockFactory(); testCompare( new Page( - new Block[] { - IntBlock.newBlockBuilder(2).appendInt(Integer.MIN_VALUE).appendInt(randomIntBetween(-1000, -1)).build(), - IntBlock.newBlockBuilder(2).appendInt(randomIntBetween(-1000, -1)).appendInt(0).build(), - IntBlock.newBlockBuilder(2).appendInt(0).appendInt(randomIntBetween(1, 1000)).build(), - IntBlock.newBlockBuilder(2).appendInt(randomIntBetween(1, 1000)).appendInt(Integer.MAX_VALUE).build(), - IntBlock.newBlockBuilder(2).appendInt(0).appendInt(Integer.MAX_VALUE).build() } + blockFactory.newIntBlockBuilder(2).appendInt(Integer.MIN_VALUE).appendInt(randomIntBetween(-1000, -1)).build(), + blockFactory.newIntBlockBuilder(2).appendInt(randomIntBetween(-1000, -1)).appendInt(0).build(), + blockFactory.newIntBlockBuilder(2).appendInt(0).appendInt(randomIntBetween(1, 1000)).build(), + blockFactory.newIntBlockBuilder(2).appendInt(randomIntBetween(1, 1000)).appendInt(Integer.MAX_VALUE).build(), + blockFactory.newIntBlockBuilder(2).appendInt(0).appendInt(Integer.MAX_VALUE).build() ), INT, DEFAULT_SORTABLE @@ -320,14 +317,14 @@ public void testCompareInts() { } public void testCompareLongs() { + BlockFactory blockFactory = blockFactory(); testCompare( new Page( - new Block[] { - LongBlock.newBlockBuilder(2).appendLong(Long.MIN_VALUE).appendLong(randomLongBetween(-1000, -1)).build(), - LongBlock.newBlockBuilder(2).appendLong(randomLongBetween(-1000, -1)).appendLong(0).build(), - LongBlock.newBlockBuilder(2).appendLong(0).appendLong(randomLongBetween(1, 1000)).build(), - LongBlock.newBlockBuilder(2).appendLong(randomLongBetween(1, 1000)).appendLong(Long.MAX_VALUE).build(), - LongBlock.newBlockBuilder(2).appendLong(0).appendLong(Long.MAX_VALUE).build() } + blockFactory.newLongBlockBuilder(2).appendLong(Long.MIN_VALUE).appendLong(randomLongBetween(-1000, -1)).build(), + blockFactory.newLongBlockBuilder(2).appendLong(randomLongBetween(-1000, -1)).appendLong(0).build(), + blockFactory.newLongBlockBuilder(2).appendLong(0).appendLong(randomLongBetween(1, 1000)).build(), + blockFactory.newLongBlockBuilder(2).appendLong(randomLongBetween(1, 1000)).appendLong(Long.MAX_VALUE).build(), + blockFactory.newLongBlockBuilder(2).appendLong(0).appendLong(Long.MAX_VALUE).build() ), LONG, DEFAULT_SORTABLE @@ -335,17 +332,17 @@ public void testCompareLongs() { } public void testCompareDoubles() { + BlockFactory blockFactory = blockFactory(); testCompare( new Page( - new Block[] { - DoubleBlock.newBlockBuilder(2) - .appendDouble(-Double.MAX_VALUE) - .appendDouble(randomDoubleBetween(-1000, -1, true)) - .build(), - DoubleBlock.newBlockBuilder(2).appendDouble(randomDoubleBetween(-1000, -1, true)).appendDouble(0.0).build(), - DoubleBlock.newBlockBuilder(2).appendDouble(0).appendDouble(randomDoubleBetween(1, 1000, true)).build(), - DoubleBlock.newBlockBuilder(2).appendDouble(randomLongBetween(1, 1000)).appendDouble(Double.MAX_VALUE).build(), - DoubleBlock.newBlockBuilder(2).appendDouble(0.0).appendDouble(Double.MAX_VALUE).build() } + blockFactory.newDoubleBlockBuilder(2) + .appendDouble(-Double.MAX_VALUE) + .appendDouble(randomDoubleBetween(-1000, -1, true)) + .build(), + blockFactory.newDoubleBlockBuilder(2).appendDouble(randomDoubleBetween(-1000, -1, true)).appendDouble(0.0).build(), + 
blockFactory.newDoubleBlockBuilder(2).appendDouble(0).appendDouble(randomDoubleBetween(1, 1000, true)).build(), + blockFactory.newDoubleBlockBuilder(2).appendDouble(randomLongBetween(1, 1000)).appendDouble(Double.MAX_VALUE).build(), + blockFactory.newDoubleBlockBuilder(2).appendDouble(0.0).appendDouble(Double.MAX_VALUE).build() ), DOUBLE, DEFAULT_SORTABLE @@ -353,10 +350,10 @@ public void testCompareDoubles() { } public void testCompareUtf8() { + BlockFactory blockFactory = blockFactory(); testCompare( new Page( - new Block[] { - BytesRefBlock.newBlockBuilder(2).appendBytesRef(new BytesRef("bye")).appendBytesRef(new BytesRef("hello")).build() } + blockFactory.newBytesRefBlockBuilder(2).appendBytesRef(new BytesRef("bye")).appendBytesRef(new BytesRef("hello")).build() ), BYTES_REF, UTF8 @@ -364,15 +361,16 @@ public void testCompareUtf8() { } public void testCompareBooleans() { + BlockFactory blockFactory = blockFactory(); testCompare( - new Page(new Block[] { BooleanBlock.newBlockBuilder(2).appendBoolean(false).appendBoolean(true).build() }), + new Page(blockFactory.newBooleanBlockBuilder(2).appendBoolean(false).appendBoolean(true).build()), BOOLEAN, DEFAULT_SORTABLE ); } private void testCompare(Page page, ElementType elementType, TopNEncoder encoder) { - Block nullBlock = Block.constantNullBlock(1); + Block nullBlock = TestBlockFactory.getNonBreakingInstance().newConstantNullBlock(1); Page nullPage = new Page(new Block[] { nullBlock, nullBlock, nullBlock, nullBlock, nullBlock }); for (int b = 0; b < page.getBlockCount(); b++) { @@ -423,6 +421,7 @@ private void testCompare(Page page, ElementType elementType, TopNEncoder encoder assertThat(TopNOperator.compareRows(r2, r1), greaterThan(0)); } } + page.releaseBlocks(); } private TopNOperator.Row row( @@ -1386,7 +1385,7 @@ public void testCloseWithoutCompleting() { randomPageSize() ) ) { - op.addInput(new Page(new IntArrayVector(new int[] { 1 }, 1).asBlock())); + op.addInput(new Page(blockFactory().newIntArrayVector(new int[] { 1 }, 1).asBlock())); } } diff --git a/x-pack/plugin/esql/qa/security/build.gradle b/x-pack/plugin/esql/qa/security/build.gradle index 44a4f5a27efea..33371320b865d 100644 --- a/x-pack/plugin/esql/qa/security/build.gradle +++ b/x-pack/plugin/esql/qa/security/build.gradle @@ -1,16 +1,5 @@ -apply plugin: 'elasticsearch.legacy-java-rest-test' +apply plugin: 'elasticsearch.internal-java-rest-test' -testClusters.configureEach { - testDistribution = 'DEFAULT' - setting 'xpack.license.self_generated.type', 'trial' - setting 'xpack.monitoring.collection.enabled', 'false' - setting 'xpack.security.enabled', 'true' - numberOfNodes = 1 - extraConfigFile 'roles.yml', file('roles.yml') - user username: "test-admin", password: 'x-pack-test-password', role: "test-admin" - user username: "user1", password: 'x-pack-test-password', role: "user1" - user username: "user2", password: 'x-pack-test-password', role: "user2" - user username: "user3", password: 'x-pack-test-password', role: "user3" - user username: "user4", password: 'x-pack-test-password', role: "user4" - user username: "user5", password: 'x-pack-test-password', role: "user5" +tasks.named('javaRestTest') { + usesDefaultDistribution() } diff --git a/x-pack/plugin/esql/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/esql/EsqlSecurityIT.java b/x-pack/plugin/esql/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/esql/EsqlSecurityIT.java index 10c77a05af49b..98ec411569af5 100644 --- 
a/x-pack/plugin/esql/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/esql/EsqlSecurityIT.java +++ b/x-pack/plugin/esql/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/esql/EsqlSecurityIT.java @@ -17,10 +17,14 @@ import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; +import org.elasticsearch.test.cluster.util.resource.Resource; import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.json.JsonXContent; import org.junit.Before; +import org.junit.ClassRule; import java.io.IOException; import java.util.List; @@ -31,6 +35,26 @@ public class EsqlSecurityIT extends ESRestTestCase { + @ClassRule + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .distribution(DistributionType.DEFAULT) + .nodes(2) + .setting("xpack.license.self_generated.type", "trial") + .setting("xpack.security.enabled", "true") + .rolesFile(Resource.fromClasspath("roles.yml")) + .user("test-admin", "x-pack-test-password", "test-admin", false) + .user("user1", "x-pack-test-password", "user1", false) + .user("user2", "x-pack-test-password", "user2", false) + .user("user3", "x-pack-test-password", "user3", false) + .user("user4", "x-pack-test-password", "user4", false) + .user("user5", "x-pack-test-password", "user5", false) + .build(); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + @Override protected Settings restClientSettings() { String token = basicAuthHeaderValue("test-admin", new SecureString("x-pack-test-password".toCharArray())); diff --git a/x-pack/plugin/esql/qa/security/roles.yml b/x-pack/plugin/esql/qa/security/src/javaRestTest/resources/roles.yml similarity index 100% rename from x-pack/plugin/esql/qa/security/roles.yml rename to x-pack/plugin/esql/qa/security/src/javaRestTest/resources/roles.yml diff --git a/x-pack/plugin/esql/qa/server/build.gradle b/x-pack/plugin/esql/qa/server/build.gradle index 12c3a9d951383..ff7ace533fb3a 100644 --- a/x-pack/plugin/esql/qa/server/build.gradle +++ b/x-pack/plugin/esql/qa/server/build.gradle @@ -9,50 +9,3 @@ dependencies { api project(xpackModule('ql:test-fixtures')) api project(xpackModule('esql:qa:testFixtures')) } - -subprojects { - if (subprojects.isEmpty()) { - // leaf project - } else { - apply plugin: 'elasticsearch.java' - apply plugin: 'elasticsearch.standalone-rest-test' - } - - - if (project.name != 'security' && project.name != 'mixed-cluster' ) { - // The security project just configures its subprojects - apply plugin: 'elasticsearch.legacy-java-rest-test' - - testClusters.matching { it.name == "javaRestTest" }.configureEach { - testDistribution = 'DEFAULT' - setting 'xpack.ml.enabled', 'false' - setting 'xpack.watcher.enabled', 'false' - } - - - dependencies { - configurations.javaRestTestRuntimeClasspath { - resolutionStrategy.force "org.slf4j:slf4j-api:1.7.25" - } - configurations.javaRestTestRuntimeOnly { - // This is also required to make resolveAllDependencies work - resolutionStrategy.force "org.slf4j:slf4j-api:1.7.25" - } - - /* Since we're a standalone rest test we actually get transitive - * dependencies but we don't really want them because they cause - * all kinds of trouble with the jar hell checks. 
So we suppress - * them explicitly for non-es projects. */ - javaRestTestImplementation(project(':x-pack:plugin:esql:qa:server')) { - transitive = false - } - javaRestTestImplementation project(":test:framework") - javaRestTestRuntimeOnly project(xpackModule('ql:test-fixtures')) - - javaRestTestRuntimeOnly "org.slf4j:slf4j-api:1.7.25" - javaRestTestRuntimeOnly "net.sf.supercsv:super-csv:${versions.supercsv}" - - javaRestTestImplementation project(path: xpackModule('ql:test-fixtures')) - } - } -} diff --git a/x-pack/plugin/esql/qa/server/heap-attack/build.gradle b/x-pack/plugin/esql/qa/server/heap-attack/build.gradle index de88fdecf2b14..75fc42c275508 100644 --- a/x-pack/plugin/esql/qa/server/heap-attack/build.gradle +++ b/x-pack/plugin/esql/qa/server/heap-attack/build.gradle @@ -1,19 +1,9 @@ -apply plugin: 'elasticsearch.legacy-yaml-rest-test' +apply plugin: 'elasticsearch.internal-java-rest-test' dependencies { - javaRestTestImplementation project(xpackModule('esql:qa:testFixtures')) + javaRestTestImplementation project(xpackModule('esql:qa:server')) } -restResources { - restApi { - include '_common', 'bulk', 'indices', 'esql', 'xpack', 'enrich' - } -} - -testClusters.configureEach { - numberOfNodes = 1 - testDistribution = 'DEFAULT' - setting 'xpack.license.self_generated.type', 'trial' - setting 'xpack.monitoring.collection.enabled', 'true' - setting 'xpack.security.enabled', 'false' +tasks.named('javaRestTest') { + usesDefaultDistribution() } diff --git a/x-pack/plugin/esql/qa/server/heap-attack/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/heap_attack/HeapAttackIT.java b/x-pack/plugin/esql/qa/server/heap-attack/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/heap_attack/HeapAttackIT.java index 37f2c86dbc251..270fc96975401 100644 --- a/x-pack/plugin/esql/qa/server/heap-attack/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/heap_attack/HeapAttackIT.java +++ b/x-pack/plugin/esql/qa/server/heap-attack/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/heap_attack/HeapAttackIT.java @@ -9,6 +9,7 @@ import org.apache.http.client.config.RequestConfig; import org.apache.http.util.EntityUtils; +import org.apache.lucene.tests.util.LuceneTestCase; import org.elasticsearch.client.Request; import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; @@ -19,12 +20,15 @@ import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.TimeValue; import org.elasticsearch.test.ListMatcher; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.json.JsonXContent; import org.elasticsearch.xpack.esql.qa.rest.EsqlSpecTestCase; import org.junit.After; import org.junit.Before; +import org.junit.ClassRule; import java.io.IOException; import java.nio.charset.StandardCharsets; @@ -47,7 +51,22 @@ * Tests that run ESQL queries that have, in the past, used so much memory they * crash Elasticsearch. 
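HeapAttackIT asserts the friendly failure mode instead of the crash: a query that once took the node down must now fail with a circuit-breaker error over REST, which is what the assertCircuitBreaks(...) call in testFetchTooManyMvLongs below verifies. The helper itself is outside the hunks shown here; a plausible sketch, assuming a tripped breaker surfaces as an HTTP 429 response (the status code and body check are assumptions, not taken from this diff):

// Hypothetical shape of the assertCircuitBreaks(...) helper referenced below.
// ResponseException, EntityUtils and containsString are already imported by the
// surrounding test code in this diff; ThrowingRunnable comes from LuceneTestCase.
private void assertCircuitBreaks(ThrowingRunnable attack) throws IOException {
    ResponseException e = expectThrows(ResponseException.class, attack);
    Response response = e.getResponse();
    // The node should survive and report 429 Too Many Requests with a
    // circuit_breaking_exception in the error body.
    assertEquals(429, response.getStatusLine().getStatusCode());
    assertThat(EntityUtils.toString(response.getEntity()), containsString("circuit_breaking_exception"));
}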
*/ +@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/103527") public class HeapAttackIT extends ESRestTestCase { + + @ClassRule + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .distribution(DistributionType.DEFAULT) + .nodes(1) + .setting("xpack.security.enabled", "false") + .setting("xpack.license.self_generated.type", "trial") + .build(); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + /** * This used to fail, but we've since compacted top n so it actually succeeds now. */ @@ -344,7 +363,6 @@ public void testFetchMvLongs() throws IOException { assertMap(map, matchesMap().entry("columns", columns)); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/100528") public void testFetchTooManyMvLongs() throws IOException { initMvLongsIndex(500, 100, 1000); assertCircuitBreaks(() -> fetchMvLongs()); diff --git a/x-pack/plugin/esql/qa/server/mixed-cluster/build.gradle b/x-pack/plugin/esql/qa/server/mixed-cluster/build.gradle index 01955adb3af0c..dfd6095b2c217 100644 --- a/x-pack/plugin/esql/qa/server/mixed-cluster/build.gradle +++ b/x-pack/plugin/esql/qa/server/mixed-cluster/build.gradle @@ -4,15 +4,8 @@ import org.elasticsearch.gradle.VersionProperties import org.elasticsearch.gradle.internal.info.BuildParams import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask -apply plugin: 'elasticsearch.internal-testclusters' -apply plugin: 'elasticsearch.standalone-rest-test' +apply plugin: 'elasticsearch.internal-java-rest-test' apply plugin: 'elasticsearch.bwc-test' -apply plugin: 'elasticsearch.rest-resources' - -dependencies { - testImplementation project(xpackModule('esql:qa:testFixtures')) - testImplementation project(xpackModule('esql:qa:server')) -} restResources { restApi { @@ -23,31 +16,20 @@ restResources { } } -BuildParams.bwcVersions.withWireCompatible { bwcVersion, baseName -> - - if (bwcVersion != VersionProperties.getElasticsearchVersion() && bwcVersion.onOrAfter(Version.fromString("8.11.0"))) { - /* This project runs the ESQL spec tests against a 4 node cluster where two of the nodes has a different minor. 
*/ - def baseCluster = testClusters.register(baseName) { - versions = [bwcVersion.toString(), bwcVersion.toString(), project.version, project.version] - numberOfNodes = 4 - testDistribution = 'DEFAULT' - setting 'xpack.license.self_generated.type', 'trial' - setting 'xpack.security.enabled', 'false' - } +dependencies { + javaRestTestImplementation project(xpackModule('esql:qa:testFixtures')) + javaRestTestImplementation project(xpackModule('esql:qa:server')) +} - tasks.register("${baseName}#mixedClusterTest", StandaloneRestIntegTestTask) { - useCluster baseCluster - mustRunAfter("precommit") - nonInputProperties.systemProperty('tests.rest.cluster', baseCluster.map(c -> c.allHttpSocketURI.join(","))) - nonInputProperties.systemProperty('tests.clustername', baseName) - systemProperty 'tests.bwc_nodes_version', bwcVersion.toString().replace('-SNAPSHOT', '') - systemProperty 'tests.new_nodes_version', project.version.toString().replace('-SNAPSHOT', '') - onlyIf("BWC tests disabled") { project.bwc_tests_enabled } - } +def supportedVersion = bwcVersion -> { + // ESQL is available in 8.11 or later + return bwcVersion.onOrAfter(Version.fromString("8.11.0")); +} - tasks.register(bwcTaskName(bwcVersion)) { - dependsOn "${baseName}#mixedClusterTest" - } +BuildParams.bwcVersions.withWireCompatible(supportedVersion) { bwcVersion, baseName -> + tasks.register(bwcTaskName(bwcVersion), StandaloneRestIntegTestTask) { + usesBwcDistribution(bwcVersion) + systemProperty("tests.old_cluster_version", bwcVersion) + maxParallelForks = 1 } } - diff --git a/x-pack/plugin/esql/qa/server/mixed-cluster/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/mixed/Clusters.java b/x-pack/plugin/esql/qa/server/mixed-cluster/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/mixed/Clusters.java new file mode 100644 index 0000000000000..578025be72314 --- /dev/null +++ b/x-pack/plugin/esql/qa/server/mixed-cluster/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/mixed/Clusters.java @@ -0,0 +1,29 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.qa.mixed; + +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; +import org.elasticsearch.test.cluster.util.Version; + +public class Clusters { + public static ElasticsearchCluster mixedVersionCluster() { + Version oldVersion = Version.fromString(System.getProperty("tests.old_cluster_version")); + return ElasticsearchCluster.local() + .distribution(DistributionType.DEFAULT) + .withNode(node -> node.version(oldVersion)) + .withNode(node -> node.version(Version.CURRENT)) + .withNode(node -> node.version(oldVersion)) + .withNode(node -> node.version(Version.CURRENT)) + .setting("xpack.security.enabled", "false") + .setting("xpack.license.self_generated.type", "trial") + .setting("cluster.routing.rebalance.enable", "none") // disable relocation until we have retry in ESQL + .shared(true) + .build(); + } +} diff --git a/x-pack/plugin/esql/qa/server/mixed-cluster/src/test/java/org/elasticsearch/xpack/esql/qa/mixed/EsqlClientYamlIT.java b/x-pack/plugin/esql/qa/server/mixed-cluster/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/mixed/EsqlClientYamlIT.java similarity index 69% rename from x-pack/plugin/esql/qa/server/mixed-cluster/src/test/java/org/elasticsearch/xpack/esql/qa/mixed/EsqlClientYamlIT.java rename to x-pack/plugin/esql/qa/server/mixed-cluster/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/mixed/EsqlClientYamlIT.java index 0965c5506c6a1..034460e48ece3 100644 --- a/x-pack/plugin/esql/qa/server/mixed-cluster/src/test/java/org/elasticsearch/xpack/esql/qa/mixed/EsqlClientYamlIT.java +++ b/x-pack/plugin/esql/qa/server/mixed-cluster/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/mixed/EsqlClientYamlIT.java @@ -8,14 +8,26 @@ package org.elasticsearch.xpack.esql.qa.mixed; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; +import org.elasticsearch.test.TestClustersThreadFilter; +import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; import org.elasticsearch.xpack.esql.qa.rest.EsqlSpecTestCase; import org.junit.After; import org.junit.Before; +import org.junit.ClassRule; +@ThreadLeakFilters(filters = TestClustersThreadFilter.class) public class EsqlClientYamlIT extends ESClientYamlSuiteTestCase { + @ClassRule + public static ElasticsearchCluster cluster = Clusters.mixedVersionCluster(); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } public EsqlClientYamlIT(final ClientYamlTestCandidate testCandidate) { super(testCandidate); diff --git a/x-pack/plugin/esql/qa/server/mixed-cluster/src/test/java/org/elasticsearch/xpack/esql/qa/mixed/MixedClusterEsqlSpecIT.java b/x-pack/plugin/esql/qa/server/mixed-cluster/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/mixed/MixedClusterEsqlSpecIT.java similarity index 64% rename from x-pack/plugin/esql/qa/server/mixed-cluster/src/test/java/org/elasticsearch/xpack/esql/qa/mixed/MixedClusterEsqlSpecIT.java rename to x-pack/plugin/esql/qa/server/mixed-cluster/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/mixed/MixedClusterEsqlSpecIT.java index b8dab3641c2a0..9b81c84e75a7a 100644 --- a/x-pack/plugin/esql/qa/server/mixed-cluster/src/test/java/org/elasticsearch/xpack/esql/qa/mixed/MixedClusterEsqlSpecIT.java +++ 
b/x-pack/plugin/esql/qa/server/mixed-cluster/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/mixed/MixedClusterEsqlSpecIT.java @@ -7,16 +7,28 @@ package org.elasticsearch.xpack.esql.qa.mixed; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; + import org.elasticsearch.Version; +import org.elasticsearch.test.TestClustersThreadFilter; +import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.xpack.esql.qa.rest.EsqlSpecTestCase; import org.elasticsearch.xpack.ql.CsvSpecReader.CsvTestCase; +import org.junit.ClassRule; import static org.elasticsearch.xpack.esql.CsvTestUtils.isEnabled; +@ThreadLeakFilters(filters = TestClustersThreadFilter.class) public class MixedClusterEsqlSpecIT extends EsqlSpecTestCase { + @ClassRule + public static ElasticsearchCluster cluster = Clusters.mixedVersionCluster(); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } - static final Version bwcVersion = Version.fromString(System.getProperty("tests.bwc_nodes_version")); - static final Version newVersion = Version.fromString(System.getProperty("tests.new_nodes_version")); + static final Version bwcVersion = Version.fromString(System.getProperty("tests.old_cluster_version")); public MixedClusterEsqlSpecIT(String fileName, String groupName, String testName, Integer lineNumber, CsvTestCase testCase) { super(fileName, groupName, testName, lineNumber, testCase); @@ -24,7 +36,7 @@ public MixedClusterEsqlSpecIT(String fileName, String groupName, String testName @Override protected void shouldSkipTest(String testName) { + super.shouldSkipTest(testName); assumeTrue("Test " + testName + " is skipped on " + bwcVersion, isEnabled(testName, bwcVersion)); - assumeTrue("Test " + testName + " is skipped on " + newVersion, isEnabled(testName, newVersion)); } } diff --git a/x-pack/plugin/esql/qa/server/multi-clusters/build.gradle b/x-pack/plugin/esql/qa/server/multi-clusters/build.gradle new file mode 100644 index 0000000000000..7008bd8b7aa01 --- /dev/null +++ b/x-pack/plugin/esql/qa/server/multi-clusters/build.gradle @@ -0,0 +1,33 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +import org.elasticsearch.gradle.Version +import org.elasticsearch.gradle.internal.info.BuildParams +import org.elasticsearch.gradle.testclusters.StandaloneRestIntegTestTask + +apply plugin: 'elasticsearch.internal-java-rest-test' +apply plugin: 'elasticsearch.bwc-test' + +dependencies { + javaRestTestImplementation project(xpackModule('esql:qa:testFixtures')) + javaRestTestImplementation project(xpackModule('esql:qa:server')) +} + +def supportedVersion = bwcVersion -> { + // This test is less restricted than the actual CCS compatibility matrix that we are supporting. 
+ // CCQ is available on 8.13 or later + return bwcVersion.onOrAfter(Version.fromString("8.13.0")); +} + +BuildParams.bwcVersions.withWireCompatible(supportedVersion) { bwcVersion, baseName -> + tasks.register(bwcTaskName(bwcVersion), StandaloneRestIntegTestTask) { + usesBwcDistribution(bwcVersion) + systemProperty("tests.old_cluster_version", bwcVersion) + maxParallelForks = 1 + } +} diff --git a/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/Clusters.java b/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/Clusters.java new file mode 100644 index 0000000000000..20abfa2fe18fc --- /dev/null +++ b/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/Clusters.java @@ -0,0 +1,46 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.ccq; + +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; +import org.elasticsearch.test.cluster.util.Version; + +public class Clusters { + public static ElasticsearchCluster remoteCluster() { + return ElasticsearchCluster.local() + .name("remote_cluster") + .distribution(DistributionType.DEFAULT) + .version(Version.fromString(System.getProperty("tests.old_cluster_version"))) + .nodes(2) + .setting("node.roles", "[data,ingest,master]") + .setting("xpack.security.enabled", "false") + .setting("xpack.license.self_generated.type", "trial") + .shared(true) + .build(); + } + + public static ElasticsearchCluster localCluster(ElasticsearchCluster remoteCluster) { + return ElasticsearchCluster.local() + .name("local_cluster") + .distribution(DistributionType.DEFAULT) + .version(Version.CURRENT) + .nodes(2) + .setting("xpack.security.enabled", "false") + .setting("xpack.license.self_generated.type", "trial") + .setting("node.roles", "[data,ingest,master,remote_cluster_client]") + .setting("cluster.remote.remote_cluster.seeds", () -> "\"" + remoteCluster.getTransportEndpoint(0) + "\"") + .setting("cluster.remote.connections_per_cluster", "1") + .shared(true) + .build(); + } + + public static org.elasticsearch.Version oldVersion() { + return org.elasticsearch.Version.fromString(System.getProperty("tests.old_cluster_version")); + } +} diff --git a/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClusterSpecIT.java b/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClusterSpecIT.java new file mode 100644 index 0000000000000..49ab1879aed32 --- /dev/null +++ b/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClusterSpecIT.java @@ -0,0 +1,162 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.ccq; + +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; + +import org.apache.http.HttpHost; +import org.apache.lucene.tests.util.LuceneTestCase; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.RestClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.IOUtils; +import org.elasticsearch.test.TestClustersThreadFilter; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.xpack.esql.qa.rest.EsqlSpecTestCase; +import org.elasticsearch.xpack.ql.CsvSpecReader; +import org.junit.ClassRule; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Locale; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import java.util.stream.Collectors; + +import static org.elasticsearch.xpack.esql.CsvTestUtils.isEnabled; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +/** + * This suite loads the data into either the local cluster or the remote cluster, then runs spec tests with CCQ. + * TODO: Some spec tests prevent us from splitting data across multiple shards/indices/clusters + */ +@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/103737") +@ThreadLeakFilters(filters = TestClustersThreadFilter.class) +public class MultiClusterSpecIT extends EsqlSpecTestCase { + + static ElasticsearchCluster remoteCluster = Clusters.remoteCluster(); + static ElasticsearchCluster localCluster = Clusters.localCluster(remoteCluster); + + @ClassRule + public static TestRule clusterRule = RuleChain.outerRule(remoteCluster).around(localCluster); + + public MultiClusterSpecIT(String fileName, String groupName, String testName, Integer lineNumber, CsvSpecReader.CsvTestCase testCase) { + super(fileName, groupName, testName, lineNumber, convertToRemoteIndices(testCase)); + } + + @Override + protected void shouldSkipTest(String testName) { + super.shouldSkipTest(testName); + assumeFalse("CCQ doesn't support enrich yet", hasEnrich(testCase.query)); + assumeFalse("can't test with _index metadata", hasIndexMetadata(testCase.query)); + assumeTrue("Test " + testName + " is skipped on " + Clusters.oldVersion(), isEnabled(testName, Clusters.oldVersion())); + } + + @Override + protected String getTestRestCluster() { + return localCluster.getHttpAddresses(); + } + + @Override + protected RestClient buildClient(Settings settings, HttpHost[] localHosts) throws IOException { + RestClient localClient = super.buildClient(settings, localHosts); + HttpHost[] remoteHosts = parseClusterHosts(remoteCluster.getHttpAddresses()).toArray(HttpHost[]::new); + RestClient remoteClient = super.buildClient(settings, remoteHosts); + return twoClients(localClient, remoteClient); + } + + /** + * Creates a new mock client that dispatches every request to both the local and remote clusters, excluding _bulk and _query requests. + * - '_bulk' requests are randomly sent to either the local or remote cluster to populate data. Some spec tests, such as AVG, + * prevent the splitting of bulk requests. + * - '_query' requests are dispatched to the local cluster only, as we are testing cross-cluster queries.
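To make the index rewriting done by convertToRemoteIndices (further below) concrete: given an illustrative spec query, not one from this diff, such as FROM employees | STATS avg = AVG(salary), the rewrite produces FROM *:employees,employees | STATS avg = AVG(salary), so every index is read both through the "*:" remote pattern and locally. Because the FROM clause grows, the query text gets longer, and any expected warning position of the form "Line 1:N:" is shifted right by exactly that length difference; warnings pointing at later lines are left untouched.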
+ */ + static RestClient twoClients(RestClient localClient, RestClient remoteClient) throws IOException { + RestClient twoClients = mock(RestClient.class); + when(twoClients.performRequest(any())).then(invocation -> { + Request request = invocation.getArgument(0); + if (request.getEndpoint().contains("_query")) { + return localClient.performRequest(request); + } else if (request.getEndpoint().contains("_bulk")) { + if (randomBoolean()) { + return remoteClient.performRequest(request); + } else { + return localClient.performRequest(request); + } + } else { + localClient.performRequest(request); + return remoteClient.performRequest(request); + } + }); + doAnswer(invocation -> { + IOUtils.close(localClient, remoteClient); + return null; + }).when(twoClients).close(); + return twoClients; + } + + static CsvSpecReader.CsvTestCase convertToRemoteIndices(CsvSpecReader.CsvTestCase testCase) { + String query = testCase.query; + String[] commands = query.split("\\|"); + String first = commands[0].trim(); + if (commands[0].toLowerCase(Locale.ROOT).startsWith("from")) { + String[] parts = commands[0].split("\\["); + assert parts.length >= 1 : parts; + String fromStatement = parts[0]; + String[] localIndices = fromStatement.substring("FROM ".length()).split(","); + String remoteIndices = Arrays.stream(localIndices) + .map(index -> "*:" + index.trim() + "," + index.trim()) + .collect(Collectors.joining(",")); + var newFrom = "FROM " + remoteIndices + commands[0].substring(fromStatement.length()); + testCase.query = newFrom + " " + query.substring(first.length()); + } + int offset = testCase.query.length() - query.length(); + if (offset != 0) { + final String pattern = "Line (\\d+):(\\d+):"; + final Pattern regex = Pattern.compile(pattern); + testCase.adjustExpectedWarnings(warning -> { + Matcher matcher = regex.matcher(warning); + if (matcher.find()) { + int line = Integer.parseInt(matcher.group(1)); + if (line == 1) { + int position = Integer.parseInt(matcher.group(2)); + int newPosition = position + offset; + return warning.replaceFirst(pattern, "Line " + line + ":" + newPosition + ":"); + } + } + return warning; + }); + } + return testCase; + } + + static boolean hasEnrich(String query) { + String[] commands = query.split("\\|"); + for (int i = 0; i < commands.length; i++) { + commands[i] = commands[i].trim(); + if (commands[i].toLowerCase(Locale.ROOT).startsWith("enrich")) { + return true; + } + } + return false; + } + + static boolean hasIndexMetadata(String query) { + String[] commands = query.split("\\|"); + if (commands[0].trim().toLowerCase(Locale.ROOT).startsWith("from")) { + String[] parts = commands[0].split("\\["); + return parts.length > 1 && parts[1].contains("_index"); + } + return false; + } +} diff --git a/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClustersIT.java b/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClustersIT.java new file mode 100644 index 0000000000000..2f0b11b7a3009 --- /dev/null +++ b/x-pack/plugin/esql/qa/server/multi-clusters/src/javaRestTest/java/org/elasticsearch/xpack/esql/ccq/MultiClustersIT.java @@ -0,0 +1,190 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.ccq; + +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; + +import org.apache.http.HttpHost; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.RestClient; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.TestClustersThreadFilter; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.xpack.esql.qa.rest.RestEsqlTestCase; +import org.junit.After; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; + +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import java.util.stream.Stream; + +import static org.elasticsearch.test.MapMatcher.assertMap; +import static org.elasticsearch.test.MapMatcher.matchesMap; +import static org.elasticsearch.xpack.esql.qa.rest.RestEsqlTestCase.runEsql; + +@ThreadLeakFilters(filters = TestClustersThreadFilter.class) +public class MultiClustersIT extends ESRestTestCase { + static ElasticsearchCluster remoteCluster = Clusters.remoteCluster(); + static ElasticsearchCluster localCluster = Clusters.localCluster(remoteCluster); + + @ClassRule + public static TestRule clusterRule = RuleChain.outerRule(remoteCluster).around(localCluster); + + @Override + protected String getTestRestCluster() { + return localCluster.getHttpAddresses(); + } + + record Doc(int id, String color, long data) { + + } + + final String localIndex = "test-local-index"; + List<Doc> localDocs = List.of(); + final String remoteIndex = "test-remote-index"; + List<Doc> remoteDocs = List.of(); + + @Before + public void setUpIndices() throws Exception { + final String mapping = """ + "properties": { + "data": { "type": "long" }, + "color": { "type": "keyword" } + } + """; + RestClient localClient = client(); + localDocs = IntStream.range(0, between(1, 500)) + .mapToObj(n -> new Doc(n, randomFrom("red", "yellow", "green"), randomIntBetween(1, 1000))) + .toList(); + createIndex( + localClient, + localIndex, + Settings.builder().put("index.number_of_shards", randomIntBetween(1, 5)).build(), + mapping, + null + ); + indexDocs(localClient, localIndex, localDocs); + + remoteDocs = IntStream.range(0, between(1, 500)) + .mapToObj(n -> new Doc(n, randomFrom("red", "yellow", "green"), randomIntBetween(1, 1000))) + .toList(); + try (RestClient remoteClient = remoteClusterClient()) { + createIndex( + remoteClient, + remoteIndex, + Settings.builder().put("index.number_of_shards", randomIntBetween(1, 5)).build(), + mapping, + null + ); + indexDocs(remoteClient, remoteIndex, remoteDocs); + } + } + + @After + public void wipeIndices() throws Exception { + try (RestClient remoteClient = remoteClusterClient()) { + deleteIndex(remoteClient, remoteIndex); + } + } + + void indexDocs(RestClient client, String index, List<Doc> docs) throws IOException { + logger.info("--> indexing {} docs to index {}", docs.size(), index); + long total = 0; + for (Doc doc : docs) { + Request createDoc = new Request("POST", "/" + index + "/_doc/id_" + doc.id); + if (randomInt(100) < 10) { + createDoc.addParameter("refresh", "true"); + } + createDoc.setJsonEntity(Strings.format(""" + { "color": "%s", "data": %s} + """, doc.color, doc.data)); + assertOK(client.performRequest(createDoc)); + total += doc.data; + } + logger.info("--> index={}
total={}", index, total); + refresh(client, index); + } + + private Map<String, Object> run(String query) throws IOException { + Map<String, Object> resp = runEsql(new RestEsqlTestCase.RequestObjectBuilder().query(query).build()); + logger.info("--> query {} response {}", query, resp); + return resp; + } + + public void testCount() throws Exception { + { + Map<String, Object> result = run("FROM test-local-index,*:test-remote-index | STATS c = COUNT(*)"); + var columns = List.of(Map.of("name", "c", "type", "long")); + var values = List.of(List.of(localDocs.size() + remoteDocs.size())); + assertMap(result, matchesMap().entry("columns", columns).entry("values", values)); + } + { + Map<String, Object> result = run("FROM *:test-remote-index | STATS c = COUNT(*)"); + var columns = List.of(Map.of("name", "c", "type", "long")); + var values = List.of(List.of(remoteDocs.size())); + assertMap(result, matchesMap().entry("columns", columns).entry("values", values)); + } + } + + public void testUngroupedAggs() throws Exception { + { + Map<String, Object> result = run("FROM test-local-index,*:test-remote-index | STATS total = SUM(data)"); + var columns = List.of(Map.of("name", "total", "type", "long")); + long sum = Stream.concat(localDocs.stream(), remoteDocs.stream()).mapToLong(d -> d.data).sum(); + var values = List.of(List.of(Math.toIntExact(sum))); + assertMap(result, matchesMap().entry("columns", columns).entry("values", values)); + } + { + Map<String, Object> result = run("FROM *:test-remote-index | STATS total = SUM(data)"); + var columns = List.of(Map.of("name", "total", "type", "long")); + long sum = remoteDocs.stream().mapToLong(d -> d.data).sum(); + var values = List.of(List.of(Math.toIntExact(sum))); + assertMap(result, matchesMap().entry("columns", columns).entry("values", values)); + } + } + + public void testGroupedAggs() throws Exception { + { + Map<String, Object> result = run("FROM test-local-index,*:test-remote-index | STATS total = SUM(data) BY color | SORT color"); + var columns = List.of(Map.of("name", "total", "type", "long"), Map.of("name", "color", "type", "keyword")); + var values = Stream.concat(localDocs.stream(), remoteDocs.stream()) + .collect(Collectors.toMap(d -> d.color, Doc::data, Long::sum)) + .entrySet() + .stream() + .sorted(Map.Entry.comparingByKey()) + .map(e -> List.of(Math.toIntExact(e.getValue()), e.getKey())) + .toList(); + assertMap(result, matchesMap().entry("columns", columns).entry("values", values)); + } + { + Map<String, Object> result = run("FROM *:test-remote-index | STATS total = SUM(data) by color | SORT color"); + var columns = List.of(Map.of("name", "total", "type", "long"), Map.of("name", "color", "type", "keyword")); + var values = remoteDocs.stream() + .collect(Collectors.toMap(d -> d.color, Doc::data, Long::sum)) + .entrySet() + .stream() + .sorted(Map.Entry.comparingByKey()) + .map(e -> List.of(Math.toIntExact(e.getValue()), e.getKey())) + .toList(); + assertMap(result, matchesMap().entry("columns", columns).entry("values", values)); + } + } + + private RestClient remoteClusterClient() throws IOException { + var clusterHosts = parseClusterHosts(remoteCluster.getHttpAddresses()); + return buildClient(restClientSettings(), clusterHosts.toArray(new HttpHost[0])); + } +} diff --git a/x-pack/plugin/esql/qa/server/multi-node/build.gradle b/x-pack/plugin/esql/qa/server/multi-node/build.gradle index 300ed4df92bc2..e7ef204d77dbb 100644 --- a/x-pack/plugin/esql/qa/server/multi-node/build.gradle +++ b/x-pack/plugin/esql/qa/server/multi-node/build.gradle @@ -1,19 +1,11 @@ -apply plugin: 'elasticsearch.legacy-yaml-rest-test' +apply plugin: 'elasticsearch.internal-java-rest-test'
dependencies { javaRestTestImplementation project(xpackModule('esql:qa:testFixtures')) + javaRestTestImplementation project(xpackModule('esql:qa:server')) } -restResources { - restApi { - include '_common', 'bulk', 'indices', 'esql', 'xpack' - } -} -testClusters.configureEach { - numberOfNodes = 2 - testDistribution = 'DEFAULT' - setting 'xpack.license.self_generated.type', 'trial' - setting 'xpack.monitoring.collection.enabled', 'true' - setting 'xpack.security.enabled', 'false' +tasks.named('javaRestTest') { + usesDefaultDistribution() } diff --git a/x-pack/plugin/esql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/multi_node/EsqlSpecIT.java b/x-pack/plugin/esql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/multi_node/EsqlSpecIT.java index eab26b565f93d..030d9c951b751 100644 --- a/x-pack/plugin/esql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/multi_node/EsqlSpecIT.java +++ b/x-pack/plugin/esql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/multi_node/EsqlSpecIT.java @@ -7,10 +7,26 @@ package org.elasticsearch.xpack.esql.qa.multi_node; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; import org.elasticsearch.xpack.esql.qa.rest.EsqlSpecTestCase; import org.elasticsearch.xpack.ql.CsvSpecReader.CsvTestCase; +import org.junit.ClassRule; public class EsqlSpecIT extends EsqlSpecTestCase { + @ClassRule + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .distribution(DistributionType.DEFAULT) + .nodes(2) + .setting("xpack.security.enabled", "false") + .setting("xpack.license.self_generated.type", "trial") + .build(); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + public EsqlSpecIT(String fileName, String groupName, String testName, Integer lineNumber, CsvTestCase testCase) { super(fileName, groupName, testName, lineNumber, testCase); } diff --git a/x-pack/plugin/esql/qa/server/single-node/build.gradle b/x-pack/plugin/esql/qa/server/single-node/build.gradle index 2d430965efb21..1932faa49fcba 100644 --- a/x-pack/plugin/esql/qa/server/single-node/build.gradle +++ b/x-pack/plugin/esql/qa/server/single-node/build.gradle @@ -1,7 +1,9 @@ -apply plugin: 'elasticsearch.legacy-yaml-rest-test' +apply plugin: 'elasticsearch.internal-java-rest-test' +apply plugin: 'elasticsearch.internal-yaml-rest-test' dependencies { javaRestTestImplementation project(xpackModule('esql:qa:testFixtures')) + javaRestTestImplementation project(xpackModule('esql:qa:server')) yamlRestTestImplementation project(xpackModule('esql:qa:server')) } @@ -14,9 +16,12 @@ restResources { } } -testClusters.configureEach { - testDistribution = 'DEFAULT' - setting 'xpack.license.self_generated.type', 'trial' - setting 'xpack.monitoring.collection.enabled', 'true' - setting 'xpack.security.enabled', 'false' +tasks.named('javaRestTest') { + usesDefaultDistribution() + maxParallelForks = 1 +} + +tasks.named('yamlRestTest') { + usesDefaultDistribution() + maxParallelForks = 1 } diff --git a/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/Clusters.java b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/Clusters.java new file mode 100644 index 0000000000000..f0724a411e3c5 --- /dev/null +++ 
b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/Clusters.java @@ -0,0 +1,23 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.qa.single_node; + +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; + +public class Clusters { + public static ElasticsearchCluster testCluster() { + return ElasticsearchCluster.local() + .distribution(DistributionType.DEFAULT) + .nodes(1) + .setting("xpack.security.enabled", "false") + .setting("xpack.license.self_generated.type", "trial") + .shared(true) + .build(); + } +} diff --git a/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/EsqlSpecIT.java b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/EsqlSpecIT.java index d3a4d7a14a0f1..d5d730346e117 100644 --- a/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/EsqlSpecIT.java +++ b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/EsqlSpecIT.java @@ -7,10 +7,24 @@ package org.elasticsearch.xpack.esql.qa.single_node; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; + +import org.elasticsearch.test.TestClustersThreadFilter; +import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.xpack.esql.qa.rest.EsqlSpecTestCase; import org.elasticsearch.xpack.ql.CsvSpecReader.CsvTestCase; +import org.junit.ClassRule; +@ThreadLeakFilters(filters = TestClustersThreadFilter.class) public class EsqlSpecIT extends EsqlSpecTestCase { + @ClassRule + public static ElasticsearchCluster cluster = Clusters.testCluster(); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + public EsqlSpecIT(String fileName, String groupName, String testName, Integer lineNumber, CsvTestCase testCase) { super(fileName, groupName, testName, lineNumber, testCase); } diff --git a/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/GenerativeIT.java b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/GenerativeIT.java index e499b13bf1db8..9b98c29f5c3e3 100644 --- a/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/GenerativeIT.java +++ b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/GenerativeIT.java @@ -7,8 +7,22 @@ package org.elasticsearch.xpack.esql.qa.single_node; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; + import org.apache.lucene.tests.util.LuceneTestCase.AwaitsFix; +import org.elasticsearch.test.TestClustersThreadFilter; +import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.xpack.esql.qa.rest.generative.GenerativeRestTest; +import org.junit.ClassRule; @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/102084") -public class GenerativeIT extends GenerativeRestTest {} +@ThreadLeakFilters(filters = TestClustersThreadFilter.class) 
+public class GenerativeIT extends GenerativeRestTest { + @ClassRule + public static ElasticsearchCluster cluster = Clusters.testCluster(); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } +} diff --git a/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/RestEnrichIT.java b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/RestEnrichIT.java index b2222f4f2e78e..3717e820008a0 100644 --- a/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/RestEnrichIT.java +++ b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/RestEnrichIT.java @@ -7,6 +7,20 @@ package org.elasticsearch.xpack.esql.qa.single_node; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; + +import org.elasticsearch.test.TestClustersThreadFilter; +import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.xpack.esql.qa.rest.RestEnrichTestCase; +import org.junit.ClassRule; + +@ThreadLeakFilters(filters = TestClustersThreadFilter.class) +public class RestEnrichIT extends RestEnrichTestCase { + @ClassRule + public static ElasticsearchCluster cluster = Clusters.testCluster(); -public class RestEnrichIT extends RestEnrichTestCase {} + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } +} diff --git a/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/RestEsqlIT.java b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/RestEsqlIT.java index 10e63a563efc7..eb542a56b8c9c 100644 --- a/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/RestEsqlIT.java +++ b/x-pack/plugin/esql/qa/server/single-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/RestEsqlIT.java @@ -6,14 +6,19 @@ */ package org.elasticsearch.xpack.esql.qa.single_node; +import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; + import org.apache.http.util.EntityUtils; import org.elasticsearch.Build; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.TestClustersThreadFilter; +import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.xpack.esql.qa.rest.RestEsqlTestCase; import org.junit.Assert; +import org.junit.ClassRule; import java.io.IOException; import java.nio.charset.StandardCharsets; @@ -23,7 +28,15 @@ import static org.hamcrest.Matchers.containsString; +@ThreadLeakFilters(filters = TestClustersThreadFilter.class) public class RestEsqlIT extends RestEsqlTestCase { + @ClassRule + public static ElasticsearchCluster cluster = Clusters.testCluster(); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } public void testBasicEsql() throws IOException { StringBuilder b = new StringBuilder(); diff --git a/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/EsqlClientYamlIT.java b/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/EsqlClientYamlIT.java index 
38d58644926fe..9e93ae4376896 100644
--- a/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/EsqlClientYamlIT.java
+++ b/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/java/org/elasticsearch/xpack/esql/qa/single_node/EsqlClientYamlIT.java
@@ -9,14 +9,30 @@
 import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
 
+import org.elasticsearch.test.cluster.ElasticsearchCluster;
+import org.elasticsearch.test.cluster.local.distribution.DistributionType;
 import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate;
 import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase;
 import org.elasticsearch.xpack.esql.qa.rest.EsqlSpecTestCase;
 import org.junit.After;
 import org.junit.Before;
+import org.junit.ClassRule;
 
 public class EsqlClientYamlIT extends ESClientYamlSuiteTestCase {
+    @ClassRule
+    public static ElasticsearchCluster cluster = ElasticsearchCluster.local()
+        .distribution(DistributionType.DEFAULT)
+        .nodes(1)
+        .setting("xpack.security.enabled", "false")
+        .setting("xpack.license.self_generated.type", "trial")
+        .build();
+
+    @Override
+    protected String getTestRestCluster() {
+        return cluster.getHttpAddresses();
+    }
+
     public EsqlClientYamlIT(final ClientYamlTestCandidate testCandidate) {
         super(testCandidate);
     }
diff --git a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/EsqlSpecTestCase.java b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/EsqlSpecTestCase.java
index 734f26fab547a..2d6280e4ceb9b 100644
--- a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/EsqlSpecTestCase.java
+++ b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/EsqlSpecTestCase.java
@@ -50,7 +50,7 @@ public abstract class EsqlSpecTestCase extends ESRestTestCase {
     private final String groupName;
     private final String testName;
     private final Integer lineNumber;
-    private final CsvTestCase testCase;
+    protected final CsvTestCase testCase;
 
     @ParametersFactory(argumentFormatting = "%2$s.%3$s")
     public static List<Object[]> readScriptSpec() throws Exception {
diff --git a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/generative/GenerativeRestTest.java b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/generative/GenerativeRestTest.java
index c341ad26cb7a6..5fd6b2a5618c7 100644
--- a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/generative/GenerativeRestTest.java
+++ b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/generative/GenerativeRestTest.java
@@ -7,9 +7,12 @@
 
 package org.elasticsearch.xpack.esql.qa.rest.generative;
 
+import org.elasticsearch.client.Request;
+import org.elasticsearch.client.ResponseException;
 import org.elasticsearch.test.rest.ESRestTestCase;
 import org.elasticsearch.xpack.esql.CsvTestsDataLoader;
 import org.elasticsearch.xpack.esql.qa.rest.RestEsqlTestCase;
+import org.junit.AfterClass;
 import org.junit.Before;
 
 import java.io.IOException;
@@ -46,6 +49,18 @@ public void setup() throws IOException {
         }
     }
 
+    @AfterClass
+    public static void wipeTestData() throws IOException {
+        try {
+            adminClient().performRequest(new Request("DELETE", "/*"));
+        } catch (ResponseException e) {
+            // 404 here just means we had no indexes
+            if (e.getResponse().getStatusLine().getStatusCode() != 404) {
+                throw e;
+            }
+        }
+    }
+
     public void test() {
         List<String> indices = availableIndices();
         List<CsvTestsDataLoader.EnrichConfig> policies = availableEnrichPolicies();
diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestUtils.java b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestUtils.java
index ebe27225becb1..d193501386488 100644
--- a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestUtils.java
+++ b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestUtils.java
@@ -11,7 +11,9 @@
 import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.Version;
 import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.breaker.NoopCircuitBreaker;
 import org.elasticsearch.common.time.DateFormatters;
+import org.elasticsearch.common.util.BigArrays;
 import org.elasticsearch.compute.data.Block;
 import org.elasticsearch.compute.data.BlockFactory;
 import org.elasticsearch.compute.data.BlockUtils;
@@ -24,7 +26,7 @@
 import org.elasticsearch.core.Tuple;
 import org.elasticsearch.logging.Logger;
 import org.elasticsearch.test.VersionUtils;
-import org.elasticsearch.xpack.esql.action.EsqlQueryResponse;
+import org.elasticsearch.xpack.esql.action.ResponseValueUtils;
 import org.elasticsearch.xpack.ql.util.StringUtils;
 import org.supercsv.io.CsvListReader;
 import org.supercsv.prefs.CsvPreference;
@@ -139,6 +141,7 @@ public void close() {
 
         CsvColumn[] columns = null;
 
+        var blockFactory = BlockFactory.getInstance(new NoopCircuitBreaker("test-noop"), BigArrays.NON_RECYCLING_INSTANCE);
         try (BufferedReader reader = org.elasticsearch.xpack.ql.TestUtils.reader(source)) {
             String line;
             int lineNumber = 1;
@@ -178,7 +181,7 @@ public void close() {
                         columns[i] = new CsvColumn(
                             name,
                             type,
-                            BlockUtils.wrapperFor(BlockFactory.getNonBreakingInstance(), ElementType.fromJava(type.clazz()), 8)
+                            BlockUtils.wrapperFor(blockFactory, ElementType.fromJava(type.clazz()), 8)
                         );
                     }
                 }
@@ -477,7 +480,7 @@ record ActualResults(
         Map<String, List<String>> responseHeaders
     ) {
         Iterator<Iterator<Object>> values() {
-            return EsqlQueryResponse.pagesToValues(dataTypes(), pages);
+            return ResponseValueUtils.pagesToValues(dataTypes(), pages);
         }
     }
 
diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/EsqlTestUtils.java b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/EsqlTestUtils.java
index eca8beb06576b..8edcdd9edb124 100644
--- a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/EsqlTestUtils.java
+++ b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/EsqlTestUtils.java
@@ -86,6 +86,11 @@ public byte[] max(String field, DataType dataType) {
         public boolean isSingleValue(String field) {
             return false;
         }
+
+        @Override
+        public boolean isIndexed(String field) {
+            return exists(field);
+        }
     }
 
     public static final TestSearchStats TEST_SEARCH_STATS = new TestSearchStats();
diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/conditional.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/conditional.csv-spec
index ea53ac5679aa9..177e169387642 100644
--- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/conditional.csv-spec
+++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/conditional.csv-spec
@@ -93,3 +93,37 @@ M |10
 M |10
 M |10
 ;
+
+docsCaseSuccessRate
+// tag::docsCaseSuccessRate[]
+FROM sample_data
+| EVAL successful = CASE(
+    STARTS_WITH(message, "Connected to"), 1,
+    message == "Connection error", 0
+  )
+| STATS success_rate = AVG(successful)
+// end::docsCaseSuccessRate[]
+;
+
+// tag::docsCaseSuccessRate-result[]
+success_rate:double
+0.5
+// end::docsCaseSuccessRate-result[]
+;
+
+docsCaseHourlyErrorRate
+// tag::docsCaseHourlyErrorRate[]
+FROM sample_data
+| EVAL error = CASE(message LIKE "*error*", 1, 0)
+| EVAL hour = DATE_TRUNC(1 hour, @timestamp)
+| STATS error_rate = AVG(error) BY hour
+| SORT hour
+// end::docsCaseHourlyErrorRate[]
+;
+
+// tag::docsCaseHourlyErrorRate-result[]
+error_rate:double | hour:date
+0.0 |2023-10-23T12:00:00.000Z
+0.6 |2023-10-23T13:00:00.000Z
+// end::docsCaseHourlyErrorRate-result[]
+;
diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date.csv-spec
index 591d395661afa..509257c4c8b4f 100644
--- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date.csv-spec
+++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date.csv-spec
@@ -567,6 +567,30 @@ dt:datetime |plus:datetime
 2100-01-01T01:01:01.001Z |2100-01-01T00:00:00.000Z
 ;
 
+datePlusNull#[skip:-8.12.99, reason:date math with null enabled in 8.13]
+row dt = to_dt("2100-01-01T01:01:01.001Z")
+| eval plus_post = dt + null, plus_pre = null + dt;
+
+dt:datetime |plus_post:datetime |plus_pre:datetime
+2100-01-01T01:01:01.001Z |null |null
+;
+
+datePlusNullAndDuration#[skip:-8.12.99, reason:date math with null enabled in 8.13]
+row dt = to_dt("2100-01-01T01:01:01.001Z")
+| eval plus_post = dt + null + 1 hour, plus_pre = 1 second + null + dt;
+
+dt:datetime |plus_post:datetime |plus_pre:datetime
+2100-01-01T01:01:01.001Z |null |null
+;
+
+datePlusNullAndPeriod#[skip:-8.12.99, reason:date math with null enabled in 8.13]
+row dt = to_dt("2100-01-01T01:01:01.001Z")
+| eval plus_post = dt + null + 2 years, plus_pre = 3 weeks + null + dt;
+
+dt:datetime |plus_post:datetime |plus_pre:datetime
+2100-01-01T01:01:01.001Z |null |null
+;
+
 dateMinusDuration
 row dt = to_dt("2100-01-01T01:01:01.001Z")
 | eval minus = dt - 1 hour - 1 minute - 1 second - 1 milliseconds;
@@ -600,6 +624,33 @@ then:datetime
 1953-04-04T00:00:00.000Z
 ;
 
+dateMinusNull#[skip:-8.12.99, reason:date math with null enabled in 8.13]
+row dt = to_dt("1953-04-04T04:03:02.001Z")
+| eval minus = dt - null
+;
+
+dt:datetime |minus:datetime
+1953-04-04T04:03:02.001Z |null
+;
+
+dateMinusNullAndPeriod#[skip:-8.12.99, reason:date math with null enabled in 8.13]
+row dt = to_dt("1953-04-04T04:03:02.001Z")
+| eval minus = dt - null - 4 minutes
+;
+
+dt:datetime |minus:datetime
+1953-04-04T04:03:02.001Z |null
+;
+
+dateMinusNullAndDuration#[skip:-8.12.99, reason:date math with null enabled in 8.13]
+row dt = to_dt("1953-04-04T04:03:02.001Z")
+| eval minus = dt - 6 days - null
+;
+
+dt:datetime |minus:datetime
+1953-04-04T04:03:02.001Z |null
+;
+
 datePlusPeriodAndDuration
 row dt = to_dt("2100-01-01T00:00:00.000Z")
 | eval plus = dt + 4 years + 3 months + 2 weeks + 1 day + 1 hour + 1 minute + 1 second + 1 milliseconds;
@@ -726,6 +777,86 @@ birth_date:datetime
 1953-04-21T00:00:00.000Z
 ;
 
+docsAutoBucketMonth
+//tag::docsAutoBucketMonth[]
+FROM employees
+| WHERE hire_date >= "1985-01-01T00:00:00Z" AND hire_date < "1986-01-01T00:00:00Z"
+| EVAL month = AUTO_BUCKET(hire_date, 20, "1985-01-01T00:00:00Z", "1986-01-01T00:00:00Z")
+| KEEP hire_date, month
+| SORT hire_date
+//end::docsAutoBucketMonth[]
+;
+
+//tag::docsAutoBucketMonth-result[]
+ hire_date:date | month:date
+1985-02-18T00:00:00.000Z|1985-02-01T00:00:00.000Z
+1985-02-24T00:00:00.000Z|1985-02-01T00:00:00.000Z
+1985-05-13T00:00:00.000Z|1985-05-01T00:00:00.000Z
+1985-07-09T00:00:00.000Z|1985-07-01T00:00:00.000Z
+1985-09-17T00:00:00.000Z|1985-09-01T00:00:00.000Z
+1985-10-14T00:00:00.000Z|1985-10-01T00:00:00.000Z
+1985-10-20T00:00:00.000Z|1985-10-01T00:00:00.000Z
+1985-11-19T00:00:00.000Z|1985-11-01T00:00:00.000Z
+1985-11-20T00:00:00.000Z|1985-11-01T00:00:00.000Z
+1985-11-20T00:00:00.000Z|1985-11-01T00:00:00.000Z
+1985-11-21T00:00:00.000Z|1985-11-01T00:00:00.000Z
+//end::docsAutoBucketMonth-result[]
+;
+
+docsAutoBucketMonthlyHistogram
+//tag::docsAutoBucketMonthlyHistogram[]
+FROM employees
+| WHERE hire_date >= "1985-01-01T00:00:00Z" AND hire_date < "1986-01-01T00:00:00Z"
+| EVAL month = AUTO_BUCKET(hire_date, 20, "1985-01-01T00:00:00Z", "1986-01-01T00:00:00Z")
+| STATS hires_per_month = COUNT(*) BY month
+| SORT month
+//end::docsAutoBucketMonthlyHistogram[]
+;
+
+//tag::docsAutoBucketMonthlyHistogram-result[]
+ hires_per_month:long | month:date
+2 |1985-02-01T00:00:00.000Z
+1 |1985-05-01T00:00:00.000Z
+1 |1985-07-01T00:00:00.000Z
+1 |1985-09-01T00:00:00.000Z
+2 |1985-10-01T00:00:00.000Z
+4 |1985-11-01T00:00:00.000Z
+//end::docsAutoBucketMonthlyHistogram-result[]
+;
+
+docsAutoBucketWeeklyHistogram
+//tag::docsAutoBucketWeeklyHistogram[]
+FROM employees
+| WHERE hire_date >= "1985-01-01T00:00:00Z" AND hire_date < "1986-01-01T00:00:00Z"
+| EVAL week = AUTO_BUCKET(hire_date, 100, "1985-01-01T00:00:00Z", "1986-01-01T00:00:00Z")
+| STATS hires_per_week = COUNT(*) BY week
+| SORT week
+//end::docsAutoBucketWeeklyHistogram[]
+;
+
+//tag::docsAutoBucketWeeklyHistogram-result[]
+ hires_per_week:long | week:date
+2 |1985-02-18T00:00:00.000Z
+1 |1985-05-13T00:00:00.000Z
+1 |1985-07-08T00:00:00.000Z
+1 |1985-09-16T00:00:00.000Z
+2 |1985-10-14T00:00:00.000Z
+4 |1985-11-18T00:00:00.000Z
+//end::docsAutoBucketWeeklyHistogram-result[]
+;
+
+docsAutoBucketLast24hr
+//tag::docsAutoBucketLast24hr[]
+FROM sample_data
+| WHERE @timestamp >= NOW() - 1 day AND @timestamp < NOW()
+| EVAL bucket = AUTO_BUCKET(@timestamp, 25, DATE_FORMAT(NOW() - 1 day), DATE_FORMAT(NOW()))
+| STATS COUNT(*) BY bucket
+//end::docsAutoBucketLast24hr[]
+;
+
+ COUNT(*):long | bucket:date
+;
+
 docsGettingStartedAutoBucket
 // tag::gs-auto_bucket[]
 FROM sample_data
@@ -767,3 +898,92 @@ median_duration:double | bucket:date
 3107561.0 |2023-10-23T12:00:00.000Z
 1756467.0 |2023-10-23T13:00:00.000Z
 ;
+
+dateExtract
+// tag::dateExtract[]
+ROW date = DATE_PARSE("yyyy-MM-dd", "2022-05-06")
+| EVAL year = DATE_EXTRACT("year", date)
+// end::dateExtract[]
+;
+
+// tag::dateExtract-result[]
+date:date | year:long
+2022-05-06T00:00:00.000Z | 2022
+// end::dateExtract-result[]
+;
+
+docsDateExtractBusinessHours
+// tag::docsDateExtractBusinessHours[]
+FROM sample_data
+| WHERE DATE_EXTRACT("hour_of_day", @timestamp) < 9 OR DATE_EXTRACT("hour_of_day", @timestamp) >= 17
+// end::docsDateExtractBusinessHours[]
+;
+
+// tag::docsDateExtractBusinessHours-result[]
+@timestamp:date | client_ip:ip |event_duration:long | message:keyword
+// end::docsDateExtractBusinessHours-result[]
+;
+
+docsDateFormat
+// tag::docsDateFormat[]
+FROM employees
+| KEEP first_name, last_name, hire_date
+| EVAL hired = DATE_FORMAT("yyyy-MM-dd", hire_date)
+// end::docsDateFormat[]
+| SORT first_name
+| LIMIT 3
+;
+
+// tag::docsDateFormat-result[]
+first_name:keyword | last_name:keyword | hire_date:date | hired:keyword
+Alejandro |McAlpine |1991-06-26T00:00:00.000Z|1991-06-26
+Amabile |Gomatam |1992-11-18T00:00:00.000Z|1992-11-18
+Anneke |Preusig |1989-06-02T00:00:00.000Z|1989-06-02
+// end::docsDateFormat-result[]
+;
+
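+// Note: Java time patterns are case-sensitive. "yyyy" is the calendar year, while
+// "YYYY" is the ISO week-based year, which can differ from the calendar year for
+// dates close to the turn of the year.
+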
+docsDateTrunc +// tag::docsDateTrunc[] +FROM employees +| KEEP first_name, last_name, hire_date +| EVAL year_hired = DATE_TRUNC(1 year, hire_date) +// end::docsDateTrunc[] +| SORT first_name +| LIMIT 3 +; + +// tag::docsDateTrunc-result[] +first_name:keyword | last_name:keyword | hire_date:date | year_hired:date +Alejandro |McAlpine |1991-06-26T00:00:00.000Z|1991-01-01T00:00:00.000Z +Amabile |Gomatam |1992-11-18T00:00:00.000Z|1992-01-01T00:00:00.000Z +Anneke |Preusig |1989-06-02T00:00:00.000Z|1989-01-01T00:00:00.000Z +// end::docsDateTrunc-result[] +; + +docsDateTruncHistogram +// tag::docsDateTruncHistogram[] +FROM employees +| EVAL year = DATE_TRUNC(1 year, hire_date) +| STATS hires = COUNT(emp_no) BY year +| SORT year +// end::docsDateTruncHistogram[] +; + +// tag::docsDateTruncHistogram-result[] +hires:long | year:date +11 |1985-01-01T00:00:00.000Z +11 |1986-01-01T00:00:00.000Z +15 |1987-01-01T00:00:00.000Z +9 |1988-01-01T00:00:00.000Z +13 |1989-01-01T00:00:00.000Z +12 |1990-01-01T00:00:00.000Z +6 |1991-01-01T00:00:00.000Z +8 |1992-01-01T00:00:00.000Z +3 |1993-01-01T00:00:00.000Z +4 |1994-01-01T00:00:00.000Z +5 |1995-01-01T00:00:00.000Z +1 |1996-01-01T00:00:00.000Z +1 |1997-01-01T00:00:00.000Z +1 |1999-01-01T00:00:00.000Z +// end::docsDateTruncHistogram-result[] +; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/docs.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/docs.csv-spec index 95da19e38a05d..42c5401742e6e 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/docs.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/docs.csv-spec @@ -290,20 +290,6 @@ Udi |Jansch |1.93 Uri |Lenart |1.75 ; - -dateExtract -// tag::dateExtract[] -ROW date = DATE_PARSE("yyyy-MM-dd", "2022-05-06") -| EVAL year = DATE_EXTRACT("year", date) -// end::dateExtract[] -; - -// tag::dateExtract-result[] -date:date | year:long -2022-05-06T00:00:00.000Z | 2022 -// end::dateExtract-result[] -; - docsSubstring // tag::substring[] FROM employees diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/eval.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/eval.csv-spec index e6486960c7e04..39d8a8bfa57e9 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/eval.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/eval.csv-spec @@ -222,6 +222,24 @@ emp_no:integer | foldable:keyword | folded_mv:keyword 10002 | "foo,bar" | [foo, bar] ; +docsConcat +// tag::docsConcat[] +FROM employees +| KEEP first_name, last_name +| EVAL fullname = CONCAT(first_name, " ", last_name) +// end::docsConcat[] +| SORT first_name +| LIMIT 3 +; + +// tag::docsConcat-result[] +first_name:keyword | last_name:keyword | fullname:keyword +Alejandro |McAlpine |Alejandro McAlpine +Amabile |Gomatam |Amabile Gomatam +Anneke |Preusig |Anneke Preusig +// end::docsConcat-result[] +; + docsGettingStartedEval // tag::gs-eval[] FROM sample_data diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ints.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ints.csv-spec index 887d931f4cd5c..baf6da2cd0bde 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ints.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ints.csv-spec @@ -410,6 +410,30 @@ hire_date:date | salary:integer | bs:double // end::auto_bucket-result[] ; +docsAutoBucketNumeric +//tag::docsAutoBucketNumeric[] +FROM employees +| EVAL bs = AUTO_BUCKET(salary, 20, 25324, 74999) +| STATS COUNT(*) by bs +| SORT bs 
+//end::docsAutoBucketNumeric[] +; + +//tag::docsAutoBucketNumeric-result[] + COUNT(*):long | bs:double +9 |25000.0 +9 |30000.0 +18 |35000.0 +11 |40000.0 +11 |45000.0 +10 |50000.0 +7 |55000.0 +9 |60000.0 +8 |65000.0 +8 |70000.0 +//end::docsAutoBucketNumeric-result[] +; + cos ROW a=2 | EVAL cos=COS(a); diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ip.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ip.csv-spec index 02e9db6ededf1..0b2ce54d5fd22 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ip.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ip.csv-spec @@ -178,14 +178,21 @@ eth2 |epsilon |[fe81::cae2:65ff:fece:feb9, fe82::cae2:65ff:fece ; cdirMatchMultipleArgs#[skip:-8.11.99, reason:Lucene multivalue warning introduced in 8.12 only] -from hosts | where cidr_match(ip1, "127.0.0.2/32", "127.0.0.3/32") | keep card, host, ip0, ip1; +//tag::cdirMatchMultipleArgs[] +FROM hosts +| WHERE CIDR_MATCH(ip1, "127.0.0.2/32", "127.0.0.3/32") +| KEEP card, host, ip0, ip1 +//end::cdirMatchMultipleArgs[] +; ignoreOrder:true -warning:Line 1:20: evaluation of [cidr_match(ip1, \"127.0.0.2/32\", \"127.0.0.3/32\")] failed, treating result as null. Only first 20 failures recorded. -warning:Line 1:20: java.lang.IllegalArgumentException: single-value function encountered multi-value +warning:Line 2:9: evaluation of [CIDR_MATCH(ip1, \"127.0.0.2/32\", \"127.0.0.3/32\")] failed, treating result as null. Only first 20 failures recorded. +warning:Line 2:9: java.lang.IllegalArgumentException: single-value function encountered multi-value +//tag::cdirMatchMultipleArgs-result[] card:keyword |host:keyword |ip0:ip |ip1:ip eth1 |beta |127.0.0.1 |127.0.0.2 eth0 |gamma |fe80::cae2:65ff:fece:feb9|127.0.0.3 +//end::cdirMatchMultipleArgs-result[] ; cidrMatchFunctionArg#[skip:-8.11.99, reason:Lucene multivalue warning introduced in 8.12 only] diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/math.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/math.csv-spec index daf153051bb89..31b9d6101d2c5 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/math.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/math.csv-spec @@ -1118,3 +1118,34 @@ emp_no:integer | min_plus_max:integer | are_equal:boolean 10004 | 7 | false 10005 | 3 | false ; + +docsAbs +//tag::docsAbs[] +ROW number = -1.0 +| EVAL abs_number = ABS(number) +//end::docsAbs[] +; + +//tag::docsAbs-result[] +number:double | abs_number:double +-1.0 |1.0 +//end::docsAbs-result[] +; + +docsAbsEmployees +//tag::docsAbsEmployees[] +FROM employees +| KEEP first_name, last_name, height +| EVAL abs_height = ABS(0.0 - height) +//end::docsAbsEmployees[] +| SORT first_name +| LIMIT 3 +; + +//tag::docsAbsEmployees-result[] +first_name:keyword | last_name:keyword | height:double | abs_height:double +Alejandro |McAlpine |1.48 |1.48 +Amabile |Gomatam |2.09 |2.09 +Anneke |Preusig |1.56 |1.56 +//end::docsAbsEmployees-result[] +; \ No newline at end of file diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/show.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/show.csv-spec index 083bd1eaf8417..07d64597da51f 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/show.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/show.csv-spec @@ -34,9 +34,9 @@ e |? e() ends_with |? ends_with(arg1:?, arg2:?) |[arg1, arg2] |[?, ?] |["", ""] |? | "" | [false, false] | false floor |"? 
floor(n:integer|long|double|unsigned_long)" |n |"integer|long|double|unsigned_long" | "" |? | "" | false | false greatest |"? greatest(first:integer|long|double|boolean|keyword|text|ip|version, rest...:integer|long|double|boolean|keyword|text|ip|version)" |[first, rest] |["integer|long|double|boolean|keyword|text|ip|version", "integer|long|double|boolean|keyword|text|ip|version"] |["", ""] |? | "" | [false, false] | true -is_finite |? is_finite(arg1:?) |arg1 |? | "" |? | "" | false | false -is_infinite |? is_infinite(arg1:?) |arg1 |? | "" |? | "" | false | false -is_nan |? is_nan(arg1:?) |arg1 |? | "" |? | "" | false | false +is_finite |boolean is_finite(n:double) |n |double | "A floating-point value" |boolean | "Returns true if the argument is a finite floating-point value." | false | false +is_infinite |boolean is_infinite(n:double) |n |double | "A floating-point value" |boolean | "Returns true if the specified floating-point value is infinitely large in magnitude." | false | false +is_nan |boolean is_nan(n:double) |n |double | "A floating-point value" |boolean | "Returns true if the argument is a Not-a-Number (NaN) value." | false | false least |"? least(first:integer|long|double|boolean|keyword|text|ip|version, rest...:integer|long|double|boolean|keyword|text|ip|version)" |[first, rest] |["integer|long|double|boolean|keyword|text|ip|version", "integer|long|double|boolean|keyword|text|ip|version"] |["", ""] |? | "" | [false, false] | true left |"? left(string:keyword, length:integer)" |[string, length] |["keyword", "integer"] |["", ""] |? | "" | [false, false] | false length |? length(arg1:?) |arg1 |? | "" |? | "" | false | false @@ -74,13 +74,13 @@ tanh |"double tanh(n:integer|long|double|unsigned_long)" tau |? tau() | null | null | null |? | "" | null | false to_bool |"boolean to_bool(v:boolean|keyword|text|double|long|unsigned_long|integer)" |v |"boolean|keyword|text|double|long|unsigned_long|integer" | |boolean | |false |false to_boolean |"boolean to_boolean(v:boolean|keyword|text|double|long|unsigned_long|integer)" |v |"boolean|keyword|text|double|long|unsigned_long|integer" | |boolean | |false |false -to_cartesianpoint |? to_cartesianpoint(arg1:?) |arg1 |? | "" |? | "" | false | false +to_cartesianpoint |"cartesian_point to_cartesianpoint(v:cartesian_point|long|unsigned_long|keyword|text)" |v |"cartesian_point|long|unsigned_long|keyword|text" | |cartesian_point | |false |false to_datetime |"date to_datetime(v:date|keyword|text|double|long|unsigned_long|integer)" |v |"date|keyword|text|double|long|unsigned_long|integer" | |date | |false |false to_dbl |"double to_dbl(v:boolean|date|keyword|text|double|long|unsigned_long|integer)" |v |"boolean|date|keyword|text|double|long|unsigned_long|integer" | |double | |false |false to_degrees |"double to_degrees(v:double|long|unsigned_long|integer)" |v |"double|long|unsigned_long|integer" | |double | |false |false to_double |"double to_double(v:boolean|date|keyword|text|double|long|unsigned_long|integer)" |v |"boolean|date|keyword|text|double|long|unsigned_long|integer" | |double | |false |false to_dt |"date to_dt(v:date|keyword|text|double|long|unsigned_long|integer)" |v |"date|keyword|text|double|long|unsigned_long|integer" | |date | |false |false -to_geopoint |? to_geopoint(arg1:?) |arg1 |? | "" |? 
| "" | false | false +to_geopoint |"geo_point to_geopoint(v:geo_point|long|unsigned_long|keyword|text)" |v |"geo_point|long|unsigned_long|keyword|text" | |geo_point | |false |false to_int |"integer to_int(v:boolean|date|keyword|text|double|long|unsigned_long|integer)" |v |"boolean|date|keyword|text|double|long|unsigned_long|integer" | |integer | |false |false to_integer |"integer to_integer(v:boolean|date|keyword|text|double|long|unsigned_long|integer)" |v |"boolean|date|keyword|text|double|long|unsigned_long|integer" | |integer | |false |false to_ip |"ip to_ip(v:ip|keyword|text)" |v |"ip|keyword|text" | |ip | |false |false @@ -97,7 +97,7 @@ trim |"keyword|text trim(str:keyword|text)" ; -showFunctionsSynopsis#[skip:-8.11.99] +showFunctionsSynopsis#[skip:-8.12.99] show functions | keep synopsis; synopsis:keyword @@ -125,9 +125,9 @@ synopsis:keyword ? ends_with(arg1:?, arg2:?) "? floor(n:integer|long|double|unsigned_long)" "? greatest(first:integer|long|double|boolean|keyword|text|ip|version, rest...:integer|long|double|boolean|keyword|text|ip|version)" -? is_finite(arg1:?) -? is_infinite(arg1:?) -? is_nan(arg1:?) +boolean is_finite(n:double) +boolean is_infinite(n:double) +boolean is_nan(n:double) "? least(first:integer|long|double|boolean|keyword|text|ip|version, rest...:integer|long|double|boolean|keyword|text|ip|version)" ? left(string:keyword, length:integer) ? length(arg1:?) @@ -165,13 +165,13 @@ synopsis:keyword ? tau() "boolean to_bool(v:boolean|keyword|text|double|long|unsigned_long|integer)" "boolean to_boolean(v:boolean|keyword|text|double|long|unsigned_long|integer)" -? to_cartesianpoint(arg1:?) +"cartesian_point to_cartesianpoint(v:cartesian_point|long|unsigned_long|keyword|text)" "date to_datetime(v:date|keyword|text|double|long|unsigned_long|integer)" "double to_dbl(v:boolean|date|keyword|text|double|long|unsigned_long|integer)" "double to_degrees(v:double|long|unsigned_long|integer)" "double to_double(v:boolean|date|keyword|text|double|long|unsigned_long|integer)" "date to_dt(v:date|keyword|text|double|long|unsigned_long|integer)" -? to_geopoint(arg1:?) +"geo_point to_geopoint(v:geo_point|long|unsigned_long|keyword|text)" "integer to_int(v:boolean|date|keyword|text|double|long|unsigned_long|integer)" "integer to_integer(v:boolean|date|keyword|text|double|long|unsigned_long|integer)" "ip to_ip(v:ip|keyword|text)" @@ -188,7 +188,7 @@ synopsis:keyword ; -showFunctionsFiltered +showFunctionsFiltered#[skip:-8.12.99] // tag::showFunctionsFiltered[] SHOW functions | WHERE STARTS_WITH(name, "is_") @@ -197,9 +197,9 @@ SHOW functions // tag::showFunctionsFiltered-result[] name:keyword | synopsis:keyword | argNames:keyword | argTypes:keyword | argDescriptions:keyword | returnType:keyword | description:keyword | optionalArgs:boolean | variadic:boolean -is_finite |? is_finite(arg1:?) |arg1 |? | "" |? | "" | false | false -is_infinite |? is_infinite(arg1:?) |arg1 |? | "" |? | "" | false | false -is_nan |? is_nan(arg1:?) |arg1 |? | "" |? | "" | false | false +is_finite |boolean is_finite(n:double) |n |double | "A floating-point value" |boolean | "Returns true if the argument is a finite floating-point value." | false | false +is_infinite |boolean is_infinite(n:double) |n |double | "A floating-point value" |boolean | "Returns true if the specified floating-point value is infinitely large in magnitude." | false | false +is_nan |boolean is_nan(n:double) |n |double | "A floating-point value" |boolean | "Returns true if the argument is a Not-a-Number (NaN) value." 
| false | false // end::showFunctionsFiltered-result[] ; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec index 0ad759feeeea0..e216d004ba646 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec @@ -435,7 +435,7 @@ g:keyword | l:integer null | 5 ; -repetitiveAggregation +repetitiveAggregation#[skip:-8.11.99,reason:ReplaceDuplicateAggWithEval breaks bwc] from employees | stats m1 = max(salary), m2 = min(salary), m3 = min(salary), m4 = max(salary); m1:i | m2:i | m3:i | m4:i diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats_percentile.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats_percentile.csv-spec index 6ab061b33dfb0..f1849107d606d 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats_percentile.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats_percentile.csv-spec @@ -70,14 +70,14 @@ NULL ; -medianOfLong +medianOfLong#[skip:-8.11.99,reason:ReplaceDuplicateAggWithEval breaks bwc gh-103765] from employees | stats m = median(salary_change.long), p50 = percentile(salary_change.long, 50); m:double | p50:double 0 | 0 ; -medianOfInteger +medianOfInteger#[skip:-8.11.99,reason:ReplaceDuplicateAggWithEval breaks bwc gh-103765] // tag::median[] FROM employees | STATS MEDIAN(salary), PERCENTILE(salary, 50) @@ -90,7 +90,7 @@ MEDIAN(salary):double | PERCENTILE(salary,50):double // end::median-result[] ; -medianOfDouble +medianOfDouble#[skip:-8.11.99,reason:ReplaceDuplicateAggWithEval breaks bwc gh-103765] from employees | stats m = median(salary_change), p50 = percentile(salary_change, 50); m:double | p50:double @@ -98,7 +98,7 @@ m:double | p50:double ; -medianOfLongByKeyword +medianOfLongByKeyword#[skip:-8.11.99,reason:ReplaceDuplicateAggWithEval breaks bwc gh-103765] from employees | stats m = median(salary_change.long), p50 = percentile(salary_change.long, 50) by job_positions | sort m desc | limit 4; m:double | p50:double | job_positions:keyword @@ -109,7 +109,7 @@ m:double | p50:double | job_positions:keyword ; -medianOfIntegerByKeyword +medianOfIntegerByKeyword#[skip:-8.11.99,reason:ReplaceDuplicateAggWithEval breaks bwc gh-103765] from employees | stats m = median(salary), p50 = percentile(salary, 50) by job_positions | sort m | limit 4; m:double | p50:double | job_positions:keyword @@ -120,7 +120,7 @@ m:double | p50:double | job_positions:keyword ; -medianOfDoubleByKeyword +medianOfDoubleByKeyword#[skip:-8.11.99,reason:ReplaceDuplicateAggWithEval breaks bwc gh-103765] from employees | stats m = median(salary_change), p50 = percentile(salary_change, 50)by job_positions | sort m desc | limit 4; m:double | p50:double | job_positions:keyword diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/where-like.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/where-like.csv-spec index 37a1978524e7f..160fc46dafcf2 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/where-like.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/where-like.csv-spec @@ -298,3 +298,24 @@ FROM sample_data @timestamp:date | client_ip:ip | event_duration:long | message:keyword ; + +multiValueLike#[skip:-8.12.99] +from employees | where job_positions like "Account*" | keep emp_no, job_positions; + +warning:Line 1:24: evaluation of [job_positions like \"Account*\"] failed, 
treating result as null. Only first 20 failures recorded.
+warning:Line 1:24: java.lang.IllegalArgumentException: single-value function encountered multi-value
+
+emp_no:integer | job_positions:keyword
+10025 | Accountant
+;
+
+
+multiValueRLike#[skip:-8.12.99]
+from employees | where job_positions rlike "Account.*" | keep emp_no, job_positions;
+
+warning:Line 1:24: evaluation of [job_positions rlike \"Account.*\"] failed, treating result as null. Only first 20 failures recorded.
+warning:Line 1:24: java.lang.IllegalArgumentException: single-value function encountered multi-value
+
+emp_no:integer | job_positions:keyword
+10025 | Accountant
+;
diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AbstractEsqlIntegTestCase.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AbstractEsqlIntegTestCase.java
index 9b5012e56a3ff..0590caf2019b4 100644
--- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AbstractEsqlIntegTestCase.java
+++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AbstractEsqlIntegTestCase.java
@@ -9,6 +9,7 @@
 
 import org.elasticsearch.Build;
 import org.elasticsearch.ElasticsearchTimeoutException;
+import org.elasticsearch.action.admin.cluster.node.tasks.list.TransportListTasksAction;
 import org.elasticsearch.common.breaker.CircuitBreaker;
 import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Settings;
@@ -17,6 +18,7 @@
 import org.elasticsearch.compute.data.BlockFactory;
 import org.elasticsearch.compute.operator.exchange.ExchangeService;
 import org.elasticsearch.core.TimeValue;
+import org.elasticsearch.health.node.selection.HealthNode;
 import org.elasticsearch.index.query.QueryBuilder;
 import org.elasticsearch.indices.breaker.CircuitBreakerService;
 import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService;
@@ -53,11 +55,25 @@ public void ensureBlocksReleased() {
             CircuitBreaker reqBreaker = breakerService.getBreaker(CircuitBreaker.REQUEST);
             try {
                 assertBusy(() -> {
-                    logger.info("running tasks: {}", client().admin().cluster().prepareListTasks().get());
+                    logger.info(
+                        "running tasks: {}",
+                        client().admin()
+                            .cluster()
+                            .prepareListTasks()
+                            .get()
+                            .getTasks()
+                            .stream()
+                            .filter(
+                                // Skip the tasks that would get in the way while debugging
+                                t -> false == t.action().contains(TransportListTasksAction.TYPE.name())
+                                    && false == t.action().contains(HealthNode.TASK_NAME)
+                            )
+                            .toList()
+                    );
                     assertThat("Request breaker not reset to 0 on node: " + node, reqBreaker.getUsed(), equalTo(0L));
                 });
             } catch (Exception e) {
-                assertThat("Request breaker not reset to 0 on node: " + node, reqBreaker.getUsed(), equalTo(0L));
+                throw new RuntimeException("failed waiting for breakers to clear", e);
             }
         }
     }
@@ -80,6 +96,11 @@ public List<Setting<?>> getSettings() {
                 BlockFactory.LOCAL_BREAKER_OVER_RESERVED_MAX_SIZE_SETTING,
                 ByteSizeValue.ofBytes(randomIntBetween(0, 16 * 1024)),
                 Setting.Property.NodeScope
+            ),
+            Setting.byteSizeSetting(
+                BlockFactory.MAX_BLOCK_PRIMITIVE_ARRAY_SIZE_SETTING,
+                ByteSizeValue.ofBytes(randomLongBetween(1, BlockFactory.DEFAULT_MAX_BLOCK_PRIMITIVE_ARRAY_SIZE.getBytes())),
+                Setting.Property.NodeScope
             )
         );
     }
diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AbstractPausableIntegTestCase.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AbstractPausableIntegTestCase.java
new file mode 100644
index 0000000000000..7a5072120e5af
--- /dev/null
+++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AbstractPausableIntegTestCase.java
@@ -0,0 +1,165 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.esql.action;
+
+import org.elasticsearch.action.bulk.BulkRequestBuilder;
+import org.elasticsearch.action.support.WriteRequest;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.CollectionUtils;
+import org.elasticsearch.index.engine.SegmentsStats;
+import org.elasticsearch.index.mapper.OnScriptError;
+import org.elasticsearch.logging.LogManager;
+import org.elasticsearch.logging.Logger;
+import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.plugins.ScriptPlugin;
+import org.elasticsearch.script.LongFieldScript;
+import org.elasticsearch.script.ScriptContext;
+import org.elasticsearch.script.ScriptEngine;
+import org.elasticsearch.search.lookup.SearchLookup;
+import org.elasticsearch.xcontent.XContentBuilder;
+import org.elasticsearch.xcontent.json.JsonXContent;
+import org.junit.Before;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.Semaphore;
+import java.util.concurrent.TimeUnit;
+
+/** A pausable test case. Subclasses extend it to simulate slow-running queries.
+ *
+ * Execution is blocked through the evaluation of a runtime field "pause_me" of type long
+ * in the mappings, backed by a custom script language "pause" that waits on the
+ * semaphore "scriptPermits".
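+ *
+ * Subclasses typically index a small "test" index in setupIndex(), start a query that
+ * reads "pause_me" (for example "from test | stats sum(pause_me)") and then release
+ * permits on scriptPermits to let the query make progress.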
+ */
+public abstract class AbstractPausableIntegTestCase extends AbstractEsqlIntegTestCase {
+
+    private static final Logger LOGGER = LogManager.getLogger(AbstractPausableIntegTestCase.class);
+
+    protected static final Semaphore scriptPermits = new Semaphore(0);
+
+    protected int pageSize = -1;
+
+    protected int numberOfDocs = -1;
+
+    @Override
+    protected Collection<Class<? extends Plugin>> nodePlugins() {
+        return CollectionUtils.appendToCopy(super.nodePlugins(), PausableFieldPlugin.class);
+    }
+
+    protected int pageSize() {
+        if (pageSize == -1) {
+            pageSize = between(10, 100);
+        }
+        return pageSize;
+    }
+
+    protected int numberOfDocs() {
+        if (numberOfDocs == -1) {
+            numberOfDocs = between(4 * pageSize(), 5 * pageSize());
+        }
+        return numberOfDocs;
+    }
+
+    @Before
+    public void setupIndex() throws IOException {
+        assumeTrue("requires query pragmas", canUseQueryPragmas());
+
+        XContentBuilder mapping = JsonXContent.contentBuilder().startObject();
+        mapping.startObject("runtime");
+        {
+            mapping.startObject("pause_me");
+            {
+                mapping.field("type", "long");
+                mapping.startObject("script").field("source", "").field("lang", "pause").endObject();
+            }
+            mapping.endObject();
+        }
+        mapping.endObject();
+        client().admin()
+            .indices()
+            .prepareCreate("test")
+            .setSettings(Map.of("number_of_shards", 1, "number_of_replicas", 0))
+            .setMapping(mapping.endObject())
+            .get();
+
+        BulkRequestBuilder bulk = client().prepareBulk().setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE);
+        for (int i = 0; i < numberOfDocs(); i++) {
+            bulk.add(prepareIndex("test").setId(Integer.toString(i)).setSource("foo", i));
+        }
+        bulk.get();
+        /*
+         * forceMerge so we can be sure that we don't bump into tiny
+         * segments that finish super quickly and cause us to report strange
+         * statuses when we expect "starting".
+         */
+        client().admin().indices().prepareForceMerge("test").setMaxNumSegments(1).get();
+        /*
+         * Double super extra paranoid check that force merge worked. It's
+         * failed to reduce the index to a single segment and caused this test
+         * to fail in very difficult to debug ways. If it fails again, it'll
+         * trip here. Or maybe it won't! And we'll learn something. Maybe
+         * it's ghosts.
+         */
+        SegmentsStats stats = client().admin().indices().prepareStats("test").get().getPrimaries().getSegments();
+        if (stats.getCount() != 1L) {
+            fail(Strings.toString(stats));
+        }
+    }
+
+    public static class PausableFieldPlugin extends Plugin implements ScriptPlugin {
+
+        @Override
+        public ScriptEngine getScriptEngine(Settings settings, Collection<ScriptContext<?>> contexts) {
+            return new ScriptEngine() {
+                @Override
+                public String getType() {
+                    return "pause";
+                }
+
+                @Override
+                @SuppressWarnings("unchecked")
+                public <FactoryType> FactoryType compile(
+                    String name,
+                    String code,
+                    ScriptContext<FactoryType> context,
+                    Map<String, String> params
+                ) {
+                    return (FactoryType) new LongFieldScript.Factory() {
+                        @Override
+                        public LongFieldScript.LeafFactory newFactory(
+                            String fieldName,
+                            Map<String, Object> params,
+                            SearchLookup searchLookup,
+                            OnScriptError onScriptError
+                        ) {
+                            return ctx -> new LongFieldScript(fieldName, params, searchLookup, onScriptError, ctx) {
+                                @Override
+                                public void execute() {
+                                    try {
+                                        assertTrue(scriptPermits.tryAcquire(1, TimeUnit.MINUTES));
+                                    } catch (Exception e) {
+                                        throw new AssertionError(e);
+                                    }
+                                    LOGGER.debug("--> emitting value");
+                                    emit(1);
+                                }
+                            };
+                        }
+                    };
+                }
+
+                @Override
+                public Set<ScriptContext<?>> getSupportedContexts() {
+                    return Set.of(LongFieldScript.CONTEXT);
+                }
+            };
+        }
+    }
+}
diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AsyncEsqlQueryActionIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AsyncEsqlQueryActionIT.java
new file mode 100644
index 0000000000000..b58a0cd66b904
--- /dev/null
+++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/AsyncEsqlQueryActionIT.java
@@ -0,0 +1,257 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.esql.action;
+
+import org.elasticsearch.ResourceNotFoundException;
+import org.elasticsearch.action.support.master.AcknowledgedResponse;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.compute.operator.exchange.ExchangeService;
+import org.elasticsearch.core.TimeValue;
+import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.tasks.TaskInfo;
+import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin;
+import org.elasticsearch.xpack.core.async.DeleteAsyncResultAction;
+import org.elasticsearch.xpack.core.async.DeleteAsyncResultRequest;
+import org.elasticsearch.xpack.core.async.GetAsyncResultRequest;
+import org.elasticsearch.xpack.esql.plugin.QueryPragmas;
+import org.hamcrest.core.IsEqual;
+
+import java.nio.file.Path;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+
+import static org.elasticsearch.core.TimeValue.timeValueMillis;
+import static org.elasticsearch.core.TimeValue.timeValueMinutes;
+import static org.elasticsearch.core.TimeValue.timeValueSeconds;
+import static org.elasticsearch.test.hamcrest.OptionalMatchers.isEmpty;
+import static org.elasticsearch.test.hamcrest.OptionalMatchers.isPresent;
+import static org.elasticsearch.xpack.esql.EsqlTestUtils.getValuesList;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.notNullValue;
+
+/**
+ * Individual tests for specific aspects of the async query API.
+ */
+public class AsyncEsqlQueryActionIT extends AbstractPausableIntegTestCase {
+
+    @Override
+    protected Collection<Class<? extends Plugin>> nodePlugins() {
+        ArrayList<Class<? extends Plugin>> actions = new ArrayList<>(super.nodePlugins());
+        actions.add(EsqlAsyncActionIT.LocalStateEsqlAsync.class);
+        actions.add(InternalExchangePlugin.class);
+        return Collections.unmodifiableList(actions);
+    }
+
+    @Override
+    protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) {
+        return Settings.builder()
+            .put(ExchangeService.INACTIVE_SINKS_INTERVAL_SETTING, TimeValue.timeValueMillis(between(500, 2000)))
+            .build();
+    }
+
+    public void testBasicAsyncExecution() throws Exception {
+        try (var initialResponse = sendAsyncQuery()) {
+            assertThat(initialResponse.asyncExecutionId(), isPresent());
+            assertThat(initialResponse.isRunning(), is(true));
+            String id = initialResponse.asyncExecutionId().get();
+
+            if (randomBoolean()) {
+                // let's timeout first
+                var getResultsRequest = new GetAsyncResultRequest(id);
+                getResultsRequest.setWaitForCompletionTimeout(timeValueMillis(10));
+                getResultsRequest.setKeepAlive(randomKeepAlive());
+                var future = client().execute(EsqlAsyncGetResultAction.INSTANCE, getResultsRequest);
+                try (var responseWithTimeout = future.get()) {
+                    assertThat(initialResponse.asyncExecutionId(), isPresent());
+                    assertThat(responseWithTimeout.asyncExecutionId().get(), equalTo(id));
+                    assertThat(responseWithTimeout.isRunning(), is(true));
+                }
+            }
+
+            // Now we wait
+            var getResultsRequest = new GetAsyncResultRequest(id);
+            getResultsRequest.setWaitForCompletionTimeout(timeValueSeconds(60));
+            getResultsRequest.setKeepAlive(randomKeepAlive());
+            var future = client().execute(EsqlAsyncGetResultAction.INSTANCE, getResultsRequest);
+
+            // release the permits to allow the query to proceed
+            scriptPermits.release(numberOfDocs());
+
+            try (var finalResponse = future.get()) {
+                assertThat(finalResponse, notNullValue());
+                assertThat(finalResponse.isRunning(), is(false));
+                assertThat(finalResponse.columns(), equalTo(List.of(new ColumnInfo("sum(pause_me)", "long"))));
+                assertThat(getValuesList(finalResponse).size(), equalTo(1));
+            }
+
+            // Get the stored result (again)
+            var again = client().execute(EsqlAsyncGetResultAction.INSTANCE, getResultsRequest);
+            try (var finalResponse = again.get()) {
+                assertThat(finalResponse, notNullValue());
+                assertThat(finalResponse.isRunning(), is(false));
+                assertThat(finalResponse.columns(), equalTo(List.of(new ColumnInfo("sum(pause_me)", "long"))));
+                assertThat(getValuesList(finalResponse).size(), equalTo(1));
+            }
+
+            AcknowledgedResponse deleteResponse = deleteAsyncId(id);
+            assertThat(deleteResponse.isAcknowledged(), equalTo(true));
+            // the stored response should no longer be retrievable
+            var e = expectThrows(ResourceNotFoundException.class, () -> deleteAsyncId(id));
+            assertThat(e.getMessage(), IsEqual.equalTo(id));
+        } finally {
+            scriptPermits.drainPermits();
+        }
+    }
+
+    public void testAsyncCancellation() throws Exception {
+        try (var initialResponse = sendAsyncQuery()) {
+            assertThat(initialResponse.asyncExecutionId(), isPresent());
+            assertThat(initialResponse.isRunning(), is(true));
+            String id = initialResponse.asyncExecutionId().get();
+
+            DeleteAsyncResultRequest request = new DeleteAsyncResultRequest(id);
+            var future = client().execute(DeleteAsyncResultAction.INSTANCE, request);
+
+            // there should be just one task
+            List<TaskInfo> tasks = getEsqlQueryTasks();
+            assertThat(tasks.size(), is(1));
+
+            // release the permits to allow the query to proceed
+            scriptPermits.release(numberOfDocs());
+
+            var deleteResponse = future.actionGet(timeValueSeconds(60));
+            assertThat(deleteResponse.isAcknowledged(), equalTo(true));
+
+            // there should be no tasks after delete
+            tasks = getEsqlQueryTasks();
+            assertThat(tasks.size(), is(0));
+
+            // the stored response should no longer be retrievable
+            var getResultsRequest = new GetAsyncResultRequest(id);
+            getResultsRequest.setKeepAlive(timeValueMinutes(10));
+            getResultsRequest.setWaitForCompletionTimeout(timeValueSeconds(60));
+            var e = expectThrows(
+                ResourceNotFoundException.class,
+                () -> client().execute(EsqlAsyncGetResultAction.INSTANCE, getResultsRequest).actionGet()
+            );
+            assertThat(e.getMessage(), equalTo(id));
+        } finally {
+            scriptPermits.drainPermits();
+        }
+    }
+
+    public void testFinishingBeforeTimeoutKeep() {
+        testFinishingBeforeTimeout(true);
+    }
+
+    public void testFinishingBeforeTimeoutDoNotKeep() {
+        testFinishingBeforeTimeout(false);
+    }
+
+    private void testFinishingBeforeTimeout(boolean keepOnCompletion) {
+        // don't block the query execution at all
+        scriptPermits.drainPermits();
+        assert scriptPermits.availablePermits() == 0;
+
+        scriptPermits.release(numberOfDocs());
+
+        var request = new EsqlQueryRequestBuilder(client()).query("from test | stats sum(pause_me)")
+            .pragmas(queryPragmas())
+            .async(true)
+            .waitForCompletionTimeout(TimeValue.timeValueSeconds(60))
+            .keepOnCompletion(keepOnCompletion)
+            .keepAlive(randomKeepAlive());
+
+        try (var response = request.execute().actionGet(60, TimeUnit.SECONDS)) {
+            assertThat(response.isRunning(), is(false));
+            assertThat(response.columns(), equalTo(List.of(new ColumnInfo("sum(pause_me)", "long"))));
+            assertThat(getValuesList(response).size(), equalTo(1));
+
+            if (keepOnCompletion) {
+                assertThat(response.asyncExecutionId(), isPresent());
+                // we should be able to retrieve the response by id, since it has been kept
+                String id = response.asyncExecutionId().get();
+                var getResultsRequest = new GetAsyncResultRequest(id);
+                getResultsRequest.setWaitForCompletionTimeout(timeValueSeconds(60));
+                var future = client().execute(EsqlAsyncGetResultAction.INSTANCE, getResultsRequest);
+                try (var resp = future.actionGet(60, TimeUnit.SECONDS)) {
+                    assertThat(resp.asyncExecutionId().get(), equalTo(id));
+                    assertThat(resp.isRunning(), is(false));
+                    assertThat(resp.columns(), equalTo(List.of(new ColumnInfo("sum(pause_me)", "long"))));
+                    assertThat(getValuesList(resp).size(), equalTo(1));
+                }
+            } else {
+                assertThat(response.asyncExecutionId(), isEmpty());
+            }
+        } finally {
+            scriptPermits.drainPermits();
+        }
+    }
+
+    private List<TaskInfo> getEsqlQueryTasks() throws Exception {
+        List<TaskInfo> foundTasks = new ArrayList<>();
+        assertBusy(() -> {
+            List<TaskInfo> tasks = client().admin()
+                .cluster()
+                .prepareListTasks()
+                .setActions(EsqlQueryAction.NAME + "[a]")
+                .setDetailed(true)
+                .get()
+                .getTasks();
+            foundTasks.addAll(tasks);
+        });
+        return foundTasks;
+    }
+
+    private EsqlQueryResponse sendAsyncQuery() {
+        scriptPermits.drainPermits();
+        assert scriptPermits.availablePermits() == 0;
+
+        scriptPermits.release(between(1, 5));
+        var pragmas = queryPragmas();
+        return new EsqlQueryRequestBuilder(client()).query("from test | stats sum(pause_me)")
+            .pragmas(pragmas)
+            .async(true)
+            // deliberately small timeout, to frequently trigger incomplete response
+            .waitForCompletionTimeout(TimeValue.timeValueNanos(1))
+            .keepOnCompletion(randomBoolean())
+            .keepAlive(randomKeepAlive())
+            .execute()
+            .actionGet(60, TimeUnit.SECONDS);
+    }
+
+    private QueryPragmas queryPragmas() {
+        return new QueryPragmas(
+            Settings.builder()
+                // Force shard partitioning because that's all the tests know how to match. It is easier to reason about too.
+                .put("data_partitioning", "shard")
+                // Limit the page size to something small so we do more than one page worth of work, so we get more status updates.
+                .put("page_size", pageSize())
+                .build()
+        );
+    }
+
+    private AcknowledgedResponse deleteAsyncId(String id) {
+        DeleteAsyncResultRequest request = new DeleteAsyncResultRequest(id);
+        return client().execute(DeleteAsyncResultAction.INSTANCE, request).actionGet(timeValueSeconds(60));
+    }
+
+    TimeValue randomKeepAlive() {
+        return TimeValue.parseTimeValue(randomTimeValue(1, 5, "d"), "test");
+    }
+
+    public static class LocalStateEsqlAsync extends LocalStateCompositeXPackPlugin {
+        public LocalStateEsqlAsync(final Settings settings, final Path configPath) {
+            super(settings, configPath);
+        }
+    }
+}
diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersCancellationIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersCancellationIT.java
new file mode 100644
index 0000000000000..8d7cbc5cd41be
--- /dev/null
+++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersCancellationIT.java
@@ -0,0 +1,192 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.esql.action;
+
+import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksAction;
+import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest;
+import org.elasticsearch.action.bulk.BulkRequestBuilder;
+import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.support.PlainActionFuture;
+import org.elasticsearch.action.support.WriteRequest;
+import org.elasticsearch.common.settings.Setting;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.compute.operator.DriverTaskRunner;
+import org.elasticsearch.compute.operator.exchange.ExchangeService;
+import org.elasticsearch.core.TimeValue;
+import org.elasticsearch.index.mapper.OnScriptError;
+import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.plugins.ScriptPlugin;
+import org.elasticsearch.script.LongFieldScript;
+import org.elasticsearch.script.ScriptContext;
+import org.elasticsearch.script.ScriptEngine;
+import org.elasticsearch.search.lookup.SearchLookup;
+import org.elasticsearch.tasks.TaskInfo;
+import org.elasticsearch.test.AbstractMultiClustersTestCase;
+import org.elasticsearch.xcontent.XContentBuilder;
+import org.elasticsearch.xcontent.json.JsonXContent;
+import org.elasticsearch.xpack.esql.plugin.EsqlPlugin;
+import org.junit.Before;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+
+import static org.elasticsearch.xpack.esql.action.AbstractEsqlIntegTestCase.randomPragmas;
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.greaterThanOrEqualTo;
+import static org.hamcrest.Matchers.hasSize;
+
+public class CrossClustersCancellationIT extends AbstractMultiClustersTestCase {
+    private static final String REMOTE_CLUSTER = "cluster-a";
+
+    @Override
+    protected Collection<String> remoteClusterAlias() {
+        return List.of(REMOTE_CLUSTER);
+    }
+
+    @Override
+    protected Collection<Class<? extends Plugin>> nodePlugins(String clusterAlias) {
+        List<Class<? extends Plugin>> plugins = new ArrayList<>(super.nodePlugins(clusterAlias));
+        plugins.add(EsqlPlugin.class);
+        plugins.add(InternalExchangePlugin.class);
+        plugins.add(PauseFieldPlugin.class);
+        return plugins;
+    }
+
+    public static class InternalExchangePlugin extends Plugin {
+        @Override
+        public List<Setting<?>> getSettings() {
+            return List.of(
+                Setting.timeSetting(
+                    ExchangeService.INACTIVE_SINKS_INTERVAL_SETTING,
+                    TimeValue.timeValueMillis(between(1000, 3000)),
+                    Setting.Property.NodeScope
+                )
+            );
+        }
+    }
+
+    @Before
+    public void resetPlugin() {
+        PauseFieldPlugin.allowEmitting = new CountDownLatch(1);
+        PauseFieldPlugin.startEmitting = new CountDownLatch(1);
+    }
+
+    public static class PauseFieldPlugin extends Plugin implements ScriptPlugin {
+        public static CountDownLatch startEmitting = new CountDownLatch(1);
+        public static CountDownLatch allowEmitting = new CountDownLatch(1);
+
+        @Override
+        public ScriptEngine getScriptEngine(Settings settings, Collection<ScriptContext<?>> contexts) {
+            return new ScriptEngine() {
+                @Override
+                public String getType() {
+                    return "pause";
+                }
+
+                @Override
+                @SuppressWarnings("unchecked")
+                public <FactoryType> FactoryType compile(
+                    String name,
+                    String code,
+                    ScriptContext<FactoryType> context,
+                    Map<String, String> params
+                ) {
+                    if (context == LongFieldScript.CONTEXT) {
+                        return (FactoryType) new LongFieldScript.Factory() {
+                            @Override
+                            public LongFieldScript.LeafFactory newFactory(
+                                String fieldName,
+                                Map<String, Object> params,
+                                SearchLookup searchLookup,
+                                OnScriptError onScriptError
+                            ) {
+                                return ctx -> new LongFieldScript(fieldName, params, searchLookup, onScriptError, ctx) {
+                                    @Override
+                                    public void execute() {
+                                        startEmitting.countDown();
+                                        try {
+                                            assertTrue(allowEmitting.await(30, TimeUnit.SECONDS));
+                                        } catch (InterruptedException e) {
+                                            throw new AssertionError(e);
+                                        }
+                                        emit(1);
+                                    }
+                                };
+                            }
+                        };
+                    }
+                    throw new IllegalStateException("unsupported type " + context);
+                }
+
+                @Override
+                public Set<ScriptContext<?>> getSupportedContexts() {
+                    return Set.of(LongFieldScript.CONTEXT);
+                }
+            };
+        }
+    }
+
+    private void createRemoteIndex(int numDocs) throws Exception {
+        XContentBuilder mapping = JsonXContent.contentBuilder().startObject();
+        mapping.startObject("runtime");
+        {
+            mapping.startObject("const");
+            {
+                mapping.field("type", "long");
+                mapping.startObject("script").field("source", "").field("lang", "pause").endObject();
+            }
+            mapping.endObject();
+        }
+        mapping.endObject();
+        mapping.endObject();
+        client(REMOTE_CLUSTER).admin().indices().prepareCreate("test").setMapping(mapping).get();
+        BulkRequestBuilder bulk = client(REMOTE_CLUSTER).prepareBulk("test").setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE);
+        for (int i = 0; i < numDocs; i++) {
+            bulk.add(new IndexRequest().source("foo", i));
+        }
+        bulk.get();
+    }
+
+    public void testCancel() throws Exception {
+        createRemoteIndex(between(10, 100));
+        EsqlQueryRequest request = new EsqlQueryRequest();
+        request.query("FROM *:test | STATS total=sum(const) | LIMIT 1");
+        request.pragmas(randomPragmas());
+        PlainActionFuture<EsqlQueryResponse> requestFuture = new PlainActionFuture<>();
+        client().execute(EsqlQueryAction.INSTANCE, request, requestFuture);
+        assertTrue(PauseFieldPlugin.startEmitting.await(30, TimeUnit.SECONDS));
+        List<TaskInfo> rootTasks = new ArrayList<>();
+        assertBusy(() -> {
+            List<TaskInfo> tasks = client().admin().cluster().prepareListTasks().setActions(EsqlQueryAction.NAME).get().getTasks();
+            assertThat(tasks, hasSize(1));
+            rootTasks.addAll(tasks);
+        });
+        var cancelRequest = new CancelTasksRequest().setTargetTaskId(rootTasks.get(0).taskId()).setReason("proxy timeout");
+        client().execute(CancelTasksAction.INSTANCE, cancelRequest);
+        assertBusy(() -> {
+            List<TaskInfo> drivers = client(REMOTE_CLUSTER).admin()
+                .cluster()
+                .prepareListTasks()
+                .setActions(DriverTaskRunner.ACTION_NAME)
+                .get()
+                .getTasks();
+            assertThat(drivers.size(), greaterThanOrEqualTo(1));
+            for (TaskInfo driver : drivers) {
+                assertTrue(driver.cancellable());
+            }
+        });
+        PauseFieldPlugin.allowEmitting.countDown();
+        Exception error = expectThrows(Exception.class, requestFuture::actionGet);
+        assertThat(error.getMessage(), containsString("proxy timeout"));
+    }
+}
diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersEnrichIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersEnrichIT.java
new file mode 100644
index 0000000000000..e3a01bd6f4dd9
--- /dev/null
+++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersEnrichIT.java
@@ -0,0 +1,176 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */ + +package org.elasticsearch.xpack.esql.action; + +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.TransportAction; +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.compute.operator.exchange.ExchangeService; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.ingest.common.IngestCommonPlugin; +import org.elasticsearch.license.LicenseService; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.protocol.xpack.XPackInfoRequest; +import org.elasticsearch.protocol.xpack.XPackInfoResponse; +import org.elasticsearch.reindex.ReindexPlugin; +import org.elasticsearch.test.AbstractMultiClustersTestCase; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; +import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.action.TransportXPackInfoAction; +import org.elasticsearch.xpack.core.action.XPackInfoFeatureAction; +import org.elasticsearch.xpack.core.enrich.EnrichPolicy; +import org.elasticsearch.xpack.core.enrich.action.DeleteEnrichPolicyAction; +import org.elasticsearch.xpack.core.enrich.action.ExecuteEnrichPolicyAction; +import org.elasticsearch.xpack.core.enrich.action.PutEnrichPolicyAction; +import org.elasticsearch.xpack.enrich.EnrichPlugin; +import org.elasticsearch.xpack.esql.plugin.EsqlPlugin; +import org.junit.After; + +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Set; +import java.util.concurrent.TimeUnit; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.containsString; + +public class CrossClustersEnrichIT extends AbstractMultiClustersTestCase { + private static final String REMOTE_CLUSTER = "cluster_a"; + + @Override + protected Collection remoteClusterAlias() { + return List.of(REMOTE_CLUSTER); + } + + @Override + protected Collection> nodePlugins(String clusterAlias) { + List> plugins = new ArrayList<>(super.nodePlugins(clusterAlias)); + plugins.add(EsqlPlugin.class); + plugins.add(InternalExchangePlugin.class); + plugins.add(LocalStateEnrich.class); + plugins.add(IngestCommonPlugin.class); + plugins.add(ReindexPlugin.class); + return plugins; + } + + @Override + protected Settings nodeSettings() { + return Settings.builder().put(super.nodeSettings()).put(XPackSettings.SECURITY_ENABLED.getKey(), false).build(); + } + + public static class InternalExchangePlugin extends Plugin { + @Override + public List> getSettings() { + return List.of( + Setting.timeSetting( + ExchangeService.INACTIVE_SINKS_INTERVAL_SETTING, + TimeValue.timeValueSeconds(30), + Setting.Property.NodeScope + ) + ); + } + } + + public void testUnsupportedEnrich() { + Client localClient = client(LOCAL_CLUSTER); + localClient.admin().indices().prepareCreate("hosts").setMapping("ip", "type=ip", "os", "type=keyword").get(); + record Host(String ip, String os) { + + } + var hosts = List.of(new Host("192.168.1.3", "Windows")); + for (var h : hosts) { + localClient.prepareIndex("hosts").setSource("ip", h.ip, "os", h.os).get(); + } + 
localClient.admin().indices().prepareRefresh("hosts").get(); + EnrichPolicy policy = new EnrichPolicy("match", null, List.of("hosts"), "ip", List.of("ip", "os")); + localClient.execute(PutEnrichPolicyAction.INSTANCE, new PutEnrichPolicyAction.Request("hosts", policy)).actionGet(); + localClient.execute(ExecuteEnrichPolicyAction.INSTANCE, new ExecuteEnrichPolicyAction.Request("hosts")).actionGet(); + assertAcked(client(LOCAL_CLUSTER).admin().indices().prepareDelete("hosts")); + + record Event(String ip, String message) { + + } + for (String cluster : List.of(LOCAL_CLUSTER, REMOTE_CLUSTER)) { + var events = List.of(new Event("192.168.1.4", "access denied"), new Event("192.168.1.3", "restart")); + assertAcked(client(cluster).admin().indices().prepareCreate("events").setMapping("ip", "type=ip", "message", "type=text")); + for (Event e : events) { + client(cluster).prepareIndex("events").setSource("ip", e.ip, "message", e.message).get(); + } + client(cluster).admin().indices().prepareRefresh("events").get(); + } + List queries = List.of( + "FROM *:events | EVAL ip_str = TO_STR(ip) | ENRICH hosts on ip_str | LIMIT 1", + "FROM events*,*:events | EVAL ip_str = TO_STR(ip) | ENRICH hosts on ip_str | LIMIT 1", + "FROM *:events | EVAL ip_str = TO_STR(ip) | ENRICH hosts on ip_str | STATS COUNT(*) BY ip | LIMIT 1", + "FROM events*,*:events | EVAL ip_str = TO_STR(ip) | ENRICH hosts on ip_str | STATS COUNT(*) BY ip | LIMIT 1" + ); + for (String q : queries) { + Exception error = expectThrows(IllegalArgumentException.class, () -> runQuery(q).close()); + assertThat(error.getMessage(), containsString("cross clusters query doesn't support enrich yet")); + } + } + + @After + public void cleanClusters() { + cluster(LOCAL_CLUSTER).wipe(Set.of()); + client(LOCAL_CLUSTER).execute(DeleteEnrichPolicyAction.INSTANCE, new DeleteEnrichPolicyAction.Request("hosts")); + cluster(REMOTE_CLUSTER).wipe(Set.of()); + } + + protected EsqlQueryResponse runQuery(String query) { + EsqlQueryRequest request = new EsqlQueryRequest(); + request.query(query); + request.pragmas(AbstractEsqlIntegTestCase.randomPragmas()); + return client(LOCAL_CLUSTER).execute(EsqlQueryAction.INSTANCE, request).actionGet(30, TimeUnit.SECONDS); + } + + public static class LocalStateEnrich extends LocalStateCompositeXPackPlugin { + + public LocalStateEnrich(final Settings settings, final Path configPath) throws Exception { + super(settings, configPath); + + plugins.add(new EnrichPlugin(settings) { + @Override + protected XPackLicenseState getLicenseState() { + return this.getLicenseState(); + } + }); + } + + public static class EnrichTransportXPackInfoAction extends TransportXPackInfoAction { + @Inject + public EnrichTransportXPackInfoAction( + TransportService transportService, + ActionFilters actionFilters, + LicenseService licenseService, + NodeClient client + ) { + super(transportService, actionFilters, licenseService, client); + } + + @Override + protected List infoActions() { + return Collections.singletonList(XPackInfoFeatureAction.ENRICH); + } + } + + @Override + protected Class> getInfoAction() { + return EnrichTransportXPackInfoAction.class; + } + } +} diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersQueryIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersQueryIT.java index a24b643a299c2..02f85292f05b3 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersQueryIT.java +++ 
b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/CrossClustersQueryIT.java @@ -7,24 +7,34 @@ package org.elasticsearch.xpack.esql.action; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.client.internal.Client; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.CollectionUtils; +import org.elasticsearch.compute.lucene.DataPartitioning; +import org.elasticsearch.compute.operator.DriverProfile; import org.elasticsearch.compute.operator.exchange.ExchangeService; import org.elasticsearch.core.TimeValue; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.AbstractMultiClustersTestCase; +import org.elasticsearch.test.InternalTestCluster; +import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.esql.plugin.EsqlPlugin; +import org.elasticsearch.xpack.esql.plugin.QueryPragmas; +import org.junit.Before; import java.util.ArrayList; import java.util.Collection; -import java.util.Iterator; import java.util.List; +import java.util.Map; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.xpack.esql.EsqlTestUtils.getValuesList; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.hasSize; public class CrossClustersQueryIT extends AbstractMultiClustersTestCase { @@ -37,11 +47,10 @@ protected Collection remoteClusterAlias() { @Override protected Collection> nodePlugins(String clusterAlias) { - List> plugins = new ArrayList<>(); - plugins.addAll(super.nodePlugins(clusterAlias)); + List> plugins = new ArrayList<>(super.nodePlugins(clusterAlias)); plugins.add(EsqlPlugin.class); plugins.add(InternalExchangePlugin.class); - return CollectionUtils.appendToCopy(super.nodePlugins(clusterAlias), EsqlPlugin.class); + return plugins; } public static class InternalExchangePlugin extends Plugin { @@ -57,61 +66,145 @@ public List> getSettings() { } } - public void testUnsupported() { - int numDocs = between(1, 10); - for (String cluster : List.of(LOCAL_CLUSTER, REMOTE_CLUSTER)) { - Client client = client(cluster); - assertAcked( - client.admin() - .indices() - .prepareCreate("events") - .setSettings(Settings.builder().put("index.number_of_shards", randomIntBetween(1, 5))) - .setMapping("tag", "type=keyword", "v", "type=long") - ); - for (int i = 0; i < numDocs; i++) { - client.prepareIndex("events").setSource("tag", cluster, "v", i).get(); - } - client.admin().indices().prepareRefresh("events").get(); + @Before + public void populateLocalIndices() { + Client localClient = client(LOCAL_CLUSTER); + assertAcked( + localClient.admin() + .indices() + .prepareCreate("logs-1") + .setSettings(Settings.builder().put("index.number_of_shards", randomIntBetween(1, 5))) + .setMapping("id", "type=keyword", "tag", "type=keyword", "v", "type=long") + ); + for (int i = 0; i < 10; i++) { + localClient.prepareIndex("logs-1").setSource("id", "local-" + i, "tag", "local", "v", i).get(); } - var emptyQueries = List.of( - "from *:* | LIMIT 0", - "from *,*:* | LIMIT 0", - "from *:events* | LIMIT 0", - "from events,*:events* | LIMIT 0" + localClient.admin().indices().prepareRefresh("logs-1").get(); + } + + @Before + public void populateRemoteIndices() { + Client 
remoteClient = client(REMOTE_CLUSTER); + assertAcked( + remoteClient.admin() + .indices() + .prepareCreate("logs-2") + .setSettings(Settings.builder().put("index.number_of_shards", randomIntBetween(1, 5))) + .setMapping("id", "type=keyword", "tag", "type=keyword", "v", "type=long") ); - for (String q : emptyQueries) { - try (EsqlQueryResponse resp = runQuery(q)) { - assertThat(resp.columns(), hasSize(2)); - assertFalse(resp.values().hasNext()); + for (int i = 0; i < 10; i++) { + remoteClient.prepareIndex("logs-2").setSource("id", "remote-" + i, "tag", "remote", "v", i * i).get(); + } + remoteClient.admin().indices().prepareRefresh("logs-2").get(); + } + + public void testSimple() { + try (EsqlQueryResponse resp = runQuery("from logs-*,*:logs-* | stats sum (v)")) { + List> values = getValuesList(resp); + assertThat(values, hasSize(1)); + assertThat(values.get(0), equalTo(List.of(330L))); + } + try (EsqlQueryResponse resp = runQuery("from logs-*,*:logs-* | stats count(*) by tag | sort tag | keep tag")) { + List> values = getValuesList(resp); + assertThat(values, hasSize(2)); + assertThat(values.get(0), equalTo(List.of("local"))); + assertThat(values.get(1), equalTo(List.of("remote"))); + } + } + + public void testMetadataIndex() { + try (EsqlQueryResponse resp = runQuery("FROM logs*,*:logs* [METADATA _index] | stats sum(v) by _index | sort _index")) { + List> values = getValuesList(resp); + assertThat(values.get(0), equalTo(List.of(285L, "cluster-a:logs-2"))); + assertThat(values.get(1), equalTo(List.of(45L, "logs-1"))); + } + } + + public void testProfile() { + final int localOnlyProfiles; + // uses shard partitioning as segments can be merged during these queries + var pragmas = new QueryPragmas(Settings.builder().put(QueryPragmas.DATA_PARTITIONING.getKey(), DataPartitioning.SHARD).build()); + { + EsqlQueryRequest request = new EsqlQueryRequest(); + request.query("FROM logs* | stats sum(v)"); + request.pragmas(pragmas); + request.profile(true); + try (EsqlQueryResponse resp = runQuery(request)) { + List> values = getValuesList(resp); + assertThat(values.get(0), equalTo(List.of(45L))); + assertNotNull(resp.profile()); + List drivers = resp.profile().drivers(); + assertThat(drivers.size(), greaterThanOrEqualTo(2)); // one coordinator and at least one data + localOnlyProfiles = drivers.size(); } } - var remotePatterns = List.of("*:*", "*, *:*", "*:events*", "events, *:events*"); - for (String pattern : remotePatterns) { - var query = "FROM " + pattern + " | LIMIT " + between(1, 100); - IllegalArgumentException error = expectThrows(IllegalArgumentException.class, () -> runQuery(query).close()); - assertThat(error.getMessage(), equalTo("ES|QL does not yet support querying remote indices [" + pattern + "]")); + final int remoteOnlyProfiles; + { + EsqlQueryRequest request = new EsqlQueryRequest(); + request.query("FROM *:logs* | stats sum(v)"); + request.pragmas(pragmas); + request.profile(true); + try (EsqlQueryResponse resp = runQuery(request)) { + List> values = getValuesList(resp); + assertThat(values.get(0), equalTo(List.of(285L))); + assertNotNull(resp.profile()); + List drivers = resp.profile().drivers(); + assertThat(drivers.size(), greaterThanOrEqualTo(3)); // two coordinators and at least one data + remoteOnlyProfiles = drivers.size(); + } } - int limit = between(1, numDocs); - var localQueries = List.of("from events* | LIMIT " + limit, "from * | LIMIT " + limit); - for (String q : localQueries) { - try (EsqlQueryResponse resp = runQuery(q)) { - assertThat(resp.columns(), hasSize(2)); - 
int rows = 0; - Iterator> values = resp.values(); - while (values.hasNext()) { - values.next(); - ++rows; - } - assertThat(rows, equalTo(limit)); + final int allProfiles; + { + EsqlQueryRequest request = new EsqlQueryRequest(); + request.query("FROM logs*,*:logs* | stats total = sum(v)"); + request.pragmas(pragmas); + request.profile(true); + try (EsqlQueryResponse resp = runQuery(request)) { + List> values = getValuesList(resp); + assertThat(values.get(0), equalTo(List.of(330L))); + assertNotNull(resp.profile()); + List drivers = resp.profile().drivers(); + assertThat(drivers.size(), greaterThanOrEqualTo(4)); // two coordinators and at least two data + allProfiles = drivers.size(); } } + assertThat(allProfiles, equalTo(localOnlyProfiles + remoteOnlyProfiles - 1)); + } + + public void testWarnings() throws Exception { + EsqlQueryRequest request = new EsqlQueryRequest(); + request.query("FROM logs*,*:logs* | EVAL ip = to_ip(id) | STATS total = sum(v) by ip | LIMIT 10"); + PlainActionFuture future = new PlainActionFuture<>(); + InternalTestCluster cluster = cluster(LOCAL_CLUSTER); + String node = randomFrom(cluster.getNodeNames()); + CountDownLatch latch = new CountDownLatch(1); + cluster.client(node).execute(EsqlQueryAction.INSTANCE, request, ActionListener.wrap(resp -> { + TransportService ts = cluster.getInstance(TransportService.class, node); + Map> responseHeaders = ts.getThreadPool().getThreadContext().getResponseHeaders(); + List warnings = responseHeaders.getOrDefault("Warning", List.of()) + .stream() + .filter(w -> w.contains("is not an IP string literal")) + .toList(); + assertThat(warnings.size(), greaterThanOrEqualTo(20)); + List> values = getValuesList(resp); + assertThat(values.get(0).get(0), equalTo(330L)); + assertNull(values.get(0).get(1)); + latch.countDown(); + }, e -> { + latch.countDown(); + throw new AssertionError(e); + })); + assertTrue(latch.await(30, TimeUnit.SECONDS)); } protected EsqlQueryResponse runQuery(String query) { - logger.info("--> query [{}]", query); EsqlQueryRequest request = new EsqlQueryRequest(); request.query(query); request.pragmas(AbstractEsqlIntegTestCase.randomPragmas()); + return runQuery(request); + } + + protected EsqlQueryResponse runQuery(EsqlQueryRequest request) { return client(LOCAL_CLUSTER).execute(EsqlQueryAction.INSTANCE, request).actionGet(30, TimeUnit.SECONDS); } } diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionIT.java index 95a0469ba8972..0708bfce1e134 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionIT.java @@ -32,6 +32,7 @@ import org.elasticsearch.xcontent.json.JsonXContent; import org.elasticsearch.xpack.esql.analysis.VerificationException; import org.elasticsearch.xpack.esql.expression.function.EsqlFunctionRegistry; +import org.elasticsearch.xpack.esql.parser.ParsingException; import org.elasticsearch.xpack.esql.plugin.EsqlPlugin; import org.junit.Before; @@ -66,6 +67,7 @@ import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; 
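The expected values in the cross-cluster assertions above come straight from the fixtures: `logs-1` holds v = 0..9 on the local cluster and `logs-2` holds v = i² for i = 0..9 on the remote, so the per-cluster sums are 45 and 285 and the combined sum is 330. The profile-count identity works the same way: a local-only query runs one coordinator driver plus data drivers, a remote-only query runs two coordinators (local and remote) plus remote data drivers, and the combined query shares the single local coordinator, hence `allProfiles == localOnlyProfiles + remoteOnlyProfiles - 1`. A quick check of the sums:

```java
import java.util.stream.IntStream;

public class ExpectedSums {
    public static void main(String[] args) {
        long local = IntStream.range(0, 10).sum();                  // 0 + 1 + ... + 9
        long remote = IntStream.range(0, 10).map(i -> i * i).sum(); // 0 + 1 + 4 + ... + 81
        System.out.println(local);          // 45  -> "FROM logs*"
        System.out.println(remote);         // 285 -> "FROM *:logs*"
        System.out.println(local + remote); // 330 -> "FROM logs-*,*:logs-*"
    }
}
```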
@@ -986,7 +988,28 @@ public void testOverlappingIndexPatterns() throws Exception { .add(new IndexRequest("test_overlapping_index_patterns_2").id("1").source("field", "foo")) .get(); } - expectThrows(VerificationException.class, () -> run("from test_overlapping_index_patterns_* | sort field")); + + assertVerificationException("from test_overlapping_index_patterns_* | sort field"); + } + + public void testErrorMessageForUnknownColumn() { + var e = assertVerificationException("row a = 1 | eval x = b"); + assertThat(e.getMessage(), containsString("Unknown column [b]")); + } + + // Straightforward verification. Subclasses can override. + protected Exception assertVerificationException(String esqlCommand) { + return expectThrows(VerificationException.class, () -> run(esqlCommand)); + } + + public void testErrorMessageForEmptyParams() { + var e = assertParsingException("row a = 1 | eval x = ?"); + assertThat(e.getMessage(), containsString("Not enough actual parameters 0")); + } + + // Straightforward verification. Subclasses can override. + protected Exception assertParsingException(String esqlCommand) { + return expectThrows(ParsingException.class, () -> run(esqlCommand)); } public void testEmptyIndex() { diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionTaskIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionTaskIT.java index 6c86a5664e17a..5a8e2fef724f4 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionTaskIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionTaskIT.java @@ -12,45 +12,26 @@ import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksAction; import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest; -import org.elasticsearch.action.bulk.BulkRequestBuilder; -import org.elasticsearch.action.support.WriteRequest; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.compute.lucene.LuceneSourceOperator; import org.elasticsearch.compute.lucene.ValuesSourceReaderOperator; import org.elasticsearch.compute.operator.DriverStatus; import org.elasticsearch.compute.operator.DriverTaskRunner; import org.elasticsearch.compute.operator.exchange.ExchangeSinkOperator; import org.elasticsearch.compute.operator.exchange.ExchangeSourceOperator; -import org.elasticsearch.index.engine.SegmentsStats; -import org.elasticsearch.index.mapper.OnScriptError; import org.elasticsearch.logging.LogManager; import org.elasticsearch.logging.Logger; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.ScriptPlugin; -import org.elasticsearch.script.LongFieldScript; -import org.elasticsearch.script.ScriptContext; -import org.elasticsearch.script.ScriptEngine; -import org.elasticsearch.search.lookup.SearchLookup; import org.elasticsearch.tasks.TaskCancelledException; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.tasks.TaskInfo; import org.elasticsearch.test.junit.annotations.TestLogging; -import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.json.JsonXContent; import org.elasticsearch.xpack.esql.plugin.QueryPragmas; import org.junit.Before; -import java.io.IOException; import 
java.util.ArrayList; -import java.util.Collection; import java.util.List; -import java.util.Map; import java.util.Set; -import java.util.concurrent.Semaphore; -import java.util.concurrent.TimeUnit; import static org.elasticsearch.test.MapMatcher.assertMap; import static org.elasticsearch.test.MapMatcher.matchesMap; @@ -71,84 +52,34 @@ value = "org.elasticsearch.xpack.esql:TRACE,org.elasticsearch.compute:TRACE", reason = "These tests were failing frequently, let's learn as much as we can" ) -public class EsqlActionTaskIT extends AbstractEsqlIntegTestCase { - private static int PAGE_SIZE; - private static int NUM_DOCS; +public class EsqlActionTaskIT extends AbstractPausableIntegTestCase { - private static String READ_DESCRIPTION; - private static String MERGE_DESCRIPTION; private static final Logger LOGGER = LogManager.getLogger(EsqlActionTaskIT.class); - @Override - protected Collection> nodePlugins() { - return CollectionUtils.appendToCopy(super.nodePlugins(), PausableFieldPlugin.class); - } + private String READ_DESCRIPTION; + private String MERGE_DESCRIPTION; @Before - public void setupIndex() throws IOException { + public void setup() { assumeTrue("requires query pragmas", canUseQueryPragmas()); - PAGE_SIZE = between(10, 100); - NUM_DOCS = between(4 * PAGE_SIZE, 5 * PAGE_SIZE); READ_DESCRIPTION = """ - \\_LuceneSourceOperator[dataPartitioning = SHARD, maxPageSize = PAGE_SIZE, limit = 2147483647] + \\_LuceneSourceOperator[dataPartitioning = SHARD, maxPageSize = pageSize(), limit = 2147483647] \\_ValuesSourceReaderOperator[fields = [pause_me]] \\_AggregationOperator[mode = INITIAL, aggs = sum of longs] - \\_ExchangeSinkOperator""".replace("PAGE_SIZE", Integer.toString(PAGE_SIZE)); + \\_ExchangeSinkOperator""".replace("pageSize()", Integer.toString(pageSize())); MERGE_DESCRIPTION = """ \\_ExchangeSourceOperator[] \\_AggregationOperator[mode = FINAL, aggs = sum of longs] \\_ProjectOperator[projection = [0]] \\_LimitOperator[limit = 500] \\_OutputOperator[columns = [sum(pause_me)]]"""; - - XContentBuilder mapping = JsonXContent.contentBuilder().startObject(); - mapping.startObject("runtime"); - { - mapping.startObject("pause_me"); - { - mapping.field("type", "long"); - mapping.startObject("script").field("source", "").field("lang", "pause").endObject(); - } - mapping.endObject(); - } - mapping.endObject(); - client().admin() - .indices() - .prepareCreate("test") - .setSettings(Map.of("number_of_shards", 1, "number_of_replicas", 0)) - .setMapping(mapping.endObject()) - .get(); - - try (BulkRequestBuilder bulk = client().prepareBulk().setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)) { - for (int i = 0; i < NUM_DOCS; i++) { - bulk.add(prepareIndex("test").setId(Integer.toString(i)).setSource("foo", i)); - } - bulk.get(); - } - /* - * forceMerge so we can be sure that we don't bump into tiny - * segments that finish super quickly and cause us to report strange - * statuses when we expect "starting". - */ - client().admin().indices().prepareForceMerge("test").setMaxNumSegments(1).get(); - /* - * Double super extra paranoid check that force merge worked. It's - * failed to reduce the index to a single segment and caused this test - * to fail in very difficult to debug ways. If it fails again, it'll - * trip here. Or maybe it won't! And we'll learn something. Maybe - * it's ghosts. 
- */ - SegmentsStats stats = client().admin().indices().prepareStats("test").get().getPrimaries().getSegments(); - if (stats.getCount() != 1L) { - fail(Strings.toString(stats)); - } } public void testTaskContents() throws Exception { ActionFuture response = startEsql(); try { getTasksStarting(); - scriptPermits.release(PAGE_SIZE); + scriptPermits.release(pageSize()); List foundTasks = getTasksRunning(); int luceneSources = 0; int valuesSourceReaders = 0; @@ -159,9 +90,11 @@ public void testTaskContents() throws Exception { assertThat(status.sessionId(), not(emptyOrNullString())); for (DriverStatus.OperatorStatus o : status.activeOperators()) { logger.info("status {}", o); - if (o.operator().startsWith("LuceneSourceOperator[maxPageSize=" + PAGE_SIZE)) { + if (o.operator().startsWith("LuceneSourceOperator[maxPageSize=" + pageSize())) { LuceneSourceOperator.Status oStatus = (LuceneSourceOperator.Status) o.status(); assertThat(oStatus.processedSlices(), lessThanOrEqualTo(oStatus.totalSlices())); + assertThat(oStatus.processedQueries(), equalTo(Set.of("*:*"))); + assertThat(oStatus.processedShards(), equalTo(Set.of("test:0"))); assertThat(oStatus.sliceIndex(), lessThanOrEqualTo(oStatus.totalSlices())); assertThat(oStatus.sliceMin(), greaterThanOrEqualTo(0)); assertThat(oStatus.sliceMax(), greaterThanOrEqualTo(oStatus.sliceMin())); @@ -205,9 +138,9 @@ public void testTaskContents() throws Exception { assertThat(exchangeSinks, greaterThanOrEqualTo(1)); assertThat(exchangeSources, equalTo(1)); } finally { - scriptPermits.release(NUM_DOCS); + scriptPermits.release(numberOfDocs()); try (EsqlQueryResponse esqlResponse = response.get()) { - assertThat(Iterators.flatMap(esqlResponse.values(), i -> i).next(), equalTo((long) NUM_DOCS)); + assertThat(Iterators.flatMap(esqlResponse.values(), i -> i).next(), equalTo((long) numberOfDocs())); } } } @@ -220,7 +153,7 @@ public void testCancelRead() throws Exception { cancelTask(running.taskId()); assertCancelled(response); } finally { - scriptPermits.release(NUM_DOCS); + scriptPermits.release(numberOfDocs()); } } @@ -232,7 +165,7 @@ public void testCancelMerge() throws Exception { cancelTask(running.taskId()); assertCancelled(response); } finally { - scriptPermits.release(NUM_DOCS); + scriptPermits.release(numberOfDocs()); } } @@ -250,7 +183,7 @@ public void testCancelEsqlTask() throws Exception { cancelTask(tasks.get(0).taskId()); assertCancelled(response); } finally { - scriptPermits.release(NUM_DOCS); + scriptPermits.release(numberOfDocs()); } } @@ -262,7 +195,7 @@ private ActionFuture startEsql() { // Force shard partitioning because that's all the tests know how to match. It is easier to reason about too. .put("data_partitioning", "shard") // Limit the page size to something small so we do more than one page worth of work, so we get more status updates. 
- .put("page_size", PAGE_SIZE) + .put("page_size", pageSize()) // Report the status after every action .put("status_interval", "0ms") .build() @@ -275,7 +208,7 @@ private void cancelTask(TaskId taskId) { request.setWaitForCompletion(false); LOGGER.debug("--> cancelling task [{}] without waiting for completion", taskId); client().admin().cluster().execute(CancelTasksAction.INSTANCE, request).actionGet(); - scriptPermits.release(NUM_DOCS); + scriptPermits.release(numberOfDocs()); request = new CancelTasksRequest().setTargetTaskId(taskId).setReason("test cancel"); request.setWaitForCompletion(true); LOGGER.debug("--> cancelling task [{}] with waiting for completion", taskId); @@ -346,7 +279,7 @@ private List getTasksRunning() throws Exception { } private void assertCancelled(ActionFuture response) throws Exception { - Exception e = expectThrows(Exception.class, response::actionGet); + Exception e = expectThrows(Exception.class, response); Throwable cancelException = ExceptionsHelper.unwrap(e, TaskCancelledException.class); assertNotNull(cancelException); /* @@ -368,56 +301,4 @@ private void assertCancelled(ActionFuture response) throws Ex ) ); } - - private static final Semaphore scriptPermits = new Semaphore(0); - - public static class PausableFieldPlugin extends Plugin implements ScriptPlugin { - @Override - public ScriptEngine getScriptEngine(Settings settings, Collection> contexts) { - return new ScriptEngine() { - @Override - public String getType() { - return "pause"; - } - - @Override - @SuppressWarnings("unchecked") - public FactoryType compile( - String name, - String code, - ScriptContext context, - Map params - ) { - return (FactoryType) new LongFieldScript.Factory() { - @Override - public LongFieldScript.LeafFactory newFactory( - String fieldName, - Map params, - SearchLookup searchLookup, - OnScriptError onScriptError - ) { - return ctx -> new LongFieldScript(fieldName, params, searchLookup, onScriptError, ctx) { - @Override - public void execute() { - try { - assertTrue(scriptPermits.tryAcquire(1, TimeUnit.MINUTES)); - } catch (Exception e) { - throw new AssertionError(e); - } - LOGGER.debug("--> emitting value"); - emit(1); - } - }; - } - }; - } - - @Override - public Set> getSupportedContexts() { - return Set.of(LongFieldScript.CONTEXT); - } - }; - } - } - } diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlAsyncActionIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlAsyncActionIT.java new file mode 100644 index 0000000000000..063855f2903e3 --- /dev/null +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlAsyncActionIT.java @@ -0,0 +1,167 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.action; + +import org.elasticsearch.ElasticsearchTimeoutException; +import org.elasticsearch.ResourceNotFoundException; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.common.io.stream.NotSerializableExceptionWrapper; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.BlockUtils; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; +import org.elasticsearch.xpack.core.async.DeleteAsyncResultAction; +import org.elasticsearch.xpack.core.async.DeleteAsyncResultRequest; +import org.elasticsearch.xpack.core.async.GetAsyncResultRequest; +import org.elasticsearch.xpack.esql.TestBlockFactory; +import org.elasticsearch.xpack.esql.analysis.VerificationException; +import org.elasticsearch.xpack.esql.parser.ParsingException; +import org.elasticsearch.xpack.esql.plugin.QueryPragmas; + +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.TimeUnit; + +import static org.elasticsearch.core.TimeValue.timeValueSeconds; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.core.IsEqual.equalTo; +import static org.hamcrest.core.IsNot.not; + +/** + * Runs test scenarios from EsqlActionIT, with an extra level of indirection + * through the async query and async get APIs. 
+ */ +public class EsqlAsyncActionIT extends EsqlActionIT { + + @Override + protected Collection> nodePlugins() { + ArrayList> actions = new ArrayList<>(super.nodePlugins()); + actions.add(LocalStateEsqlAsync.class); + return Collections.unmodifiableList(actions); + } + + @Override + protected EsqlQueryResponse run(String esqlCommands, QueryPragmas pragmas, QueryBuilder filter) { + EsqlQueryRequest request = new EsqlQueryRequest(); + request.query(esqlCommands); + request.pragmas(pragmas); + request.async(true); + // deliberately small timeout, to frequently trigger incomplete response + request.waitForCompletionTimeout(TimeValue.timeValueNanos(1)); + request.keepOnCompletion(randomBoolean()); + if (filter != null) { + request.filter(filter); + } + + var response = run(request); + if (response.asyncExecutionId().isPresent()) { + List initialColumns = null; + List initialPages = null; + String id = response.asyncExecutionId().get(); + if (response.isRunning() == false) { + assertThat(request.keepOnCompletion(), is(true)); + assertThat(response.columns(), is(not(empty()))); + assertThat(response.pages(), is(not(empty()))); + initialColumns = List.copyOf(response.columns()); + initialPages = deepCopyOf(response.pages(), TestBlockFactory.getNonBreakingInstance()); + } else { + assertThat(response.columns(), is(empty())); // no partial results + assertThat(response.pages(), is(empty())); + } + response.close(); + var getResponse = getAsyncResponse(id); + + // assert initial contents, if any, are the same as async get contents + if (initialColumns != null) { + assertEquals(initialColumns, getResponse.columns()); + assertEquals(initialPages, getResponse.pages()); + } + + assertDeletable(id); + return getResponse; + } else { + return response; + } + } + + void assertDeletable(String id) { + var resp = deleteAsyncId(id); + assertTrue(resp.isAcknowledged()); + // the stored response should no longer be retrievable + var e = expectThrows(ResourceNotFoundException.class, () -> getAsyncResponse(id)); + assertThat(e.getMessage(), equalTo(id)); + } + + EsqlQueryResponse getAsyncResponse(String id) { + try { + var getResultsRequest = new GetAsyncResultRequest(id).setWaitForCompletionTimeout(timeValueSeconds(60)); + return client().execute(EsqlAsyncGetResultAction.INSTANCE, getResultsRequest).actionGet(30, TimeUnit.SECONDS); + } catch (ElasticsearchTimeoutException e) { + throw new AssertionError("timeout", e); + } + } + + AcknowledgedResponse deleteAsyncId(String id) { + try { + DeleteAsyncResultRequest request = new DeleteAsyncResultRequest(id); + return client().execute(DeleteAsyncResultAction.INSTANCE, request).actionGet(30, TimeUnit.SECONDS); + } catch (ElasticsearchTimeoutException e) { + throw new AssertionError("timeout", e); + } + } + + // Overridden to allow for not-serializable wrapper. + @Override + protected Exception assertVerificationException(String esqlCommand) { + var e = expectThrowsAnyOf(List.of(NotSerializableExceptionWrapper.class, VerificationException.class), () -> run(esqlCommand)); + if (e instanceof NotSerializableExceptionWrapper wrapper) { + assertThat(wrapper.unwrapCause().getMessage(), containsString("verification_exception")); + } + return e; + } + + // Overridden to allow for not-serializable wrapper. 
+ @Override + protected Exception assertParsingException(String esqlCommand) { + var e = expectThrowsAnyOf(List.of(NotSerializableExceptionWrapper.class, ParsingException.class), () -> run(esqlCommand)); + if (e instanceof NotSerializableExceptionWrapper wrapper) { + assertThat(wrapper.unwrapCause().getMessage(), containsString("parsing_exception")); + } + return e; + } + + public static class LocalStateEsqlAsync extends LocalStateCompositeXPackPlugin { + public LocalStateEsqlAsync(final Settings settings, final Path configPath) { + super(settings, configPath); + } + } + + // -- TODO: eventually remove and use common compute test infra + + public static List deepCopyOf(List pages, BlockFactory blockFactory) { + return pages.stream().map(page -> deepCopyOf(page, blockFactory)).toList(); + } + + public static Page deepCopyOf(Page page, BlockFactory blockFactory) { + Block[] blockCopies = new Block[page.getBlockCount()]; + for (int i = 0; i < blockCopies.length; i++) { + blockCopies[i] = BlockUtils.deepCopyOf(page.getBlock(i), blockFactory); + } + return new Page(blockCopies); + } +} diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/WarningsIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/WarningsIT.java index 12897979a47e0..fb6d23695f837 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/WarningsIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/WarningsIT.java @@ -8,23 +8,21 @@ package org.elasticsearch.xpack.esql.action; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.transport.TransportService; import java.util.List; import java.util.Map; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.greaterThanOrEqualTo; -@TestLogging(value = "org.elasticsearch.xpack.esql:TRACE", reason = "debug") public class WarningsIT extends AbstractEsqlIntegTestCase { - public void testCollectWarnings() { + public void testCollectWarnings() throws Exception { final String node1, node2; if (randomBoolean()) { internalCluster().ensureAtLeastNumDataNodes(2); @@ -64,19 +62,23 @@ public void testCollectWarnings() { EsqlQueryRequest request = new EsqlQueryRequest(); request.query("FROM index-* | EVAL ip = to_ip(host) | STATS s = COUNT(*) by ip | KEEP ip | LIMIT 100"); request.pragmas(randomPragmas()); - PlainActionFuture future = new PlainActionFuture<>(); - client(coordinator.getName()).execute(EsqlQueryAction.INSTANCE, request, ActionListener.runBefore(future, () -> { - var threadpool = internalCluster().getInstance(TransportService.class, coordinator.getName()).getThreadPool(); - Map> responseHeaders = threadpool.getThreadContext().getResponseHeaders(); - List warnings = responseHeaders.getOrDefault("Warning", List.of()) - .stream() - .filter(w -> w.contains("is not an IP string literal")) - .toList(); - int expectedWarnings = Math.min(20, numDocs1 + numDocs2); - // we cap the number of warnings per node - assertThat(warnings.size(), greaterThanOrEqualTo(expectedWarnings)); + CountDownLatch latch = new CountDownLatch(1); + 
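The listener rewrite that follows reads the accumulated `Warning` response headers from inside the callback, where the coordinator's thread context is still the one that ran the query, and only then releases the latch. The filtering step on its own (the map literal is a stand-in for `threadContext.getResponseHeaders()`, and the header strings are invented for illustration):

```java
import java.util.List;
import java.util.Map;

public class WarningFilterDemo {
    public static void main(String[] args) {
        // Stand-in for ts.getThreadPool().getThreadContext().getResponseHeaders();
        // real entries follow the warn-text format, abbreviated here.
        Map<String, List<String>> responseHeaders = Map.of(
            "Warning",
            List.of(
                "299 Elasticsearch \"evaluation of [to_ip(host)] failed: 'a1' is not an IP string literal.\"",
                "299 Elasticsearch \"some unrelated deprecation\""
            )
        );
        List<String> warnings = responseHeaders.getOrDefault("Warning", List.of())
            .stream()
            .filter(w -> w.contains("is not an IP string literal"))
            .toList();
        System.out.println(warnings.size()); // 1 with this stand-in data
    }
}
```

Counting the latch down in a `finally` block keeps an assertion failure inside the listener from hanging the test for the full 30-second await.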
client(coordinator.getName()).execute(EsqlQueryAction.INSTANCE, request, ActionListener.running(() -> { + try { + var threadpool = internalCluster().getInstance(TransportService.class, coordinator.getName()).getThreadPool(); + Map> responseHeaders = threadpool.getThreadContext().getResponseHeaders(); + List warnings = responseHeaders.getOrDefault("Warning", List.of()) + .stream() + .filter(w -> w.contains("is not an IP string literal")) + .toList(); + int expectedWarnings = Math.min(20, numDocs1 + numDocs2); + // we cap the number of warnings per node + assertThat(warnings.size(), greaterThanOrEqualTo(expectedWarnings)); + } finally { + latch.countDown(); + } })); - future.actionGet(30, TimeUnit.SECONDS).close(); + latch.await(30, TimeUnit.SECONDS); } private DiscoveryNode randomDataNode() { diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/plugin/CanMatchIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/plugin/CanMatchIT.java index fd0ffef97ae78..8fc61872c4acd 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/plugin/CanMatchIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/plugin/CanMatchIT.java @@ -209,6 +209,7 @@ public void testAliasFilters() { } } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/103749") public void testFailOnUnavailableShards() throws Exception { internalCluster().ensureAtLeastNumDataNodes(2); String logsOnlyNode = internalCluster().startDataOnlyNode(); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlAsyncGetResultAction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlAsyncGetResultAction.java new file mode 100644 index 0000000000000..1603dd8fd3746 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlAsyncGetResultAction.java @@ -0,0 +1,21 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */
+
+package org.elasticsearch.xpack.esql.action;
+
+import org.elasticsearch.action.ActionType;
+
+public class EsqlAsyncGetResultAction extends ActionType<EsqlQueryResponse> {
+
+    public static final EsqlAsyncGetResultAction INSTANCE = new EsqlAsyncGetResultAction();
+
+    public static final String NAME = "indices:data/read/esql/async/get";
+
+    private EsqlAsyncGetResultAction() {
+        super(NAME, in -> { throw new IllegalArgumentException("can't transport EsqlAsyncGetResultAction"); });
+    }
+}
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequest.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequest.java
index 0de89a4d8de2a..5e8c5c27edd23 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequest.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequest.java
@@ -14,6 +14,7 @@
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.index.query.AbstractQueryBuilder;
 import org.elasticsearch.index.query.QueryBuilder;
 import org.elasticsearch.tasks.CancellableTask;
@@ -43,6 +44,9 @@
 
 public class EsqlQueryRequest extends ActionRequest implements CompositeIndicesRequest {
 
+    public static TimeValue DEFAULT_KEEP_ALIVE = TimeValue.timeValueDays(5);
+    public static TimeValue DEFAULT_WAIT_FOR_COMPLETION = TimeValue.timeValueSeconds(1);
+
     private static final ConstructingObjectParser PARAM_PARSER = new ConstructingObjectParser<>(
         "params",
         true,
@@ -64,7 +68,14 @@ public class EsqlQueryRequest extends ActionRequest implements CompositeIndicesR
     private static final ParseField LOCALE_FIELD = new ParseField("locale");
     private static final ParseField PROFILE_FIELD = new ParseField("profile");
 
-    private static final ObjectParser<EsqlQueryRequest, Void> PARSER = objectParser(EsqlQueryRequest::new);
+    static final ParseField WAIT_FOR_COMPLETION_TIMEOUT = new ParseField("wait_for_completion_timeout");
+    static final ParseField KEEP_ALIVE = new ParseField("keep_alive");
+    static final ParseField KEEP_ON_COMPLETION = new ParseField("keep_on_completion");
+
+    private static final ObjectParser<EsqlQueryRequest, Void> SYNC_PARSER = objectParserSync(EsqlQueryRequest::syncEsqlQueryRequest);
+    private static final ObjectParser<EsqlQueryRequest, Void> ASYNC_PARSER = objectParserAsync(EsqlQueryRequest::asyncEsqlQueryRequest);
+
+    private boolean async;
 
     private String query;
     private boolean columnar;
@@ -73,6 +84,21 @@ public class EsqlQueryRequest extends ActionRequest implements CompositeIndicesR
     private QueryBuilder filter;
     private QueryPragmas pragmas = new QueryPragmas(Settings.EMPTY);
    private List params = List.of();
+    private TimeValue waitForCompletionTimeout = DEFAULT_WAIT_FOR_COMPLETION;
+    private TimeValue keepAlive = DEFAULT_KEEP_ALIVE;
+    private boolean keepOnCompletion;
+
+    private static EsqlQueryRequest syncEsqlQueryRequest() {
+        return new EsqlQueryRequest(false);
+    }
+
+    private static EsqlQueryRequest asyncEsqlQueryRequest() {
+        return new EsqlQueryRequest(true);
+    }
+
+    private EsqlQueryRequest(boolean async) {
+        this.async = async;
+    }
 
     public EsqlQueryRequest(StreamInput in) throws IOException {
         super(in);
@@ -100,6 +126,14 @@ public String query() {
         return query;
     }
 
+    public void async(boolean async) {
+        this.async = async;
+    }
+
+    public boolean async() {
+        return async;
+    }
+
     public void columnar(boolean columnar) {
         this.columnar = columnar;
     }
@@ -155,12 +189,39 @@
public void params(List params) { this.params = params; } - public static EsqlQueryRequest fromXContent(XContentParser parser) { - return PARSER.apply(parser, null); + public TimeValue waitForCompletionTimeout() { + return waitForCompletionTimeout; } - private static ObjectParser objectParser(Supplier supplier) { - ObjectParser parser = new ObjectParser<>("esql/query", false, supplier); + public void waitForCompletionTimeout(TimeValue waitForCompletionTimeout) { + this.waitForCompletionTimeout = waitForCompletionTimeout; + } + + public TimeValue keepAlive() { + return keepAlive; + } + + public void keepAlive(TimeValue keepAlive) { + this.keepAlive = keepAlive; + } + + public boolean keepOnCompletion() { + return keepOnCompletion; + } + + public void keepOnCompletion(boolean keepOnCompletion) { + this.keepOnCompletion = keepOnCompletion; + } + + public static EsqlQueryRequest fromXContentSync(XContentParser parser) { + return SYNC_PARSER.apply(parser, null); + } + + public static EsqlQueryRequest fromXContentAsync(XContentParser parser) { + return ASYNC_PARSER.apply(parser, null); + } + + private static void objectParserCommon(ObjectParser parser) { parser.declareString(EsqlQueryRequest::query, QUERY_FIELD); parser.declareBoolean(EsqlQueryRequest::columnar, COLUMNAR_FIELD); parser.declareObject(EsqlQueryRequest::filter, (p, c) -> AbstractQueryBuilder.parseTopLevelQuery(p), FILTER_FIELD); @@ -172,7 +233,30 @@ private static ObjectParser objectParser(Supplier request.locale(Locale.forLanguageTag(localeTag)), LOCALE_FIELD); parser.declareBoolean(EsqlQueryRequest::profile, PROFILE_FIELD); + } + + private static ObjectParser objectParserSync(Supplier supplier) { + ObjectParser parser = new ObjectParser<>("esql/query", false, supplier); + objectParserCommon(parser); + return parser; + } + private static ObjectParser objectParserAsync(Supplier supplier) { + ObjectParser parser = new ObjectParser<>("esql/async_query", false, supplier); + objectParserCommon(parser); + parser.declareBoolean(EsqlQueryRequest::keepOnCompletion, KEEP_ON_COMPLETION); + parser.declareField( + EsqlQueryRequest::waitForCompletionTimeout, + (p, c) -> TimeValue.parseTimeValue(p.text(), WAIT_FOR_COMPLETION_TIMEOUT.getPreferredName()), + WAIT_FOR_COMPLETION_TIMEOUT, + ObjectParser.ValueType.VALUE + ); + parser.declareField( + EsqlQueryRequest::keepAlive, + (p, c) -> TimeValue.parseTimeValue(p.text(), KEEP_ALIVE.getPreferredName()), + KEEP_ALIVE, + ObjectParser.ValueType.VALUE + ); return parser; } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequestBuilder.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequestBuilder.java index be3aeec190ded..4746ea81aa0c1 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequestBuilder.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequestBuilder.java @@ -9,6 +9,7 @@ import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.client.internal.ElasticsearchClient; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.xpack.esql.plugin.QueryPragmas; @@ -37,4 +38,24 @@ public EsqlQueryRequestBuilder pragmas(QueryPragmas pragmas) { request.pragmas(pragmas); return this; } + + public EsqlQueryRequestBuilder waitForCompletionTimeout(TimeValue waitForCompletionTimeout) { + request.waitForCompletionTimeout(waitForCompletionTimeout); + return this; + } + + 
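Together with the `keepAlive`, `keepOnCompletion`, and `async` setters that follow, these builder methods cover everything an async submit needs. A sketch of how a caller might put them together (the `client` variable and the exact builder constructor and `query(String)` setter are assumed here, not shown in this diff; the values in the comments are the `DEFAULT_*` constants added to `EsqlQueryRequest`):

```java
// Sketch: submitting an ES|QL query asynchronously and deciding whether to poll.
EsqlQueryResponse resp = new EsqlQueryRequestBuilder(client)  // assumed constructor
    .query("FROM logs-* | STATS SUM(v)")                      // assumed query(String) setter
    .async(true)
    .waitForCompletionTimeout(TimeValue.timeValueSeconds(1))  // default: DEFAULT_WAIT_FOR_COMPLETION
    .keepAlive(TimeValue.timeValueDays(5))                    // default: DEFAULT_KEEP_ALIVE
    .keepOnCompletion(true)                                   // store the result even if it beats the timeout
    .execute()
    .actionGet();
if (resp.asyncExecutionId().isPresent() && resp.isRunning()) {
    String id = resp.asyncExecutionId().get();
    // Not done within the wait: poll EsqlAsyncGetResultAction with a
    // GetAsyncResultRequest(id), as EsqlAsyncActionIT.getAsyncResponse(...) does.
}
```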
public EsqlQueryRequestBuilder keepAlive(TimeValue keepAlive) { + request.keepAlive(keepAlive); + return this; + } + + public EsqlQueryRequestBuilder keepOnCompletion(boolean keepOnCompletion) { + request.keepOnCompletion(keepOnCompletion); + return this; + } + + public EsqlQueryRequestBuilder async(boolean async) { + request.async(async); + return this; + } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponse.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponse.java index e571713420950..63686820574b5 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponse.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponse.java @@ -7,12 +7,9 @@ package org.elasticsearch.xpack.esql.action; -import org.apache.lucene.util.BytesRef; import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -20,106 +17,92 @@ import org.elasticsearch.common.xcontent.ChunkedToXContent; import org.elasticsearch.common.xcontent.ChunkedToXContentHelper; import org.elasticsearch.common.xcontent.ChunkedToXContentObject; -import org.elasticsearch.common.xcontent.XContentHelper; -import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.BlockStreamInput; -import org.elasticsearch.compute.data.BooleanBlock; -import org.elasticsearch.compute.data.BytesRefBlock; -import org.elasticsearch.compute.data.DoubleBlock; -import org.elasticsearch.compute.data.IntBlock; -import org.elasticsearch.compute.data.LongBlock; import org.elasticsearch.compute.data.Page; -import org.elasticsearch.compute.lucene.UnsupportedValueSource; import org.elasticsearch.compute.operator.DriverProfile; +import org.elasticsearch.core.AbstractRefCounted; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Releasables; -import org.elasticsearch.search.DocValueFormat; -import org.elasticsearch.xcontent.InstantiatingObjectParser; -import org.elasticsearch.xcontent.ObjectParser; -import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContent; -import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParser; -import org.elasticsearch.xcontent.XContentParserConfiguration; -import org.elasticsearch.xcontent.json.JsonXContent; -import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; -import org.elasticsearch.xpack.esql.planner.PlannerUtils; -import org.elasticsearch.xpack.esql.type.EsqlDataTypes; -import org.elasticsearch.xpack.versionfield.Version; import java.io.IOException; -import java.io.UncheckedIOException; -import java.util.ArrayList; import java.util.Collections; import java.util.Iterator; import java.util.List; -import java.util.Map; import java.util.Objects; -import java.util.function.Function; - -import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; -import static org.elasticsearch.xpack.ql.util.DateUtils.UTC_DATE_TIME_FORMATTER; -import static org.elasticsearch.xpack.ql.util.NumericUtils.asLongUnsigned; -import 
static org.elasticsearch.xpack.ql.util.NumericUtils.unsignedLongAsNumber; -import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.CARTESIAN; -import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.GEO; -import static org.elasticsearch.xpack.ql.util.StringUtils.parseIP; +import java.util.Optional; public class EsqlQueryResponse extends ActionResponse implements ChunkedToXContentObject, Releasable { - private static final InstantiatingObjectParser PARSER; - static { - InstantiatingObjectParser.Builder parser = InstantiatingObjectParser.builder( - "esql/query_response", - true, - EsqlQueryResponse.class - ); - parser.declareObjectArray(constructorArg(), (p, c) -> ColumnInfo.fromXContent(p), new ParseField("columns")); - parser.declareField(constructorArg(), (p, c) -> p.list(), new ParseField("values"), ObjectParser.ValueType.OBJECT_ARRAY); - PARSER = parser.build(); - } + + @SuppressWarnings("this-escape") + private final AbstractRefCounted counted = AbstractRefCounted.of(this::closeInternal); private final List columns; private final List pages; private final Profile profile; private final boolean columnar; - - public EsqlQueryResponse(List columns, List pages, @Nullable Profile profile, boolean columnar) { + private final String asyncExecutionId; + private final boolean isRunning; + // True if this response is as a result of an async query request + private final boolean isAsync; + + public EsqlQueryResponse( + List columns, + List pages, + @Nullable Profile profile, + boolean columnar, + @Nullable String asyncExecutionId, + boolean isRunning, + boolean isAsync + ) { this.columns = columns; this.pages = pages; this.profile = profile; this.columnar = columnar; + this.asyncExecutionId = asyncExecutionId; + this.isRunning = isRunning; + this.isAsync = isAsync; } - public EsqlQueryResponse(List columns, List> values) { - this.columns = columns; - this.pages = List.of(valuesToPage(columns.stream().map(ColumnInfo::type).toList(), values)); - this.profile = null; - this.columnar = false; + public EsqlQueryResponse(List columns, List pages, @Nullable Profile profile, boolean columnar, boolean isAsync) { + this(columns, pages, profile, columnar, null, false, isAsync); } /** * Build a reader for the response. 
*/ public static Writeable.Reader reader(BlockFactory blockFactory) { - return in -> new EsqlQueryResponse(new BlockStreamInput(in, blockFactory)); + return in -> deserialize(new BlockStreamInput(in, blockFactory)); } - private EsqlQueryResponse(BlockStreamInput in) throws IOException { - super(in); - this.columns = in.readCollectionAsList(ColumnInfo::new); - this.pages = in.readCollectionAsList(Page::new); + static EsqlQueryResponse deserialize(BlockStreamInput in) throws IOException { + String asyncExecutionId = null; + boolean isRunning = false; + boolean isAsync = false; + Profile profile = null; + if (in.getTransportVersion().onOrAfter(TransportVersions.ESQL_ASYNC_QUERY)) { + asyncExecutionId = in.readOptionalString(); + isRunning = in.readBoolean(); + isAsync = in.readBoolean(); + } + List columns = in.readCollectionAsList(ColumnInfo::new); + List pages = in.readCollectionAsList(Page::new); if (in.getTransportVersion().onOrAfter(TransportVersions.ESQL_PROFILE)) { - this.profile = in.readOptionalWriteable(Profile::new); - } else { - this.profile = null; + profile = in.readOptionalWriteable(Profile::new); } - this.columnar = in.readBoolean(); + boolean columnar = in.readBoolean(); + return new EsqlQueryResponse(columns, pages, profile, columnar, asyncExecutionId, isRunning, isAsync); } @Override public void writeTo(StreamOutput out) throws IOException { + if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_ASYNC_QUERY)) { + out.writeOptionalString(asyncExecutionId); + out.writeBoolean(isRunning); + out.writeBoolean(isAsync); + } out.writeCollection(columns); out.writeCollection(pages); if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_PROFILE)) { @@ -137,7 +120,8 @@ List pages() { } public Iterator> values() { - return pagesToValues(columns.stream().map(ColumnInfo::type).toList(), pages); + List dataTypes = columns.stream().map(ColumnInfo::type).toList(); + return ResponseValueUtils.pagesToValues(dataTypes, pages); } public Profile profile() { @@ -148,63 +132,42 @@ public boolean columnar() { return columnar; } - @Override - public Iterator toXContentChunked(ToXContent.Params params) { - final BytesRef scratch = new BytesRef(); - final Iterator valuesIt; - if (pages.isEmpty()) { - valuesIt = Collections.emptyIterator(); - } else if (columnar) { - valuesIt = Iterators.flatMap( - Iterators.forRange( - 0, - columns().size(), - column -> Iterators.concat( - Iterators.single(((builder, p) -> builder.startArray())), - Iterators.flatMap(pages.iterator(), page -> { - ColumnInfo.PositionToXContent toXContent = columns.get(column) - .positionToXContent(page.getBlock(column), scratch); - return Iterators.forRange( - 0, - page.getPositionCount(), - position -> (builder, p) -> toXContent.positionToXContent(builder, p, position) - ); - }), - ChunkedToXContentHelper.endArray() - ) - ), - Function.identity() - ); - } else { - valuesIt = Iterators.flatMap(pages.iterator(), page -> { - final int columnCount = columns.size(); - assert page.getBlockCount() == columnCount : page.getBlockCount() + " != " + columnCount; - final ColumnInfo.PositionToXContent[] toXContents = new ColumnInfo.PositionToXContent[columnCount]; - for (int column = 0; column < columnCount; column++) { - toXContents[column] = columns.get(column).positionToXContent(page.getBlock(column), scratch); + public Optional asyncExecutionId() { + return Optional.ofNullable(asyncExecutionId); + } + + public boolean isRunning() { + return isRunning; + } + + public boolean isAsync() { + return isRunning; + } + + private 
Iterator asyncPropertiesOrEmpty() { + if (isAsync) { + return ChunkedToXContentHelper.singleChunk((builder, params) -> { + if (asyncExecutionId != null) { + builder.field("id", asyncExecutionId); } - return Iterators.forRange(0, page.getPositionCount(), position -> (builder, p) -> { - builder.startArray(); - for (int c = 0; c < columnCount; c++) { - toXContents[c].positionToXContent(builder, p, position); - } - return builder.endArray(); - }); + builder.field("is_running", isRunning); + return builder; }); + } else { + return Collections.emptyIterator(); } - Iterator columnsRender = ChunkedToXContentHelper.singleChunk((builder, p) -> { - builder.startArray("columns"); - for (ColumnInfo col : columns) { - col.toXContent(builder, p); - } - return builder.endArray(); - }); + } + + @Override + public Iterator toXContentChunked(ToXContent.Params params) { + final Iterator valuesIt = ResponseXContentUtils.columnValues(this.columns, this.pages, columnar); Iterator profileRender = profile == null ? List.of().iterator() : ChunkedToXContentHelper.field("profile", profile, params); return Iterators.concat( ChunkedToXContentHelper.startObject(), - columnsRender, + asyncPropertiesOrEmpty(), + ResponseXContentUtils.columnHeadings(columns), ChunkedToXContentHelper.array("values", valuesIt), profileRender, ChunkedToXContentHelper.endObject() @@ -216,16 +179,14 @@ public boolean isFragment() { return false; } - public static EsqlQueryResponse fromXContent(XContentParser parser) { - return PARSER.apply(parser, null); - } - @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; EsqlQueryResponse that = (EsqlQueryResponse) o; return Objects.equals(columns, that.columns) + && Objects.equals(asyncExecutionId, that.asyncExecutionId) + && Objects.equals(isRunning, that.isRunning) && columnar == that.columnar && Iterators.equals(values(), that.values(), (row1, row2) -> Iterators.equals(row1, row2, Objects::equals)) && Objects.equals(profile, that.profile); @@ -233,7 +194,13 @@ public boolean equals(Object o) { @Override public int hashCode() { - return Objects.hash(columns, Iterators.hashCode(values(), row -> Iterators.hashCode(row, Objects::hashCode)), columnar); + return Objects.hash( + asyncExecutionId, + isRunning, + columns, + Iterators.hashCode(values(), row -> Iterators.hashCode(row, Objects::hashCode)), + columnar + ); } @Override @@ -242,129 +209,32 @@ public String toString() { } @Override - public void close() { - Releasables.close(() -> Iterators.map(pages.iterator(), p -> p::releaseBlocks)); + public void incRef() { + tryIncRef(); } - public static Iterator> pagesToValues(List dataTypes, List pages) { - BytesRef scratch = new BytesRef(); - return Iterators.flatMap( - pages.iterator(), - page -> Iterators.forRange(0, page.getPositionCount(), p -> Iterators.forRange(0, page.getBlockCount(), b -> { - Block block = page.getBlock(b); - if (block.isNull(p)) { - return null; - } - /* - * Use the ESQL data type to map to the output to make sure compute engine - * respects its types. See the INTEGER clause where is doesn't always - * respect it. 
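
The deserialize/writeTo pair above follows the usual transport-versioning idiom: the fields introduced by ESQL_ASYNC_QUERY are written and read only when both sides speak that version, and always in the same order. A minimal standalone sketch of the idiom, with plain java.io streams standing in for StreamInput/StreamOutput (an assumed simplification):

    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    class VersionGatedWire {
        static final int ESQL_ASYNC_QUERY = 2; // hypothetical numeric version id

        static void writeAsyncFields(DataOutputStream out, int wireVersion, String id, boolean running) throws IOException {
            if (wireVersion >= ESQL_ASYNC_QUERY) {
                out.writeBoolean(id != null); // stand-in for writeOptionalString
                if (id != null) {
                    out.writeUTF(id);
                }
                out.writeBoolean(running);
            }
            // the pre-existing fields follow, so an old reader still finds them where it expects
        }

        static String readAsyncId(DataInputStream in, int wireVersion) throws IOException {
            if (wireVersion >= ESQL_ASYNC_QUERY) {
                String id = in.readBoolean() ? in.readUTF() : null;
                in.readBoolean(); // isRunning, read to keep the stream aligned
                return id;
            }
            return null; // default when the peer predates the field
        }
    }
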
- */ - int count = block.getValueCount(p); - int start = block.getFirstValueIndex(p); - String dataType = dataTypes.get(b); - if (count == 1) { - return valueAt(dataType, block, start, scratch); - } - List thisResult = new ArrayList<>(count); - int end = count + start; - for (int i = start; i < end; i++) { - thisResult.add(valueAt(dataType, block, i, scratch)); - } - return thisResult; - })) - ); + @Override + public boolean tryIncRef() { + return counted.tryIncRef(); } - private static Object valueAt(String dataType, Block block, int offset, BytesRef scratch) { - return switch (dataType) { - case "unsigned_long" -> unsignedLongAsNumber(((LongBlock) block).getLong(offset)); - case "long" -> ((LongBlock) block).getLong(offset); - case "integer" -> ((IntBlock) block).getInt(offset); - case "double" -> ((DoubleBlock) block).getDouble(offset); - case "keyword", "text" -> ((BytesRefBlock) block).getBytesRef(offset, scratch).utf8ToString(); - case "ip" -> { - BytesRef val = ((BytesRefBlock) block).getBytesRef(offset, scratch); - yield DocValueFormat.IP.format(val); - } - case "date" -> { - long longVal = ((LongBlock) block).getLong(offset); - yield UTC_DATE_TIME_FORMATTER.formatMillis(longVal); - } - case "boolean" -> ((BooleanBlock) block).getBoolean(offset); - case "version" -> new Version(((BytesRefBlock) block).getBytesRef(offset, scratch)).toString(); - case "geo_point" -> GEO.longAsPoint(((LongBlock) block).getLong(offset)); - case "cartesian_point" -> CARTESIAN.longAsPoint(((LongBlock) block).getLong(offset)); - case "unsupported" -> UnsupportedValueSource.UNSUPPORTED_OUTPUT; - case "_source" -> { - BytesRef val = ((BytesRefBlock) block).getBytesRef(offset, scratch); - try { - try (XContentParser parser = XContentHelper.createParser(XContentParserConfiguration.EMPTY, new BytesArray(val))) { - parser.nextToken(); - yield parser.mapOrdered(); - } - } catch (IOException e) { - throw new UncheckedIOException(e); - } - } - default -> throw EsqlIllegalArgumentException.illegalDataType(dataType); - }; + @Override + public boolean decRef() { + return counted.decRef(); } - /** - * Convert a list of values to Pages so we can parse from xcontent. It's not - * super efficient but it doesn't really have to be. 
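
The response now delegates lifecycle management to a reference count: close() merely drops one reference, and the pages are released only when the last holder lets go. A simplified model of what AbstractRefCounted provides (assumption: a plain AtomicInteger in place of the real implementation):

    import java.util.concurrent.atomic.AtomicInteger;

    public class RefCountedResource implements AutoCloseable {
        private final AtomicInteger refs = new AtomicInteger(1); // the creator holds the first reference

        public boolean tryIncRef() {
            int current;
            do {
                current = refs.get();
                if (current == 0) {
                    return false; // already fully released
                }
            } while (refs.compareAndSet(current, current + 1) == false);
            return true;
        }

        public boolean decRef() {
            if (refs.decrementAndGet() == 0) {
                closeInternal(); // runs exactly once, on the last release
                return true;
            }
            return false;
        }

        @Override
        public void close() {
            decRef(); // close() is just "drop my reference"
        }

        void closeInternal() {
            // release the underlying pages/blocks here
        }
    }
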
- */ - private static Page valuesToPage(List dataTypes, List> values) { - List results = dataTypes.stream() - .map(c -> PlannerUtils.toElementType(EsqlDataTypes.fromName(c)).newBlockBuilder(values.size())) - .toList(); - - for (List row : values) { - for (int c = 0; c < row.size(); c++) { - var builder = results.get(c); - var value = row.get(c); - switch (dataTypes.get(c)) { - case "unsigned_long" -> ((LongBlock.Builder) builder).appendLong(asLongUnsigned(((Number) value).longValue())); - case "long" -> ((LongBlock.Builder) builder).appendLong(((Number) value).longValue()); - case "integer" -> ((IntBlock.Builder) builder).appendInt(((Number) value).intValue()); - case "double" -> ((DoubleBlock.Builder) builder).appendDouble(((Number) value).doubleValue()); - case "keyword", "text", "unsupported" -> ((BytesRefBlock.Builder) builder).appendBytesRef( - new BytesRef(value.toString()) - ); - case "ip" -> ((BytesRefBlock.Builder) builder).appendBytesRef(parseIP(value.toString())); - case "date" -> { - long longVal = UTC_DATE_TIME_FORMATTER.parseMillis(value.toString()); - ((LongBlock.Builder) builder).appendLong(longVal); - } - case "boolean" -> ((BooleanBlock.Builder) builder).appendBoolean(((Boolean) value)); - case "null" -> builder.appendNull(); - case "version" -> ((BytesRefBlock.Builder) builder).appendBytesRef(new Version(value.toString()).toBytesRef()); - case "_source" -> { - @SuppressWarnings("unchecked") - Map o = (Map) value; - try { - try (XContentBuilder sourceBuilder = JsonXContent.contentBuilder()) { - sourceBuilder.map(o); - ((BytesRefBlock.Builder) builder).appendBytesRef(BytesReference.bytes(sourceBuilder).toBytesRef()); - } - } catch (IOException e) { - throw new UncheckedIOException(e); - } - } - case "geo_point" -> { - long longVal = GEO.pointAsLong(GEO.stringAsPoint(value.toString())); - ((LongBlock.Builder) builder).appendLong(longVal); - } - case "cartesian_point" -> { - long longVal = CARTESIAN.pointAsLong(CARTESIAN.stringAsPoint(value.toString())); - ((LongBlock.Builder) builder).appendLong(longVal); - } - default -> throw EsqlIllegalArgumentException.illegalDataType(dataTypes.get(c)); - } - } - } - return new Page(results.stream().map(Block.Builder::build).toArray(Block[]::new)); + @Override + public boolean hasReferences() { + return counted.hasReferences(); + } + + @Override + public void close() { + decRef(); + } + + void closeInternal() { + Releasables.close(() -> Iterators.map(pages.iterator(), p -> p::releaseBlocks)); } public static class Profile implements Writeable, ChunkedToXContentObject { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryTask.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryTask.java new file mode 100644 index 0000000000000..917355b2d88b5 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryTask.java @@ -0,0 +1,38 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.action; + +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.tasks.TaskId; +import org.elasticsearch.xpack.core.async.AsyncExecutionId; +import org.elasticsearch.xpack.core.async.StoredAsyncTask; + +import java.util.List; +import java.util.Map; + +public class EsqlQueryTask extends StoredAsyncTask { + + public EsqlQueryTask( + long id, + String type, + String action, + String description, + TaskId parentTaskId, + Map headers, + Map originHeaders, + AsyncExecutionId asyncExecutionId, + TimeValue keepAlive + ) { + super(id, type, action, description, parentTaskId, headers, originHeaders, asyncExecutionId, keepAlive); + } + + @Override + public EsqlQueryResponse getCurrentResult() { + return new EsqlQueryResponse(List.of(), List.of(), null, false, getExecutionId().getEncoded(), true, true); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlResponseListener.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlResponseListener.java index ee641cd9209a7..7b525642009a7 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlResponseListener.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlResponseListener.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.esql.action; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.core.Releasable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.logging.LogManager; import org.elasticsearch.logging.Logger; @@ -16,11 +17,12 @@ import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.rest.action.RestResponseListener; +import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener; import org.elasticsearch.xcontent.MediaType; import org.elasticsearch.xpack.esql.formatter.TextFormat; import org.elasticsearch.xpack.esql.plugin.EsqlMediaTypeParser; +import java.io.IOException; import java.util.Locale; import java.util.concurrent.TimeUnit; @@ -31,7 +33,7 @@ /** * Listens for a single {@link EsqlQueryResponse}, builds a corresponding {@link RestResponse} and sends it. */ -public class EsqlResponseListener extends RestResponseListener { +public final class EsqlResponseListener extends RestRefCountedChunkedToXContentListener { /** * A simple, thread-safe stop watch for timing a single action. * Allows to stop the time for building a response and to log it at a later point. 
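
A minimal version of the "simple, thread-safe stop watch" described in the javadoc above might look like this (a sketch, not the listener's actual code):

    import java.util.concurrent.atomic.AtomicLong;

    public class OneShotStopWatch {
        private final long startNanos = System.nanoTime();
        private final AtomicLong stoppedAt = new AtomicLong(-1);

        /** Stops on the first call; every later call observes the same elapsed time. */
        public long stopNanos() {
            stoppedAt.compareAndSet(-1, System.nanoTime());
            return stoppedAt.get() - startNanos;
        }
    }
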
@@ -118,8 +120,13 @@ public EsqlResponseListener(RestChannel channel, RestRequest restRequest, EsqlQu } @Override - public RestResponse buildResponse(EsqlQueryResponse esqlResponse) throws Exception { + protected void processResponse(EsqlQueryResponse esqlQueryResponse) throws IOException { + channel.sendResponse(buildResponse(esqlQueryResponse)); + } + + private RestResponse buildResponse(EsqlQueryResponse esqlResponse) throws IOException { boolean success = false; + final Releasable releasable = releasableFromResponse(esqlResponse); try { RestResponse restResponse; if (mediaType instanceof TextFormat format) { @@ -128,13 +135,13 @@ public RestResponse buildResponse(EsqlQueryResponse esqlResponse) throws Excepti ChunkedRestResponseBody.fromTextChunks( format.contentType(restRequest), format.format(restRequest, esqlResponse), - esqlResponse + releasable ) ); } else { restResponse = RestResponse.chunked( RestStatus.OK, - ChunkedRestResponseBody.fromXContent(esqlResponse, channel.request(), channel, esqlResponse) + ChunkedRestResponseBody.fromXContent(esqlResponse, channel.request(), channel, releasable) ); } long tookNanos = stopWatch.stop().getNanos(); @@ -143,7 +150,7 @@ public RestResponse buildResponse(EsqlQueryResponse esqlResponse) throws Excepti return restResponse; } finally { if (success == false) { - esqlResponse.close(); + releasable.close(); } } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ResponseValueUtils.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ResponseValueUtils.java new file mode 100644 index 0000000000000..8a4efa1d16a69 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ResponseValueUtils.java @@ -0,0 +1,178 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
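
buildResponse() above transfers ownership of the releasable into the chunked response body and closes it only if building fails. The same hand-off idiom in isolation (a sketch, with a generic AutoCloseable standing in for the Releasable):

    import java.util.function.Supplier;

    class ReleaseOnFailure {
        static <T> T buildOrRelease(AutoCloseable resource, Supplier<T> build) throws Exception {
            boolean success = false;
            try {
                T response = build.get(); // may throw
                success = true;           // ownership moves to the returned response
                return response;
            } finally {
                if (success == false) {
                    resource.close();
                }
            }
        }
    }
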
+ */
+
+package org.elasticsearch.xpack.esql.action;
+
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.collect.Iterators;
+import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.compute.data.Block;
+import org.elasticsearch.compute.data.BlockFactory;
+import org.elasticsearch.compute.data.BooleanBlock;
+import org.elasticsearch.compute.data.BytesRefBlock;
+import org.elasticsearch.compute.data.DoubleBlock;
+import org.elasticsearch.compute.data.IntBlock;
+import org.elasticsearch.compute.data.LongBlock;
+import org.elasticsearch.compute.data.Page;
+import org.elasticsearch.compute.lucene.UnsupportedValueSource;
+import org.elasticsearch.search.DocValueFormat;
+import org.elasticsearch.xcontent.XContentBuilder;
+import org.elasticsearch.xcontent.XContentParser;
+import org.elasticsearch.xcontent.XContentParserConfiguration;
+import org.elasticsearch.xcontent.json.JsonXContent;
+import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException;
+import org.elasticsearch.xpack.esql.planner.PlannerUtils;
+import org.elasticsearch.xpack.esql.type.EsqlDataTypes;
+import org.elasticsearch.xpack.versionfield.Version;
+
+import java.io.IOException;
+import java.io.UncheckedIOException;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+
+import static org.elasticsearch.xpack.ql.util.DateUtils.UTC_DATE_TIME_FORMATTER;
+import static org.elasticsearch.xpack.ql.util.NumericUtils.asLongUnsigned;
+import static org.elasticsearch.xpack.ql.util.NumericUtils.unsignedLongAsNumber;
+import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.CARTESIAN;
+import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.GEO;
+import static org.elasticsearch.xpack.ql.util.StringUtils.parseIP;
+
+/**
+ * Collection of static utility methods for helping transform response data between pages and values.
+ */
+public final class ResponseValueUtils {
+
+    /**
+     * Returns an iterator of iterators over the values in the given pages. There is one
+     * inner iterator for each position (row).
+     */
+    public static Iterator<Iterator<Object>> pagesToValues(List<String> dataTypes, List<Page> pages) {
+        BytesRef scratch = new BytesRef();
+        return Iterators.flatMap(
+            pages.iterator(),
+            page -> Iterators.forRange(0, page.getPositionCount(), p -> Iterators.forRange(0, page.getBlockCount(), b -> {
+                Block block = page.getBlock(b);
+                if (block.isNull(p)) {
+                    return null;
+                }
+                /*
+                 * Use the ESQL data type to map to the output to make sure compute engine
+                 * respects its types. See the INTEGER clause where it doesn't always
+                 * respect it.
+ */ + int count = block.getValueCount(p); + int start = block.getFirstValueIndex(p); + String dataType = dataTypes.get(b); + if (count == 1) { + return valueAt(dataType, block, start, scratch); + } + List thisResult = new ArrayList<>(count); + int end = count + start; + for (int i = start; i < end; i++) { + thisResult.add(valueAt(dataType, block, i, scratch)); + } + return thisResult; + })) + ); + } + + private static Object valueAt(String dataType, Block block, int offset, BytesRef scratch) { + return switch (dataType) { + case "unsigned_long" -> unsignedLongAsNumber(((LongBlock) block).getLong(offset)); + case "long" -> ((LongBlock) block).getLong(offset); + case "integer" -> ((IntBlock) block).getInt(offset); + case "double" -> ((DoubleBlock) block).getDouble(offset); + case "keyword", "text" -> ((BytesRefBlock) block).getBytesRef(offset, scratch).utf8ToString(); + case "ip" -> { + BytesRef val = ((BytesRefBlock) block).getBytesRef(offset, scratch); + yield DocValueFormat.IP.format(val); + } + case "date" -> { + long longVal = ((LongBlock) block).getLong(offset); + yield UTC_DATE_TIME_FORMATTER.formatMillis(longVal); + } + case "boolean" -> ((BooleanBlock) block).getBoolean(offset); + case "version" -> new Version(((BytesRefBlock) block).getBytesRef(offset, scratch)).toString(); + case "geo_point" -> GEO.longAsPoint(((LongBlock) block).getLong(offset)); + case "cartesian_point" -> CARTESIAN.longAsPoint(((LongBlock) block).getLong(offset)); + case "unsupported" -> UnsupportedValueSource.UNSUPPORTED_OUTPUT; + case "_source" -> { + BytesRef val = ((BytesRefBlock) block).getBytesRef(offset, scratch); + try { + try (XContentParser parser = XContentHelper.createParser(XContentParserConfiguration.EMPTY, new BytesArray(val))) { + parser.nextToken(); + yield parser.mapOrdered(); + } + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + default -> throw EsqlIllegalArgumentException.illegalDataType(dataType); + }; + } + + /** + * Converts a list of values to Pages so that we can parse from xcontent. It's not + * super efficient, but it doesn't really have to be. 
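
valueAt() is invoked once per value, and pagesToValues() decides per position whether to emit a bare scalar, a list, or null. That rule in a self-contained model (List&lt;Object&gt; standing in for one block position; the real code reads from typed blocks):

    import java.util.List;

    public class PositionValues {
        static Object valueOrList(List<Object> valuesAtPosition) {
            if (valuesAtPosition.isEmpty()) {
                return null;                      // null position
            }
            if (valuesAtPosition.size() == 1) {
                return valuesAtPosition.get(0);   // single value: emit the bare scalar
            }
            return List.copyOf(valuesAtPosition); // multivalued: emit a list
        }

        public static void main(String[] args) {
            System.out.println(valueOrList(List.of()));       // null
            System.out.println(valueOrList(List.of(42L)));    // 42
            System.out.println(valueOrList(List.of(1L, 2L))); // [1, 2]
        }
    }
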
+ */ + static Page valuesToPage(BlockFactory blockFactory, List columns, List> values) { + List dataTypes = columns.stream().map(ColumnInfo::type).toList(); + List results = dataTypes.stream() + .map(c -> PlannerUtils.toElementType(EsqlDataTypes.fromName(c)).newBlockBuilder(values.size(), blockFactory)) + .toList(); + + for (List row : values) { + for (int c = 0; c < row.size(); c++) { + var builder = results.get(c); + var value = row.get(c); + switch (dataTypes.get(c)) { + case "unsigned_long" -> ((LongBlock.Builder) builder).appendLong(asLongUnsigned(((Number) value).longValue())); + case "long" -> ((LongBlock.Builder) builder).appendLong(((Number) value).longValue()); + case "integer" -> ((IntBlock.Builder) builder).appendInt(((Number) value).intValue()); + case "double" -> ((DoubleBlock.Builder) builder).appendDouble(((Number) value).doubleValue()); + case "keyword", "text", "unsupported" -> ((BytesRefBlock.Builder) builder).appendBytesRef( + new BytesRef(value.toString()) + ); + case "ip" -> ((BytesRefBlock.Builder) builder).appendBytesRef(parseIP(value.toString())); + case "date" -> { + long longVal = UTC_DATE_TIME_FORMATTER.parseMillis(value.toString()); + ((LongBlock.Builder) builder).appendLong(longVal); + } + case "boolean" -> ((BooleanBlock.Builder) builder).appendBoolean(((Boolean) value)); + case "null" -> builder.appendNull(); + case "version" -> ((BytesRefBlock.Builder) builder).appendBytesRef(new Version(value.toString()).toBytesRef()); + case "_source" -> { + @SuppressWarnings("unchecked") + Map o = (Map) value; + try { + try (XContentBuilder sourceBuilder = JsonXContent.contentBuilder()) { + sourceBuilder.map(o); + ((BytesRefBlock.Builder) builder).appendBytesRef(BytesReference.bytes(sourceBuilder).toBytesRef()); + } + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + case "geo_point" -> { + long longVal = GEO.pointAsLong(GEO.stringAsPoint(value.toString())); + ((LongBlock.Builder) builder).appendLong(longVal); + } + case "cartesian_point" -> { + long longVal = CARTESIAN.pointAsLong(CARTESIAN.stringAsPoint(value.toString())); + ((LongBlock.Builder) builder).appendLong(longVal); + } + default -> throw EsqlIllegalArgumentException.illegalDataType(dataTypes.get(c)); + } + } + } + return new Page(results.stream().map(Block.Builder::build).toArray(Block[]::new)); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ResponseXContentUtils.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ResponseXContentUtils.java new file mode 100644 index 0000000000000..e28e6beebabed --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/ResponseXContentUtils.java @@ -0,0 +1,91 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.action; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.collect.Iterators; +import org.elasticsearch.common.xcontent.ChunkedToXContentHelper; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.xcontent.ToXContent; + +import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import java.util.function.Function; + +/** + * Collection of static utility methods for helping transform response data to XContent. 
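
valuesToPage() below performs the inverse pivot: row-oriented input values become column-oriented blocks. The pivot itself, stripped of the typed builders (Object arrays as an assumed simplification):

    import java.util.List;

    public class RowsToColumns {
        // Object arrays stand in for the typed Block builders the real method uses.
        static Object[][] toColumns(List<List<Object>> rows, int columnCount) {
            Object[][] columns = new Object[columnCount][rows.size()];
            for (int r = 0; r < rows.size(); r++) {
                for (int c = 0; c < columnCount; c++) {
                    columns[c][r] = rows.get(r).get(c); // pivot row-major input to column-major storage
                }
            }
            return columns;
        }
    }
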
+ */ +final class ResponseXContentUtils { + + /** Returns the column headings for the given columns. */ + static Iterator columnHeadings(List columns) { + return ChunkedToXContentHelper.singleChunk((builder, params) -> { + builder.startArray("columns"); + for (ColumnInfo col : columns) { + col.toXContent(builder, params); + } + return builder.endArray(); + }); + } + + /** Returns the column values for the given pages (described by the column infos). */ + static Iterator columnValues(List columns, List pages, boolean columnar) { + if (pages.isEmpty()) { + return Collections.emptyIterator(); + } else if (columnar) { + return columnarValues(columns, pages); + } else { + return rowValues(columns, pages); + } + } + + /** Returns a columnar based representation of the values in the given pages (described by the column infos). */ + static Iterator columnarValues(List columns, List pages) { + final BytesRef scratch = new BytesRef(); + return Iterators.flatMap( + Iterators.forRange( + 0, + columns.size(), + column -> Iterators.concat( + Iterators.single(((builder, params) -> builder.startArray())), + Iterators.flatMap(pages.iterator(), page -> { + ColumnInfo.PositionToXContent toXContent = columns.get(column).positionToXContent(page.getBlock(column), scratch); + return Iterators.forRange( + 0, + page.getPositionCount(), + position -> (builder, params) -> toXContent.positionToXContent(builder, params, position) + ); + }), + ChunkedToXContentHelper.endArray() + ) + ), + Function.identity() + ); + } + + /** Returns a row based representation of the values in the given pages (described by the column infos). */ + static Iterator rowValues(List columns, List pages) { + final BytesRef scratch = new BytesRef(); + return Iterators.flatMap(pages.iterator(), page -> { + final int columnCount = columns.size(); + assert page.getBlockCount() == columnCount : page.getBlockCount() + " != " + columnCount; + final ColumnInfo.PositionToXContent[] toXContents = new ColumnInfo.PositionToXContent[columnCount]; + for (int column = 0; column < columnCount; column++) { + toXContents[column] = columns.get(column).positionToXContent(page.getBlock(column), scratch); + } + return Iterators.forRange(0, page.getPositionCount(), position -> (builder, params) -> { + builder.startArray(); + for (int c = 0; c < columnCount; c++) { + toXContents[c].positionToXContent(builder, params, position); + } + return builder.endArray(); + }); + }); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RestEsqlAsyncQueryAction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RestEsqlAsyncQueryAction.java new file mode 100644 index 0000000000000..04b37616b3ebf --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RestEsqlAsyncQueryAction.java @@ -0,0 +1,62 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
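
columnarValues() and rowValues() above emit the same cells in two shapes. A tiny illustration of the two layouts for one result set (values are made up):

    import java.util.List;

    public class ValueLayouts {
        public static void main(String[] args) {
            // The same two rows: a keyword column and an integer column.
            List<List<Object>> rowOriented = List.of(List.<Object>of("a", 1), List.<Object>of("b", 2));
            List<List<Object>> columnar = List.of(List.<Object>of("a", "b"), List.<Object>of(1, 2));
            System.out.println("rows:    " + rowOriented); // [[a, 1], [b, 2]]
            System.out.println("columns: " + columnar);    // [[a, b], [1, 2]]
        }
    }
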
+ */ + +package org.elasticsearch.xpack.esql.action; + +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.logging.LogManager; +import org.elasticsearch.logging.Logger; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestCancellableNodeClient; +import org.elasticsearch.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Collections; +import java.util.List; +import java.util.Set; + +import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.xpack.esql.formatter.TextFormat.URL_PARAM_DELIMITER; + +public class RestEsqlAsyncQueryAction extends BaseRestHandler { + private static final Logger LOGGER = LogManager.getLogger(RestEsqlAsyncQueryAction.class); + + @Override + public String getName() { + return "esql_async_query"; + } + + @Override + public List routes() { + return List.of(new Route(POST, "/_query/async")); + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { + EsqlQueryRequest esqlRequest; + try (XContentParser parser = request.contentOrSourceParamParser()) { + esqlRequest = EsqlQueryRequest.fromXContentAsync(parser); + } + + LOGGER.info("Beginning execution of ESQL async query.\nQuery string: [{}]", esqlRequest.query()); + + return channel -> { + RestCancellableNodeClient cancellableClient = new RestCancellableNodeClient(client, request.getHttpChannel()); + cancellableClient.execute( + EsqlQueryAction.INSTANCE, + esqlRequest, + new EsqlResponseListener(channel, request, esqlRequest).wrapWithLogging() + ); + }; + } + + @Override + protected Set responseParams() { + return Collections.singleton(URL_PARAM_DELIMITER); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RestEsqlDeleteAsyncResultAction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RestEsqlDeleteAsyncResultAction.java new file mode 100644 index 0000000000000..1857e32e99b06 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RestEsqlDeleteAsyncResultAction.java @@ -0,0 +1,38 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.action; + +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestHandler; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xpack.core.async.DeleteAsyncResultAction; +import org.elasticsearch.xpack.core.async.DeleteAsyncResultRequest; + +import java.util.List; + +import static org.elasticsearch.rest.RestRequest.Method.DELETE; + +public class RestEsqlDeleteAsyncResultAction extends BaseRestHandler { + @Override + public List routes() { + return List.of(new RestHandler.Route(DELETE, "/_query/async/{id}")); + } + + @Override + public String getName() { + return "esql_delete_async_result"; + } + + @Override + protected BaseRestHandler.RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) { + DeleteAsyncResultRequest delete = new DeleteAsyncResultRequest(request.param("id")); + return channel -> client.execute(DeleteAsyncResultAction.INSTANCE, delete, new RestToXContentListener<>(channel)); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RestEsqlGetAsyncResultAction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RestEsqlGetAsyncResultAction.java new file mode 100644 index 0000000000000..9d83ed117be76 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RestEsqlGetAsyncResultAction.java @@ -0,0 +1,42 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
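
Together the three new handlers form the async lifecycle: submit, poll, delete. A hedged end-to-end sketch with the JDK HTTP client (host, index name, and the placeholder id are assumptions; the routes and the wait_for_completion_timeout/keep_alive parameters come from the handlers in this patch):

    import java.net.URI;
    import java.net.http.HttpClient;
    import java.net.http.HttpRequest;
    import java.net.http.HttpResponse;

    public class EsqlAsyncLifecycle {
        public static void main(String[] args) throws Exception {
            HttpClient client = HttpClient.newHttpClient();
            String base = "http://localhost:9200"; // assumed local node

            // 1. Submit: POST /_query/async answers with an id while the query keeps running.
            HttpRequest submit = HttpRequest.newBuilder(URI.create(base + "/_query/async"))
                .header("Content-Type", "application/json")
                .POST(HttpRequest.BodyPublishers.ofString("{\"query\": \"FROM idx | LIMIT 10\"}")) // idx is a placeholder index
                .build();
            System.out.println(client.send(submit, HttpResponse.BodyHandlers.ofString()).body());

            String id = "id-from-the-submit-response"; // placeholder, not a real id

            // 2. Poll: GET /_query/async/{id}, optionally blocking and extending the stored result's lifetime.
            HttpRequest poll = HttpRequest.newBuilder(
                URI.create(base + "/_query/async/" + id + "?wait_for_completion_timeout=30s&keep_alive=5m")
            ).GET().build();
            System.out.println(client.send(poll, HttpResponse.BodyHandlers.ofString()).body());

            // 3. Clean up: DELETE /_query/async/{id} discards the stored result.
            HttpRequest delete = HttpRequest.newBuilder(URI.create(base + "/_query/async/" + id)).DELETE().build();
            System.out.println(client.send(delete, HttpResponse.BodyHandlers.ofString()).statusCode());
        }
    }
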
+ */ + +package org.elasticsearch.xpack.esql.action; + +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener; +import org.elasticsearch.xpack.core.async.GetAsyncResultRequest; + +import java.util.List; + +import static org.elasticsearch.rest.RestRequest.Method.GET; + +public class RestEsqlGetAsyncResultAction extends BaseRestHandler { + @Override + public List routes() { + return List.of(new Route(GET, "/_query/async/{id}")); + } + + @Override + public String getName() { + return "esql_get_async_result"; + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) { + GetAsyncResultRequest get = new GetAsyncResultRequest(request.param("id")); + if (request.hasParam("wait_for_completion_timeout")) { + get.setWaitForCompletionTimeout(request.paramAsTime("wait_for_completion_timeout", get.getWaitForCompletionTimeout())); + } + if (request.hasParam("keep_alive")) { + get.setKeepAlive(request.paramAsTime("keep_alive", get.getKeepAlive())); + } + return channel -> client.execute(EsqlAsyncGetResultAction.INSTANCE, get, new RestRefCountedChunkedToXContentListener<>(channel)); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RestEsqlQueryAction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RestEsqlQueryAction.java index 7a1b7f7b9b927..6b8e7fc397865 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RestEsqlQueryAction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RestEsqlQueryAction.java @@ -48,7 +48,7 @@ public List routes() { protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { EsqlQueryRequest esqlRequest; try (XContentParser parser = request.contentOrSourceParamParser()) { - esqlRequest = EsqlQueryRequest.fromXContent(parser); + esqlRequest = EsqlQueryRequest.fromXContentSync(parser); } LOGGER.info("Beginning execution of ESQL query.\nQuery string: [{}]", esqlRequest.query()); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java index 945f543329c15..6d57b239e94a0 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java @@ -11,6 +11,7 @@ import org.elasticsearch.action.ActionListenerResponseHandler; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.UnavailableShardsException; +import org.elasticsearch.action.support.ChannelActionListener; import org.elasticsearch.action.support.ContextPreservingActionListener; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.cluster.ClusterState; @@ -24,7 +25,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.compute.OwningChannelActionListener; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.BlockStreamInput; @@ -369,7 +369,7 @@ private class TransportHandler implements 
TransportRequestHandler @Override public void messageReceived(LookupRequest request, TransportChannel channel, Task task) { request.incRef(); - ActionListener listener = ActionListener.runBefore(new OwningChannelActionListener<>(channel), request::decRef); + ActionListener listener = ActionListener.runBefore(new ChannelActionListener<>(channel), request::decRef); doLookup( request.sessionId, (CancellableTask) task, @@ -378,7 +378,7 @@ public void messageReceived(LookupRequest request, TransportChannel channel, Tas request.matchField, request.inputPage, request.extractFields, - listener.map(LookupResponse::new) + listener.delegateFailureAndWrap((l, outPage) -> ActionListener.respondAndRelease(l, new LookupResponse(outPage))) ); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java index 246849896bcdf..1e21886a7ac4b 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java @@ -9,12 +9,12 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListenerResponseHandler; +import org.elasticsearch.action.support.ChannelActionListener; import org.elasticsearch.action.support.ContextPreservingActionListener; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.compute.OwningChannelActionListener; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportChannel; @@ -110,7 +110,7 @@ public void messageReceived(ResolveRequest request, TransportChannel channel, Ta String policyName = request.policyName; EnrichPolicy policy = policies().get(policyName); ThreadContext threadContext = threadPool.getThreadContext(); - ActionListener listener = new OwningChannelActionListener<>(channel); + ActionListener listener = new ChannelActionListener<>(channel); listener = ContextPreservingActionListener.wrapPreservingContext(listener, threadContext); try (ThreadContext.StoredContext ignored = threadContext.stashWithOrigin(ClientHelper.ENRICH_ORIGIN)) { indexResolver.resolveAsMergedMapping( diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichQuerySourceOperator.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichQuerySourceOperator.java index 8d23a59779e6b..b0582e211fdba 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichQuerySourceOperator.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichQuerySourceOperator.java @@ -94,7 +94,7 @@ private Page queryOneLeaf(Weight weight, int leafIndex) throws IOException { if (scorer == null) { return null; } - IntVector docs = null, segments = null, shards = null; + IntVector docs = null, segments = null, shards = null, positions = null; boolean success = false; try (IntVector.Builder docsBuilder = blockFactory.newIntVectorBuilder(1)) { scorer.score(new DocCollector(docsBuilder), leafReaderContext.reader().getLiveDocs()); @@ -102,12 +102,13 @@ private Page queryOneLeaf(Weight weight, int leafIndex) throws IOException { final int positionCount 
= docs.getPositionCount(); segments = blockFactory.newConstantIntVector(leafIndex, positionCount); shards = blockFactory.newConstantIntVector(0, positionCount); - var positions = blockFactory.newConstantIntBlockWith(queryPosition, positionCount); + positions = blockFactory.newConstantIntVector(queryPosition, positionCount); + Page page = new Page(new DocVector(shards, segments, docs, true).asBlock(), positions.asBlock()); success = true; - return new Page(new DocVector(shards, segments, docs, true).asBlock(), positions); + return page; } finally { if (success == false) { - Releasables.close(docs, shards, segments); + Releasables.close(docs, shards, segments, positions); } } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/EvalMapper.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/EvalMapper.java index 280ef898c3b90..54c9fec4da96a 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/EvalMapper.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/EvalMapper.java @@ -105,7 +105,7 @@ public Block eval(Page page) { */ private Block eval(Block lhs, Block rhs) { int positionCount = lhs.getPositionCount(); - try (BooleanBlock.Builder result = BooleanBlock.newBlockBuilder(positionCount, lhs.blockFactory())) { + try (BooleanBlock.Builder result = lhs.blockFactory().newBooleanBlockBuilder(positionCount)) { for (int p = 0; p < positionCount; p++) { if (lhs.getValueCount(p) > 1) { result.appendNull(); @@ -132,7 +132,7 @@ private Block eval(Block lhs, Block rhs) { private Block eval(BooleanVector lhs, BooleanVector rhs) { int positionCount = lhs.getPositionCount(); - try (var result = BooleanVector.newVectorFixedBuilder(positionCount, lhs.blockFactory())) { + try (var result = lhs.blockFactory().newBooleanVectorFixedBuilder(positionCount)) { for (int p = 0; p < positionCount; p++) { result.appendBoolean(bl.function().apply(lhs.getBoolean(p), rhs.getBoolean(p))); } @@ -225,12 +225,12 @@ public String toString() { private static Block block(Literal lit, BlockFactory blockFactory, int positions) { var value = lit.value(); if (value == null) { - return Block.constantNullBlock(positions, blockFactory); + return blockFactory.newConstantNullBlock(positions); } if (value instanceof List multiValue) { if (multiValue.isEmpty()) { - return Block.constantNullBlock(positions, blockFactory); + return blockFactory.newConstantNullBlock(positions); } var wrapper = BlockUtils.wrapperFor(blockFactory, ElementType.fromJava(multiValue.get(0).getClass()), positions); for (int i = 0; i < positions; i++) { @@ -267,14 +267,9 @@ record IsNullEvaluator(DriverContext driverContext, EvalOperator.ExpressionEvalu public Block eval(Page page) { try (Block fieldBlock = field.eval(page)) { if (fieldBlock.asVector() != null) { - return BooleanBlock.newConstantBlockWith(false, page.getPositionCount(), driverContext.blockFactory()); + return driverContext.blockFactory().newConstantBooleanBlockWith(false, page.getPositionCount()); } - try ( - BooleanVector.FixedBuilder builder = BooleanVector.newVectorFixedBuilder( - page.getPositionCount(), - driverContext.blockFactory() - ) - ) { + try (var builder = driverContext.blockFactory().newBooleanVectorFixedBuilder(page.getPositionCount())) { for (int p = 0; p < page.getPositionCount(); p++) { builder.appendBoolean(fieldBlock.isNull(p)); } @@ -321,14 +316,9 @@ record IsNotNullEvaluator(DriverContext driverContext, EvalOperator.ExpressionEv public Block eval(Page page) 
{ try (Block fieldBlock = field.eval(page)) { if (fieldBlock.asVector() != null) { - return BooleanBlock.newConstantBlockWith(true, page.getPositionCount(), driverContext.blockFactory()); + return driverContext.blockFactory().newConstantBooleanBlockWith(true, page.getPositionCount()); } - try ( - BooleanVector.FixedBuilder builder = BooleanVector.newVectorFixedBuilder( - page.getPositionCount(), - driverContext.blockFactory() - ) - ) { + try (var builder = driverContext.blockFactory().newBooleanVectorFixedBuilder(page.getPositionCount())) { for (int p = 0; p < page.getPositionCount(); p++) { builder.appendBoolean(fieldBlock.isNull(p) == false); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/InMapper.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/InMapper.java index 6ef37abf5a9b4..7b4e867adad91 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/InMapper.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/InMapper.java @@ -8,11 +8,11 @@ package org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison; import org.elasticsearch.compute.data.Block; -import org.elasticsearch.compute.data.BooleanArrayBlock; -import org.elasticsearch.compute.data.BooleanArrayVector; +import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.BooleanBlock; import org.elasticsearch.compute.data.BooleanVector; import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; import org.elasticsearch.compute.operator.EvalOperator; import org.elasticsearch.compute.operator.EvalOperator.ExpressionEvaluator; import org.elasticsearch.core.Releasables; @@ -41,10 +41,12 @@ public ExpressionEvaluator.Factory map(In in, Layout layout) { ExpressionEvaluator.Factory eqEvaluator = ((ExpressionMapper) EQUALS).map(eq, layout); listEvaluators.add(eqEvaluator); }); - return dvrCtx -> new InExpressionEvaluator(listEvaluators.stream().map(fac -> fac.get(dvrCtx)).toList()); + return dvrCtx -> new InExpressionEvaluator(dvrCtx, listEvaluators.stream().map(fac -> fac.get(dvrCtx)).toList()); } - record InExpressionEvaluator(List listEvaluators) implements EvalOperator.ExpressionEvaluator { + record InExpressionEvaluator(DriverContext driverContext, List listEvaluators) + implements + EvalOperator.ExpressionEvaluator { @Override public Block eval(Page page) { int positionCount = page.getPositionCount(); @@ -68,7 +70,7 @@ public Block eval(Page page) { } } - return evalWithNulls(values, nulls, nullInValues); + return evalWithNulls(driverContext.blockFactory(), values, nulls, nullInValues); } private static void updateValues(BooleanVector vector, boolean[] values) { @@ -94,9 +96,9 @@ private static void updateValues(BooleanBlock block, boolean[] values, BitSet nu } } - private static Block evalWithNulls(boolean[] values, BitSet nulls, boolean nullInValues) { + private static Block evalWithNulls(BlockFactory blockFactory, boolean[] values, BitSet nulls, boolean nullInValues) { if (nulls.isEmpty() && nullInValues == false) { - return new BooleanArrayVector(values, values.length).asBlock(); + return blockFactory.newBooleanArrayVector(values, values.length).asBlock(); } else { // 3VL: true trumps null; null trumps false. 
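
The "3VL" comment above is Kleene logic: for OR, true beats unknown and unknown beats false. As a standalone truth function (Boolean null modelling unknown; a sketch, not the evaluator's code):

    public class Kleene {
        static Boolean or(Boolean a, Boolean b) {
            if (Boolean.TRUE.equals(a) || Boolean.TRUE.equals(b)) {
                return true;  // true trumps null
            }
            if (a == null || b == null) {
                return null;  // null trumps false
            }
            return false;
        }

        public static void main(String[] args) {
            System.out.println(or(true, null));   // true
            System.out.println(or(false, null));  // null
            System.out.println(or(false, false)); // false
        }
    }
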
for (int i = 0; i < values.length; i++) { @@ -108,9 +110,9 @@ private static Block evalWithNulls(boolean[] values, BitSet nulls, boolean nullI } if (nulls.isEmpty()) { // no nulls and no multi-values means we must use a Vector - return new BooleanArrayVector(values, values.length).asBlock(); + return blockFactory.newBooleanArrayVector(values, values.length).asBlock(); } else { - return new BooleanArrayBlock(values, values.length, null, nulls, Block.MvOrdering.UNORDERED); + return blockFactory.newBooleanArrayBlock(values, values.length, null, nulls, Block.MvOrdering.UNORDERED); } } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianPoint.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianPoint.java index bb384ae846f26..33bd3098f2e3b 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianPoint.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianPoint.java @@ -9,6 +9,8 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.compute.ann.ConvertEvaluator; +import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; +import org.elasticsearch.xpack.esql.expression.function.Param; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.tree.NodeInfo; import org.elasticsearch.xpack.ql.tree.Source; @@ -34,7 +36,11 @@ public class ToCartesianPoint extends AbstractConvertFunction { Map.entry(TEXT, ToCartesianPointFromStringEvaluator.Factory::new) ); - public ToCartesianPoint(Source source, Expression field) { + @FunctionInfo(returnType = "cartesian_point") + public ToCartesianPoint( + Source source, + @Param(name = "v", type = { "cartesian_point", "long", "unsigned_long", "keyword", "text" }) Expression field + ) { super(source, field); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoPoint.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoPoint.java index 75ef5c324541b..c78597706de45 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoPoint.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoPoint.java @@ -9,6 +9,8 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.compute.ann.ConvertEvaluator; +import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; +import org.elasticsearch.xpack.esql.expression.function.Param; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.tree.NodeInfo; import org.elasticsearch.xpack.ql.tree.Source; @@ -34,7 +36,11 @@ public class ToGeoPoint extends AbstractConvertFunction { Map.entry(TEXT, ToGeoPointFromStringEvaluator.Factory::new) ); - public ToGeoPoint(Source source, Expression field) { + @FunctionInfo(returnType = "geo_point") + public ToGeoPoint( + Source source, + @Param(name = "v", type = { "geo_point", "long", "unsigned_long", "keyword", "text" }) Expression field + ) { super(source, field); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/IsFinite.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/IsFinite.java index 
012b8ce25f258..f4bf8a628c9b6 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/IsFinite.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/IsFinite.java @@ -9,6 +9,8 @@ import org.elasticsearch.compute.ann.Evaluator; import org.elasticsearch.compute.operator.EvalOperator.ExpressionEvaluator; +import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; +import org.elasticsearch.xpack.esql.expression.function.Param; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.tree.NodeInfo; import org.elasticsearch.xpack.ql.tree.Source; @@ -17,7 +19,9 @@ import java.util.function.Function; public class IsFinite extends RationalUnaryPredicate { - public IsFinite(Source source, Expression field) { + + @FunctionInfo(returnType = "boolean", description = "Returns true if the argument is a finite floating-point value.") + public IsFinite(Source source, @Param(name = "n", type = { "double" }, description = "A floating-point value") Expression field) { super(source, field); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/IsInfinite.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/IsInfinite.java index 80068f3aaf8d4..c0c3b3149f3d0 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/IsInfinite.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/IsInfinite.java @@ -9,6 +9,8 @@ import org.elasticsearch.compute.ann.Evaluator; import org.elasticsearch.compute.operator.EvalOperator.ExpressionEvaluator; +import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; +import org.elasticsearch.xpack.esql.expression.function.Param; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.tree.NodeInfo; import org.elasticsearch.xpack.ql.tree.Source; @@ -17,7 +19,12 @@ import java.util.function.Function; public class IsInfinite extends RationalUnaryPredicate { - public IsInfinite(Source source, Expression field) { + + @FunctionInfo( + returnType = "boolean", + description = "Returns true if the specified floating-point value is infinitely large in magnitude." 
+ ) + public IsInfinite(Source source, @Param(name = "n", type = { "double" }, description = "A floating-point value") Expression field) { super(source, field); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/IsNaN.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/IsNaN.java index 07875987f74d7..27ddd39c86c21 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/IsNaN.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/IsNaN.java @@ -9,6 +9,8 @@ import org.elasticsearch.compute.ann.Evaluator; import org.elasticsearch.compute.operator.EvalOperator.ExpressionEvaluator; +import org.elasticsearch.xpack.esql.expression.function.FunctionInfo; +import org.elasticsearch.xpack.esql.expression.function.Param; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.tree.NodeInfo; import org.elasticsearch.xpack.ql.tree.Source; @@ -17,7 +19,9 @@ import java.util.function.Function; public class IsNaN extends RationalUnaryPredicate { - public IsNaN(Source source, Expression field) { + + @FunctionInfo(returnType = "boolean", description = "Returns true if the argument is a Not-a-Number (NaN) value.") + public IsNaN(Source source, @Param(name = "n", type = { "double" }, description = "A floating-point value") Expression field) { super(source, field); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvConcat.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvConcat.java index 9f5c492d7fe7c..5df0ac03206c4 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvConcat.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvConcat.java @@ -123,7 +123,7 @@ private static class Evaluator implements ExpressionEvaluator { public final Block eval(Page page) { try (BytesRefBlock fieldVal = (BytesRefBlock) field.eval(page); BytesRefBlock delimVal = (BytesRefBlock) delim.eval(page)) { int positionCount = page.getPositionCount(); - try (BytesRefBlock.Builder builder = BytesRefBlock.newBlockBuilder(positionCount, context.blockFactory())) { + try (BytesRefBlock.Builder builder = context.blockFactory().newBytesRefBlockBuilder(positionCount)) { BytesRefBuilder work = new BytesRefBuilder(); // TODO BreakingBytesRefBuilder so we don't blow past circuit breakers BytesRef fieldScratch = new BytesRef(); BytesRef delimScratch = new BytesRef(); diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvCount.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvCount.java index bf05aeee4d228..b7d9a3a73929e 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvCount.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvCount.java @@ -8,8 +8,6 @@ package org.elasticsearch.xpack.esql.expression.function.scalar.multivalue; import org.elasticsearch.compute.data.Block; -import org.elasticsearch.compute.data.IntBlock; -import org.elasticsearch.compute.data.IntVector; import org.elasticsearch.compute.operator.DriverContext; 
import org.elasticsearch.compute.operator.EvalOperator; import org.elasticsearch.compute.operator.EvalOperator.ExpressionEvaluator; @@ -108,7 +106,7 @@ protected String name() { @Override protected Block evalNullable(Block block) { - try (var builder = IntBlock.newBlockBuilder(block.getPositionCount(), driverContext.blockFactory())) { + try (var builder = driverContext.blockFactory().newIntBlockBuilder(block.getPositionCount())) { for (int p = 0; p < block.getPositionCount(); p++) { int valueCount = block.getValueCount(p); if (valueCount == 0) { @@ -123,7 +121,7 @@ protected Block evalNullable(Block block) { @Override protected Block evalNotNullable(Block block) { - try (var builder = IntVector.newVectorFixedBuilder(block.getPositionCount(), driverContext.blockFactory())) { + try (var builder = driverContext.blockFactory().newIntVectorFixedBuilder(block.getPositionCount())) { for (int p = 0; p < block.getPositionCount(); p++) { builder.appendInt(block.getValueCount(p)); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSum.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSum.java index a0abced909c48..48b83aa205549 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSum.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSum.java @@ -45,7 +45,7 @@ protected ExpressionEvaluator.Factory evaluator(ExpressionEvaluator.Factory fiel case LONG -> field().dataType() == DataTypes.UNSIGNED_LONG ? new MvSumUnsignedLongEvaluator.Factory(source(), fieldEval) : new MvSumLongEvaluator.Factory(source(), fieldEval); - case NULL -> dvrCtx -> EvalOperator.CONSTANT_NULL; + case NULL -> EvalOperator.CONSTANT_NULL_FACTORY; default -> throw EsqlIllegalArgumentException.illegalDataType(field.dataType()); }; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/DateTimeArithmeticOperation.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/DateTimeArithmeticOperation.java index 0132301cb79b5..e29502920d3d8 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/DateTimeArithmeticOperation.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/DateTimeArithmeticOperation.java @@ -13,7 +13,6 @@ import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.tree.Source; import org.elasticsearch.xpack.ql.type.DataType; -import org.elasticsearch.xpack.ql.type.DataTypes; import java.time.Duration; import java.time.Period; @@ -26,6 +25,9 @@ import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.TIME_DURATION; import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.isDateTimeOrTemporal; import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.isTemporalAmount; +import static org.elasticsearch.xpack.ql.type.DataTypes.DATETIME; +import static org.elasticsearch.xpack.ql.type.DataTypes.isDateTime; +import static org.elasticsearch.xpack.ql.type.DataTypes.isNull; abstract class DateTimeArithmeticOperation extends EsqlArithmeticOperation { /** Arithmetic (quad) function. 
*/ @@ -57,16 +59,16 @@ protected TypeResolution resolveType() { // Date math is only possible if either // - one argument is a DATETIME and the other a (foldable) TemporalValue, or - // - both arguments are TemporalValues (so we can fold them). + // - both arguments are TemporalValues (so we can fold them), or + // - one argument is NULL and the other one a DATETIME. if (isDateTimeOrTemporal(leftType) || isDateTimeOrTemporal(rightType)) { - if ((leftType == DataTypes.DATETIME && isTemporalAmount(rightType)) - || (rightType == DataTypes.DATETIME && isTemporalAmount(leftType))) { + if (isNull(leftType) || isNull(rightType)) { return TypeResolution.TYPE_RESOLVED; } - if (leftType == TIME_DURATION && rightType == TIME_DURATION) { + if ((isDateTime(leftType) && isTemporalAmount(rightType)) || (isTemporalAmount(leftType) && isDateTime(rightType))) { return TypeResolution.TYPE_RESOLVED; } - if (leftType == DATE_PERIOD && rightType == DATE_PERIOD) { + if (isTemporalAmount(leftType) && isTemporalAmount(rightType) && leftType == rightType) { return TypeResolution.TYPE_RESOLVED; } @@ -126,16 +128,19 @@ public final Object fold() { throw ExceptionUtils.math(source(), e); } } + if (isNull(leftDataType) || isNull(rightDataType)) { + return null; + } return super.fold(); } @Override public ExpressionEvaluator.Factory toEvaluator(Function toEvaluator) { - if (dataType() == DataTypes.DATETIME) { + if (dataType() == DATETIME) { // One of the arguments has to be a datetime and the other a temporal amount. Expression datetimeArgument; Expression temporalAmountArgument; - if (left().dataType() == DataTypes.DATETIME) { + if (left().dataType() == DATETIME) { datetimeArgument = left(); temporalAmountArgument = right(); } else { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamOutput.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamOutput.java index 66bd4163013ee..ac894ce7a099e 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamOutput.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamOutput.java @@ -7,7 +7,7 @@ package org.elasticsearch.xpack.esql.io.stream; -import org.elasticsearch.common.io.stream.OutputStreamStreamOutput; +import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xpack.esql.io.stream.PlanNameRegistry.PlanWriter; import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan; @@ -26,18 +26,19 @@ * A customized stream output used to serialize ESQL physical plan fragments. Complements stream * output with methods that write plan nodes, Attributes, Expressions, etc. 
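
PlanStreamOutput below switches from subclassing OutputStreamStreamOutput to wrapping a delegate and forwarding the primitive operations. The same decorator move with a plain OutputStream (an assumed stand-in for StreamOutput):

    import java.io.IOException;
    import java.io.OutputStream;

    public class DelegatingOutput extends OutputStream {
        private final OutputStream delegate;

        public DelegatingOutput(OutputStream delegate) {
            this.delegate = delegate;
        }

        // Only the primitives are forwarded; everything else is derived from them.
        @Override public void write(int b) throws IOException { delegate.write(b); }
        @Override public void write(byte[] b, int off, int len) throws IOException { delegate.write(b, off, len); }
        @Override public void flush() throws IOException { delegate.flush(); }
        @Override public void close() throws IOException { delegate.close(); }
    }
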
*/ -public final class PlanStreamOutput extends OutputStreamStreamOutput { +public final class PlanStreamOutput extends StreamOutput { + private final StreamOutput delegate; private final PlanNameRegistry registry; private final Function<Class<?>, String> nameSupplier; - public PlanStreamOutput(StreamOutput streamOutput, PlanNameRegistry registry) { - this(streamOutput, registry, PlanNamedTypes::name); + public PlanStreamOutput(StreamOutput delegate, PlanNameRegistry registry) { + this(delegate, registry, PlanNamedTypes::name); } - public PlanStreamOutput(StreamOutput streamOutput, PlanNameRegistry registry, Function<Class<?>, String> nameSupplier) { - super(streamOutput); + public PlanStreamOutput(StreamOutput delegate, PlanNameRegistry registry, Function<Class<?>, String> nameSupplier) { + this.delegate = delegate; this.registry = registry; this.nameSupplier = nameSupplier; } @@ -89,4 +90,35 @@ public <T> void writeNamed(Class<T> type, T value) throws IOException { writeString(name); writer.write(this, value); } + + @Override + public void writeByte(byte b) throws IOException { + delegate.writeByte(b); + } + + @Override + public void writeBytes(byte[] b, int offset, int length) throws IOException { + delegate.writeBytes(b, offset, length); + } + + @Override + public void flush() throws IOException { + delegate.flush(); + } + + @Override + public void close() throws IOException { + delegate.close(); + } + + @Override + public TransportVersion getTransportVersion() { + return delegate.getTransportVersion(); + } + + @Override + public void setTransportVersion(TransportVersion version) { + delegate.setTransportVersion(version); + super.setTransportVersion(version); + } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalLogicalPlanOptimizer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalLogicalPlanOptimizer.java index e05dd9a00c567..86ef7b6d1e618 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalLogicalPlanOptimizer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalLogicalPlanOptimizer.java @@ -7,28 +7,47 @@ package org.elasticsearch.xpack.esql.optimizer; +import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.compute.data.BlockUtils; +import org.elasticsearch.xpack.esql.expression.function.aggregate.Count; import org.elasticsearch.xpack.esql.expression.function.scalar.nulls.Coalesce; +import org.elasticsearch.xpack.esql.optimizer.LogicalPlanOptimizer.PropagateEmptyRelation; import org.elasticsearch.xpack.esql.plan.logical.Eval; import org.elasticsearch.xpack.esql.plan.logical.TopN; +import org.elasticsearch.xpack.esql.planner.AbstractPhysicalOperationProviders; +import org.elasticsearch.xpack.esql.planner.PlannerUtils; import org.elasticsearch.xpack.esql.stats.SearchStats; import org.elasticsearch.xpack.ql.expression.Alias; +import org.elasticsearch.xpack.ql.expression.Attribute; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.expression.FieldAttribute; import org.elasticsearch.xpack.ql.expression.Literal; import org.elasticsearch.xpack.ql.expression.NamedExpression; +import org.elasticsearch.xpack.ql.expression.function.aggregate.AggregateFunction; +import org.elasticsearch.xpack.ql.expression.predicate.Predicates; +import org.elasticsearch.xpack.ql.expression.predicate.nulls.IsNotNull; import 
org.elasticsearch.xpack.ql.optimizer.OptimizerRules; import org.elasticsearch.xpack.ql.plan.logical.Aggregate; import org.elasticsearch.xpack.ql.plan.logical.EsRelation; +import org.elasticsearch.xpack.ql.plan.logical.Filter; import org.elasticsearch.xpack.ql.plan.logical.Limit; import org.elasticsearch.xpack.ql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.ql.plan.logical.OrderBy; import org.elasticsearch.xpack.ql.plan.logical.Project; import org.elasticsearch.xpack.ql.rule.ParameterizedRule; import org.elasticsearch.xpack.ql.rule.ParameterizedRuleExecutor; +import org.elasticsearch.xpack.ql.type.DataType; +import org.elasticsearch.xpack.ql.type.DataTypes; import java.util.ArrayList; import java.util.List; +import java.util.Set; +import static java.util.Arrays.asList; +import static org.elasticsearch.xpack.esql.optimizer.LogicalPlanOptimizer.cleanup; +import static org.elasticsearch.xpack.esql.optimizer.LogicalPlanOptimizer.operators; import static org.elasticsearch.xpack.ql.optimizer.OptimizerRules.TransformDirection.UP; public class LocalLogicalPlanOptimizer extends ParameterizedRuleExecutor<LogicalPlan, LocalLogicalOptimizerContext> { @@ -44,16 +63,30 @@ protected List<Batch<LogicalPlan>> batches() { Limiter.ONCE, new ReplaceTopNWithLimitAndSort(), new ReplaceMissingFieldWithNull(), - new InferIsNotNull() + new InferIsNotNull(), + new InferNonNullAggConstraint() ); var rules = new ArrayList<Batch<LogicalPlan>>(); rules.add(local); // TODO: if the local rules haven't touched the tree, the rest of the rules can be skipped - rules.addAll(LogicalPlanOptimizer.rules()); + rules.addAll(asList(operators(), cleanup())); + replaceRules(rules); return rules; } + private List<Batch<LogicalPlan>> replaceRules(List<Batch<LogicalPlan>> listOfRules) { + for (Batch<LogicalPlan> batch : listOfRules) { + var rules = batch.rules(); + for (int i = 0; i < rules.length; i++) { + if (rules[i] instanceof PropagateEmptyRelation) { + rules[i] = new LocalPropagateEmptyRelation(); + } + } + } + return listOfRules; + } + public LogicalPlan localOptimize(LogicalPlan plan) { return execute(plan); } @@ -132,6 +165,84 @@ protected boolean skipExpression(Expression e) { } } + /** + * Local aggregation can only produce intermediate state that gets wired into the global agg. + */ + private static class LocalPropagateEmptyRelation extends PropagateEmptyRelation { + + /** + * Local variant of the aggregation that returns the intermediate value. + */ + @Override + protected void aggOutput(NamedExpression agg, AggregateFunction aggFunc, BlockFactory blockFactory, List<Block> blocks) { + List<Attribute> output = AbstractPhysicalOperationProviders.intermediateAttributes(List.of(agg), List.of()); + for (Attribute o : output) { + DataType dataType = o.dataType(); + // boolean right now is used for the internal #seen so always return true + var value = dataType == DataTypes.BOOLEAN ? true + // look for count(literal) with literal != null + : aggFunc instanceof Count count && (count.foldable() == false || count.fold() != null) ? 0L + // otherwise nullify + : null; + var wrapper = BlockUtils.wrapperFor(blockFactory, PlannerUtils.toElementType(dataType), 1); + wrapper.accept(value); + blocks.add(wrapper.builder().build()); + } + } + } + + /** + * The vast majority of aggs ignore null entries - this rule adds a pushable filter, as it is cheap + * to execute, to filter these entries out to begin with. + * STATS x = min(a), y = sum(b) + * becomes + * | WHERE a IS NOT NULL OR b IS NOT NULL + * | STATS x = min(a), y = sum(b) + * <p>
+ * Unfortunately this optimization cannot be applied when grouping is necessary, since it can filter out + * groups containing only null values. + */ + static class InferNonNullAggConstraint extends ParameterizedOptimizerRule<Aggregate, LocalLogicalOptimizerContext> { + + @Override + protected LogicalPlan rule(Aggregate aggregate, LocalLogicalOptimizerContext context) { + // only look at aggregates with default grouping + if (aggregate.groupings().size() > 0) { + return aggregate; + } + + SearchStats stats = context.searchStats(); + LogicalPlan plan = aggregate; + var aggs = aggregate.aggregates(); + Set<Expression> nonNullAggFields = Sets.newLinkedHashSetWithExpectedSize(aggs.size()); + for (var agg : aggs) { + Expression expr = agg; + if (agg instanceof Alias as) { + expr = as.child(); + } + if (expr instanceof AggregateFunction af) { + Expression field = af.field(); + // ignore literals (e.g. COUNT(1)) + // make sure the field exists at the source and is indexed (not runtime) + if (field.foldable() == false && field instanceof FieldAttribute fa && stats.isIndexed(fa.name())) { + nonNullAggFields.add(field); + } else { + // otherwise bail out: the disjunction must cover _all_ agg fields, or rows needed by the + // uncovered agg would be filtered out + return plan; + } + } + } + + if (nonNullAggFields.size() > 0) { + Expression condition = Predicates.combineOr( + nonNullAggFields.stream().map(f -> (Expression) new IsNotNull(aggregate.source(), f)).toList() + ); + plan = aggregate.replaceChild(new Filter(aggregate.source(), aggregate.child(), condition)); + } + return plan; + } + } + abstract static class ParameterizedOptimizerRule<SubPlan extends LogicalPlan, P> extends ParameterizedRule<SubPlan, LogicalPlan, P> { public final LogicalPlan apply(LogicalPlan plan, P context) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizer.java index 55ead7aa3fe4e..3744adbc0bfaf 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizer.java @@ -10,7 +10,6 @@ import org.elasticsearch.core.Tuple; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; -import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.Equals; import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.NotEquals; import org.elasticsearch.xpack.esql.expression.function.aggregate.Count; @@ -29,9 +28,9 @@ import org.elasticsearch.xpack.esql.plan.physical.TopNExec; import org.elasticsearch.xpack.esql.plan.physical.UnaryExec; import org.elasticsearch.xpack.esql.planner.AbstractPhysicalOperationProviders; +import org.elasticsearch.xpack.esql.planner.EsqlTranslatorHandler; import org.elasticsearch.xpack.esql.planner.PhysicalVerificationException; import org.elasticsearch.xpack.esql.planner.PhysicalVerifier; -import org.elasticsearch.xpack.esql.querydsl.query.SingleValueQuery; import org.elasticsearch.xpack.ql.common.Failure; import org.elasticsearch.xpack.ql.expression.Alias; import org.elasticsearch.xpack.ql.expression.Attribute; @@ -43,15 +42,15 @@ import org.elasticsearch.xpack.ql.expression.NamedExpression; import org.elasticsearch.xpack.ql.expression.Order; import org.elasticsearch.xpack.ql.expression.TypedAttribute; -import org.elasticsearch.xpack.ql.expression.function.scalar.ScalarFunction; +import 
org.elasticsearch.xpack.ql.expression.function.scalar.UnaryScalarFunction; import org.elasticsearch.xpack.ql.expression.predicate.Predicates; import org.elasticsearch.xpack.ql.expression.predicate.logical.BinaryLogic; import org.elasticsearch.xpack.ql.expression.predicate.logical.Not; +import org.elasticsearch.xpack.ql.expression.predicate.nulls.IsNotNull; +import org.elasticsearch.xpack.ql.expression.predicate.nulls.IsNull; import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.BinaryComparison; import org.elasticsearch.xpack.ql.expression.predicate.regex.RegexMatch; import org.elasticsearch.xpack.ql.expression.predicate.regex.WildcardLike; -import org.elasticsearch.xpack.ql.planner.ExpressionTranslator; -import org.elasticsearch.xpack.ql.planner.QlTranslatorHandler; import org.elasticsearch.xpack.ql.querydsl.query.Query; import org.elasticsearch.xpack.ql.rule.ParameterizedRuleExecutor; import org.elasticsearch.xpack.ql.rule.Rule; @@ -65,7 +64,6 @@ import java.util.LinkedList; import java.util.List; import java.util.Set; -import java.util.function.Supplier; import java.util.stream.Collectors; import static java.util.Arrays.asList; @@ -76,7 +74,7 @@ import static org.elasticsearch.xpack.ql.optimizer.OptimizerRules.TransformDirection.UP; public class LocalPhysicalPlanOptimizer extends ParameterizedRuleExecutor { - public static final QlTranslatorHandler TRANSLATOR_HANDLER = new EsqlTranslatorHandler(); + public static final EsqlTranslatorHandler TRANSLATOR_HANDLER = new EsqlTranslatorHandler(); private final PhysicalVerifier verifier = new PhysicalVerifier(); @@ -206,7 +204,8 @@ protected PhysicalPlan rule(FilterExec filterExec) { (canPushToSource(exp) ? pushable : nonPushable).add(exp); } if (pushable.size() > 0) { // update the executable with pushable conditions - QueryBuilder planQuery = TRANSLATOR_HANDLER.asQuery(Predicates.combineAnd(pushable)).asBuilder(); + Query queryDSL = TRANSLATOR_HANDLER.asQuery(Predicates.combineAnd(pushable)); + QueryBuilder planQuery = queryDSL.asBuilder(); var query = Queries.combine(Clause.FILTER, asList(queryExec.query(), planQuery)); queryExec = new EsQueryExec( queryExec.source(), @@ -233,17 +232,19 @@ public static boolean canPushToSource(Expression exp) { return isAttributePushable(bc.left(), bc) && bc.right().foldable(); } else if (exp instanceof BinaryLogic bl) { return canPushToSource(bl.left()) && canPushToSource(bl.right()); - } else if (exp instanceof RegexMatch rm) { - return isAttributePushable(rm.field(), rm); } else if (exp instanceof In in) { return isAttributePushable(in.value(), null) && Expressions.foldable(in.list()); } else if (exp instanceof Not not) { return canPushToSource(not.field()); + } else if (exp instanceof UnaryScalarFunction usf) { + if (usf instanceof RegexMatch || usf instanceof IsNull || usf instanceof IsNotNull) { + return isAttributePushable(usf.field(), usf); + } } return false; } - private static boolean isAttributePushable(Expression expression, ScalarFunction operation) { + private static boolean isAttributePushable(Expression expression, Expression operation) { if (expression instanceof FieldAttribute f && f.getExactInfo().hasExact()) { return isAggregatable(f); } @@ -404,22 +405,4 @@ private Tuple, List> pushableStats(AggregateExec aggregate } } - private static final class EsqlTranslatorHandler extends QlTranslatorHandler { - @Override - public Query wrapFunctionQuery(ScalarFunction sf, Expression field, Supplier querySupplier) { - if (field instanceof FieldAttribute fa) { - if 
(fa.getExactInfo().hasExact()) { - var exact = fa.exactAttribute(); - if (exact != fa) { - fa = exact; - } - } - return ExpressionTranslator.wrapIfNested(new SingleValueQuery(querySupplier.get(), fa.name()), field); - } - if (field instanceof MetadataAttribute) { - return querySupplier.get(); // MetadataAttributes are always single valued - } - throw new EsqlIllegalArgumentException("Expected a FieldAttribute or MetadataAttribute but received [" + field + "]"); - } - } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java index 66654b78c3af4..e4f67838731a0 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java @@ -25,7 +25,6 @@ import org.elasticsearch.xpack.esql.plan.logical.local.EsqlProject; import org.elasticsearch.xpack.esql.plan.logical.local.LocalRelation; import org.elasticsearch.xpack.esql.plan.logical.local.LocalSupplier; -import org.elasticsearch.xpack.esql.planner.AbstractPhysicalOperationProviders; import org.elasticsearch.xpack.esql.planner.PlannerUtils; import org.elasticsearch.xpack.esql.type.EsqlDataTypes; import org.elasticsearch.xpack.ql.expression.Alias; @@ -63,7 +62,6 @@ import org.elasticsearch.xpack.ql.rule.ParameterizedRuleExecutor; import org.elasticsearch.xpack.ql.rule.Rule; import org.elasticsearch.xpack.ql.tree.Source; -import org.elasticsearch.xpack.ql.type.DataType; import org.elasticsearch.xpack.ql.type.DataTypes; import org.elasticsearch.xpack.ql.util.CollectionUtils; import org.elasticsearch.xpack.ql.util.Holder; @@ -101,17 +99,8 @@ protected List> batches() { return rules(); } - protected static List> rules() { - var substitutions = new Batch<>( - "Substitutions", - Limiter.ONCE, - new SubstituteSurrogates(), - new ReplaceRegexMatch(), - new ReplaceAliasingEvalWithProject() - // new NormalizeAggregate(), - waits on https://github.com/elastic/elasticsearch/issues/100634 - ); - - var operators = new Batch<>( + protected static Batch operators() { + return new Batch<>( "Operator Optimization", new CombineProjections(), new CombineEvals(), @@ -146,19 +135,33 @@ protected static List> rules() { new PruneOrderByBeforeStats(), new PruneRedundantSortClauses() ); + } - var skip = new Batch<>("Skip Compute", new SkipQueryOnLimitZero()); - var cleanup = new Batch<>( + protected static Batch cleanup() { + return new Batch<>( "Clean Up", new ReplaceDuplicateAggWithEval(), // pushing down limits again, because ReplaceDuplicateAggWithEval could create new Project nodes that can still be optimized new PushDownAndCombineLimits(), new ReplaceLimitAndSortAsTopN() ); + } + + protected static List> rules() { + var substitutions = new Batch<>( + "Substitutions", + Limiter.ONCE, + new SubstituteSurrogates(), + new ReplaceRegexMatch(), + new ReplaceAliasingEvalWithProject() + // new NormalizeAggregate(), - waits on https://github.com/elastic/elasticsearch/issues/100634 + ); + + var skip = new Batch<>("Skip Compute", new SkipQueryOnLimitZero()); var defaultTopN = new Batch<>("Add default TopN", new AddDefaultTopN()); var label = new Batch<>("Set as Optimized", Limiter.ONCE, new SetAsOptimized()); - return asList(substitutions, operators, skip, cleanup, defaultTopN, label); + return asList(substitutions, operators(), skip, cleanup(), defaultTopN, label); } // TODO: 
currently this rule only works for aggregate functions (AVG) @@ -633,6 +636,7 @@ protected LogicalPlan rule(UnaryPlan plan) { } } + @SuppressWarnings("removal") static class PropagateEmptyRelation extends OptimizerRules.OptimizerRule { @Override @@ -650,29 +654,14 @@ protected LogicalPlan rule(UnaryPlan plan) { return p; } - private static List aggsFromEmpty(List aggs) { - // TODO: Should we introduce skip operator that just never queries the source + private List aggsFromEmpty(List aggs) { List blocks = new ArrayList<>(); - var blockFactory = BlockFactory.getNonBreakingInstance(); + var blockFactory = PlannerUtils.NON_BREAKING_BLOCK_FACTORY; int i = 0; for (var agg : aggs) { // there needs to be an alias if (agg instanceof Alias a && a.child() instanceof AggregateFunction aggFunc) { - List output = AbstractPhysicalOperationProviders.intermediateAttributes(List.of(agg), List.of()); - for (Attribute o : output) { - DataType dataType = o.dataType(); - // fill the boolean block later in LocalExecutionPlanner - if (dataType != DataTypes.BOOLEAN) { - // look for count(literal) with literal != null - var wrapper = BlockUtils.wrapperFor(blockFactory, PlannerUtils.toElementType(dataType), 1); - if (aggFunc instanceof Count count && (count.foldable() == false || count.fold() != null)) { - wrapper.accept(0L); - } else { - wrapper.accept(null); - } - blocks.add(wrapper.builder().build()); - } - } + aggOutput(agg, aggFunc, blockFactory, blocks); } else { throw new EsqlIllegalArgumentException("Did not expect a non-aliased aggregation {}", agg); } @@ -680,6 +669,16 @@ private static List aggsFromEmpty(List aggs) { return blocks; } + /** + * The folded aggregation output - this variant is for the coordinator/final. + */ + protected void aggOutput(NamedExpression agg, AggregateFunction aggFunc, BlockFactory blockFactory, List blocks) { + // look for count(literal) with literal != null + Object value = aggFunc instanceof Count count && (count.foldable() == false || count.fold() != null) ? 0L : null; + var wrapper = BlockUtils.wrapperFor(blockFactory, PlannerUtils.toElementType(aggFunc.dataType()), 1); + wrapper.accept(value); + blocks.add(wrapper.builder().build()); + } } private static LogicalPlan skipPlan(UnaryPlan plan) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsqlTranslatorHandler.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsqlTranslatorHandler.java new file mode 100644 index 0000000000000..98ac1a2d9910a --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsqlTranslatorHandler.java @@ -0,0 +1,59 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.planner; + +import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; +import org.elasticsearch.xpack.esql.querydsl.query.SingleValueQuery; +import org.elasticsearch.xpack.esql.type.EsqlDataTypeConverter; +import org.elasticsearch.xpack.ql.expression.Expression; +import org.elasticsearch.xpack.ql.expression.FieldAttribute; +import org.elasticsearch.xpack.ql.expression.MetadataAttribute; +import org.elasticsearch.xpack.ql.expression.function.scalar.ScalarFunction; +import org.elasticsearch.xpack.ql.expression.predicate.nulls.IsNotNull; +import org.elasticsearch.xpack.ql.expression.predicate.nulls.IsNull; +import org.elasticsearch.xpack.ql.planner.ExpressionTranslator; +import org.elasticsearch.xpack.ql.planner.ExpressionTranslators; +import org.elasticsearch.xpack.ql.planner.QlTranslatorHandler; +import org.elasticsearch.xpack.ql.querydsl.query.Query; +import org.elasticsearch.xpack.ql.type.DataType; + +import java.util.function.Supplier; + +public final class EsqlTranslatorHandler extends QlTranslatorHandler { + @Override + public Query asQuery(Expression e) { + return ExpressionTranslators.toQuery(e, this); + } + + @Override + public Object convert(Object value, DataType dataType) { + return EsqlDataTypeConverter.convert(value, dataType); + } + + @Override + public Query wrapFunctionQuery(ScalarFunction sf, Expression field, Supplier querySupplier) { + if (field instanceof FieldAttribute fa) { + if (fa.getExactInfo().hasExact()) { + var exact = fa.exactAttribute(); + if (exact != fa) { + fa = exact; + } + } + // don't wrap is null/is not null with SVQ + Query query = querySupplier.get(); + if ((sf instanceof IsNull || sf instanceof IsNotNull) == false) { + query = new SingleValueQuery(query, fa.name()); + } + return ExpressionTranslator.wrapIfNested(query, field); + } + if (field instanceof MetadataAttribute) { + return querySupplier.get(); // MetadataAttributes are always single valued + } + throw new EsqlIllegalArgumentException("Expected a FieldAttribute or MetadataAttribute but received [" + field + "]"); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java index e4e2402a9c7a3..5620969625575 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java @@ -56,7 +56,6 @@ import org.elasticsearch.xpack.esql.enrich.EnrichLookupService; import org.elasticsearch.xpack.esql.evaluator.EvalMapper; import org.elasticsearch.xpack.esql.evaluator.command.GrokEvaluatorExtracter; -import org.elasticsearch.xpack.esql.plan.logical.local.LocalSupplier; import org.elasticsearch.xpack.esql.plan.physical.AggregateExec; import org.elasticsearch.xpack.esql.plan.physical.DissectExec; import org.elasticsearch.xpack.esql.plan.physical.EnrichExec; @@ -88,7 +87,6 @@ import org.elasticsearch.xpack.ql.expression.NameId; import org.elasticsearch.xpack.ql.expression.NamedExpression; import org.elasticsearch.xpack.ql.expression.Order; -import org.elasticsearch.xpack.ql.type.DataTypes; import org.elasticsearch.xpack.ql.util.Holder; import java.util.ArrayList; @@ -323,29 +321,6 @@ private PhysicalOperation planExchange(ExchangeExec exchangeExec, LocalExecution private PhysicalOperation planExchangeSink(ExchangeSinkExec exchangeSink, 
LocalExecutionPlannerContext context) { Objects.requireNonNull(exchangeSinkHandler, "ExchangeSinkHandler wasn't provided"); var child = exchangeSink.child(); - // see https://github.com/elastic/elasticsearch/issues/100807 - handle case where the plan has been fully minimized - // to a local relation and the aggregate intermediate data erased. For this scenario, match the output the exchange output - // with that of the local relation - - if (child instanceof LocalSourceExec localExec) { - var output = exchangeSink.output(); - var localOutput = localExec.output(); - if (output.equals(localOutput) == false) { - // the outputs are going to be similar except for the bool "seen" flags which are added in below - List blocks = new ArrayList<>(asList(localExec.supplier().get())); - if (blocks.size() > 0) { - for (int i = 0, s = output.size(); i < s; i++) { - var out = output.get(i); - if (out.dataType() == DataTypes.BOOLEAN) { - blocks.add(i, BlockFactory.getNonBreakingInstance().newConstantBooleanBlockWith(true, 1)); - } - } - } - var newSupplier = LocalSupplier.of(blocks.toArray(Block[]::new)); - - child = new LocalSourceExec(localExec.source(), output, newSupplier); - } - } PhysicalOperation source = plan(child, context); @@ -814,9 +789,7 @@ public List createDrivers(String sessionId) { @Override public String describe() { - StringBuilder sb = new StringBuilder(); - sb.append(driverFactories.stream().map(DriverFactory::describe).collect(joining("\n"))); - return sb.toString(); + return driverFactories.stream().map(DriverFactory::describe).collect(joining("\n")); } } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/PlannerUtils.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/PlannerUtils.java index adf684d573cd1..9780093fb2dc7 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/PlannerUtils.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/PlannerUtils.java @@ -8,6 +8,9 @@ package org.elasticsearch.xpack.esql.planner; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.breaker.NoopCircuitBreaker; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.ElementType; import org.elasticsearch.core.Tuple; import org.elasticsearch.index.query.QueryBuilder; @@ -17,6 +20,8 @@ import org.elasticsearch.xpack.esql.optimizer.LocalLogicalPlanOptimizer; import org.elasticsearch.xpack.esql.optimizer.LocalPhysicalOptimizerContext; import org.elasticsearch.xpack.esql.optimizer.LocalPhysicalPlanOptimizer; +import org.elasticsearch.xpack.esql.plan.logical.Enrich; +import org.elasticsearch.xpack.esql.plan.physical.EnrichExec; import org.elasticsearch.xpack.esql.plan.physical.EsQueryExec; import org.elasticsearch.xpack.esql.plan.physical.EsSourceExec; import org.elasticsearch.xpack.esql.plan.physical.EstimatesRowSize; @@ -65,6 +70,19 @@ public static Tuple breakPlanBetweenCoordinatorAndDa return new Tuple<>(coordinatorPlan, dataNodePlan.get()); } + public static boolean hasEnrich(PhysicalPlan plan) { + boolean[] found = { false }; + plan.forEachDown(p -> { + if (p instanceof EnrichExec) { + found[0] = true; + } + if (p instanceof FragmentExec f) { + f.fragment().forEachDown(Enrich.class, e -> found[0] = true); + } + }); + return found[0]; + } + /** * Returns a set of concrete indices after resolving the original indices specified in the FROM command. 
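* <p>
* For example (index names hypothetical): a query beginning with {@code FROM logs*} may resolve here to the
* concrete backing indices {@code [logs-1, logs-2]} once wildcards and aliases are expanded.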
*/ @@ -222,4 +240,14 @@ public static ElementType toElementType(DataType dataType) { } throw EsqlIllegalArgumentException.illegalDataType(dataType); } + + /** + * A non-breaking block factory used to create small pages during the planning + * TODO: Remove this + */ + @Deprecated(forRemoval = true) + public static final BlockFactory NON_BREAKING_BLOCK_FACTORY = BlockFactory.getInstance( + new NoopCircuitBreaker("noop-esql-breaker"), + BigArrays.NON_RECYCLING_INSTANCE + ); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ClusterComputeRequest.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ClusterComputeRequest.java new file mode 100644 index 0000000000000..e25136f4d9532 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ClusterComputeRequest.java @@ -0,0 +1,165 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.plugin; + +import org.elasticsearch.action.IndicesRequest; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.compute.operator.exchange.ExchangeService; +import org.elasticsearch.tasks.CancellableTask; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.tasks.TaskId; +import org.elasticsearch.transport.TransportRequest; +import org.elasticsearch.xpack.esql.io.stream.PlanNameRegistry; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; +import org.elasticsearch.xpack.esql.io.stream.PlanStreamOutput; +import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan; +import org.elasticsearch.xpack.esql.session.EsqlConfiguration; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Map; +import java.util.Objects; + +/** + * A request to initiate a compute on a remote cluster. The output pages of the compute on the remote cluster will be placed in an + * exchange sink specified by the {@code sessionId}. The exchange sink associated with this {@code sessionId} should have been opened + * via {@link ExchangeService#openExchange} before sending this request to the remote cluster. The coordinator on the main cluster + * will poll pages from this sink. Internally, this compute will trigger sub-computes on data nodes via {@link DataNodeRequest}. + */ +final class ClusterComputeRequest extends TransportRequest implements IndicesRequest { + private static final PlanNameRegistry planNameRegistry = new PlanNameRegistry(); + private final String clusterAlias; + private final String sessionId; + private final EsqlConfiguration configuration; + private final PhysicalPlan plan; + + private final String[] originalIndices; + private final String[] indices; + + /** + * A request to start a compute on a remote cluster. 
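+ * <p>
+ * Illustrative flow, condensed from {@code ComputeService#startComputeOnRemoteClusters} in this change; the
+ * remote exchange is opened first so the sink exists before this request arrives:
+ * <pre>{@code
+ * ExchangeService.openExchange(transportService, connection, sessionId, bufferSize, executor,
+ *     listener.delegateFailureAndWrap((l, unused) -> transportService.sendChildRequest(connection,
+ *         ComputeService.CLUSTER_ACTION_NAME,
+ *         new ClusterComputeRequest(clusterAlias, sessionId, configuration, plan, indices, originalIndices),
+ *         rootTask, TransportRequestOptions.EMPTY,
+ *         new ActionListenerResponseHandler<>(l, ComputeResponse::new, executor))));
+ * }</pre>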
+ * + * @param clusterAlias the cluster alias of this remote cluster + * @param sessionId the sessionId in which the output pages will be placed in the exchange sink specified by this id + * @param configuration the configuration for this compute + * @param plan the physical plan to be executed + * @param indices the target indices + * @param originalIndices the original indices - needed to resolve alias filters + */ + ClusterComputeRequest( + String clusterAlias, + String sessionId, + EsqlConfiguration configuration, + PhysicalPlan plan, + String[] indices, + String[] originalIndices + ) { + this.clusterAlias = clusterAlias; + this.sessionId = sessionId; + this.configuration = configuration; + this.plan = plan; + this.indices = indices; + this.originalIndices = originalIndices; + } + + ClusterComputeRequest(StreamInput in) throws IOException { + super(in); + this.clusterAlias = in.readString(); + this.sessionId = in.readString(); + this.configuration = new EsqlConfiguration(in); + this.plan = new PlanStreamInput(in, planNameRegistry, in.namedWriteableRegistry(), configuration).readPhysicalPlanNode(); + this.indices = in.readStringArray(); + this.originalIndices = in.readStringArray(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(clusterAlias); + out.writeString(sessionId); + configuration.writeTo(out); + new PlanStreamOutput(out, planNameRegistry).writePhysicalPlanNode(plan); + out.writeStringArray(indices); + out.writeStringArray(originalIndices); + } + + @Override + public String[] indices() { + return indices; + } + + @Override + public IndicesOptions indicesOptions() { + return IndicesOptions.strictSingleIndexNoExpandForbidClosed(); + } + + @Override + public Task createTask(long id, String type, String action, TaskId parentTaskId, Map<String, String> headers) { + if (parentTaskId.isSet() == false) { + assert false : "ClusterComputeRequest must have a parent task"; + throw new IllegalStateException("ClusterComputeRequest must have a parent task"); + } + return new CancellableTask(id, type, action, "", parentTaskId, headers) { + @Override + public String getDescription() { + return ClusterComputeRequest.this.getDescription(); + } + }; + } + + String clusterAlias() { + return clusterAlias; + } + + String sessionId() { + return sessionId; + } + + EsqlConfiguration configuration() { + return configuration; + } + + String[] originalIndices() { + return originalIndices; + } + + PhysicalPlan plan() { + return plan; + } + + @Override + public String getDescription() { + return "indices=" + Arrays.toString(indices) + " plan=" + plan; + } + + @Override + public String toString() { + return "ClusterComputeRequest{" + getDescription() + "}"; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + ClusterComputeRequest request = (ClusterComputeRequest) o; + return clusterAlias.equals(request.clusterAlias) + && sessionId.equals(request.sessionId) + && configuration.equals(request.configuration) + && Arrays.equals(indices, request.indices) + && Arrays.equals(originalIndices, request.originalIndices) + && plan.equals(request.plan) + && getParentTask().equals(request.getParentTask()); + } + + @Override + public int hashCode() { + return Objects.hash(sessionId, configuration, Arrays.hashCode(indices), Arrays.hashCode(originalIndices), plan); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeResponse.java 
b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeResponse.java new file mode 100644 index 0000000000000..44796ca78aa91 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeResponse.java @@ -0,0 +1,57 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.plugin; + +import org.elasticsearch.TransportVersions; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.compute.operator.DriverProfile; +import org.elasticsearch.transport.TransportResponse; + +import java.io.IOException; +import java.util.List; + +/** + * The compute result of {@link DataNodeRequest} or {@link ClusterComputeRequest} + */ +final class ComputeResponse extends TransportResponse { + private final List<DriverProfile> profiles; + + ComputeResponse(List<DriverProfile> profiles) { + this.profiles = profiles; + } + + ComputeResponse(StreamInput in) throws IOException { + super(in); + if (in.getTransportVersion().onOrAfter(TransportVersions.ESQL_PROFILE)) { + if (in.readBoolean()) { + profiles = in.readCollectionAsImmutableList(DriverProfile::new); + } else { + profiles = null; + } + } else { + profiles = null; + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_PROFILE)) { + if (profiles == null) { + out.writeBoolean(false); + } else { + out.writeBoolean(true); + out.writeCollection(profiles); + } + } + } + + public List<DriverProfile> getProfiles() { + return profiles; + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java index 20fcc05e80440..0211432378711 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java @@ -7,26 +7,24 @@ package org.elasticsearch.xpack.esql.plugin; -import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListenerResponseHandler; +import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchShardsGroup; import org.elasticsearch.action.search.SearchShardsRequest; import org.elasticsearch.action.search.SearchShardsResponse; import org.elasticsearch.action.search.TransportSearchShardsAction; +import org.elasticsearch.action.support.ChannelActionListener; import org.elasticsearch.action.support.ContextPreservingActionListener; import org.elasticsearch.action.support.RefCountingListener; import org.elasticsearch.action.support.RefCountingRunnable; import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.CountDown; import org.elasticsearch.common.util.concurrent.ThreadContext; -import 
org.elasticsearch.compute.OwningChannelActionListener; import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.Page; import org.elasticsearch.compute.operator.Driver; @@ -37,6 +35,7 @@ import org.elasticsearch.compute.operator.exchange.ExchangeService; import org.elasticsearch.compute.operator.exchange.ExchangeSinkHandler; import org.elasticsearch.compute.operator.exchange.ExchangeSourceHandler; +import org.elasticsearch.compute.operator.exchange.RemoteSink; import org.elasticsearch.core.IOUtils; import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Releasables; @@ -57,13 +56,16 @@ import org.elasticsearch.tasks.TaskCancelledException; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.RemoteClusterAware; +import org.elasticsearch.transport.RemoteClusterService; +import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportChannel; import org.elasticsearch.transport.TransportRequestHandler; import org.elasticsearch.transport.TransportRequestOptions; -import org.elasticsearch.transport.TransportResponse; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.esql.action.EsqlQueryAction; import org.elasticsearch.xpack.esql.enrich.EnrichLookupService; +import org.elasticsearch.xpack.esql.plan.physical.ExchangeSinkExec; +import org.elasticsearch.xpack.esql.plan.physical.ExchangeSourceExec; import org.elasticsearch.xpack.esql.plan.physical.OutputExec; import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan; import org.elasticsearch.xpack.esql.planner.EsPhysicalOperationProviders; @@ -71,9 +73,7 @@ import org.elasticsearch.xpack.esql.planner.PlannerUtils; import org.elasticsearch.xpack.esql.session.EsqlConfiguration; -import java.io.IOException; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.List; @@ -120,6 +120,12 @@ public ComputeService( this.blockFactory = blockFactory; this.esqlExecutor = threadPool.executor(ESQL_THREAD_POOL_NAME); transportService.registerRequestHandler(DATA_ACTION_NAME, this.esqlExecutor, DataNodeRequest::new, new DataNodeRequestHandler()); + transportService.registerRequestHandler( + CLUSTER_ACTION_NAME, + this.esqlExecutor, + ClusterComputeRequest::new, + new ClusterRequestHandler() + ); this.driverRunner = new DriverTaskRunner(transportService, this.esqlExecutor); this.exchangeService = exchangeService; this.enrichLookupService = enrichLookupService; @@ -144,12 +150,14 @@ public void execute( }); PhysicalPlan coordinatorPlan = new OutputExec(coordinatorAndDataNodePlan.v1(), collectedPages::add); PhysicalPlan dataNodePlan = coordinatorAndDataNodePlan.v2(); - - var concreteIndices = PlannerUtils.planConcreteIndices(physicalPlan); - + if (dataNodePlan != null && dataNodePlan instanceof ExchangeSinkExec == false) { + listener.onFailure(new IllegalStateException("expect data node plan starts with an ExchangeSink; got " + dataNodePlan)); + return; + } + Map clusterToConcreteIndices = transportService.getRemoteClusterService() + .groupIndices(SearchRequest.DEFAULT_INDICES_OPTIONS, PlannerUtils.planConcreteIndices(physicalPlan).toArray(String[]::new)); QueryPragmas queryPragmas = configuration.pragmas(); - - if (concreteIndices.isEmpty()) { + if (dataNodePlan == null || clusterToConcreteIndices.values().stream().allMatch(v -> v.indices().length == 0)) { var computeContext = new ComputeContext(sessionId, List.of(), configuration, null, null); 
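// no shards to query on any cluster (or no data-node plan at all): run only the coordinator plan and return its pages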
runCompute( rootTask, @@ -159,108 +167,204 @@ public void execute( ); return; } - QueryBuilder requestFilter = PlannerUtils.requestFilter(dataNodePlan); - - LOGGER.debug("Sending data node plan\n{}\n with filter [{}]", dataNodePlan, requestFilter); - + Map clusterToOriginalIndices = transportService.getRemoteClusterService() + .groupIndices(SearchRequest.DEFAULT_INDICES_OPTIONS, PlannerUtils.planOriginalIndices(physicalPlan)); + var localOriginalIndices = clusterToOriginalIndices.remove(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); + var localConcreteIndices = clusterToConcreteIndices.remove(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); + if (clusterToOriginalIndices.isEmpty() == false && PlannerUtils.hasEnrich(physicalPlan)) { + listener.onFailure(new IllegalArgumentException("cross clusters query doesn't support enrich yet")); + return; + } final var responseHeadersCollector = new ResponseHeadersCollector(transportService.getThreadPool().getThreadContext()); listener = ActionListener.runBefore(listener, responseHeadersCollector::finish); - String[] originalIndices = PlannerUtils.planOriginalIndices(physicalPlan); - computeTargetNodes( - rootTask, - requestFilter, - concreteIndices, - originalIndices, - listener.delegateFailureAndWrap((delegate, targetNodes) -> { - final ExchangeSourceHandler exchangeSource = exchangeService.createSourceHandler( + final AtomicBoolean cancelled = new AtomicBoolean(); + final List collectedProfiles = configuration.profile() ? Collections.synchronizedList(new ArrayList<>()) : List.of(); + final var exchangeSource = new ExchangeSourceHandler( + queryPragmas.exchangeBufferSize(), + transportService.getThreadPool().executor(ESQL_THREAD_POOL_NAME) + ); + try ( + Releasable ignored = exchangeSource::decRef; + RefCountingListener refs = new RefCountingListener(listener.map(unused -> new Result(collectedPages, collectedProfiles))) + ) { + // wait until the source handler is completed + exchangeSource.addCompletionListener(refs.acquire()); + // run compute on the coordinator + runCompute( + rootTask, + new ComputeContext(sessionId, List.of(), configuration, exchangeSource, null), + coordinatorPlan, + cancelOnFailure(rootTask, cancelled, refs.acquire()).map(driverProfiles -> { + responseHeadersCollector.collect(); + if (configuration.profile()) { + collectedProfiles.addAll(driverProfiles); + } + return null; + }) + ); + // starts computes on data nodes on the main cluster + if (localConcreteIndices != null && localConcreteIndices.indices().length > 0) { + startComputeOnDataNodes( sessionId, - queryPragmas.exchangeBufferSize(), - ESQL_THREAD_POOL_NAME + RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY, + rootTask, + configuration, + dataNodePlan, + Set.of(localConcreteIndices.indices()), + localOriginalIndices.indices(), + exchangeSource, + () -> cancelOnFailure(rootTask, cancelled, refs.acquire()).map(response -> { + responseHeadersCollector.collect(); + if (configuration.profile()) { + collectedProfiles.addAll(response.getProfiles()); + } + return null; + }) ); - final List collectedProfiles = configuration.profile() - ? 
Collections.synchronizedList(new ArrayList<>()) - : null; - try ( - Releasable ignored = exchangeSource::decRef; - RefCountingListener requestRefs = new RefCountingListener( - delegate.map(unused -> new Result(collectedPages, collectedProfiles)) - ) - ) { - final AtomicBoolean cancelled = new AtomicBoolean(); - // wait until the source handler is completed - exchangeSource.addCompletionListener(requestRefs.acquire()); - // run compute on the coordinator - var computeContext = new ComputeContext(sessionId, List.of(), configuration, exchangeSource, null); - runCompute( - rootTask, - computeContext, - coordinatorPlan, - cancelOnFailure(rootTask, cancelled, requestRefs.acquire()).map(driverProfiles -> { - responseHeadersCollector.collect(); - if (configuration.profile()) { - collectedProfiles.addAll(driverProfiles); - } - return null; - }) - ); - // run compute on remote nodes - runComputeOnRemoteNodes( + } + // starts computes on remote cluster + startComputeOnRemoteClusters( + sessionId, + rootTask, + configuration, + dataNodePlan, + exchangeSource, + getRemoteClusters(clusterToConcreteIndices, clusterToOriginalIndices), + () -> cancelOnFailure(rootTask, cancelled, refs.acquire()).map(response -> { + responseHeadersCollector.collect(); + if (configuration.profile()) { + collectedProfiles.addAll(response.getProfiles()); + } + return null; + }) + ); + } + } + + private List getRemoteClusters( + Map clusterToConcreteIndices, + Map clusterToOriginalIndices + ) { + List remoteClusters = new ArrayList<>(clusterToConcreteIndices.size()); + RemoteClusterService remoteClusterService = transportService.getRemoteClusterService(); + for (Map.Entry e : clusterToConcreteIndices.entrySet()) { + String clusterAlias = e.getKey(); + OriginalIndices concreteIndices = clusterToConcreteIndices.get(clusterAlias); + OriginalIndices originalIndices = clusterToOriginalIndices.get(clusterAlias); + if (originalIndices == null) { + assert false : "can't find original indices for cluster " + clusterAlias; + throw new IllegalStateException("can't find original indices for cluster " + clusterAlias); + } + if (concreteIndices.indices().length > 0) { + Transport.Connection connection = remoteClusterService.getConnection(clusterAlias); + remoteClusters.add(new RemoteCluster(clusterAlias, connection, concreteIndices.indices(), originalIndices.indices())); + } + } + return remoteClusters; + } + + static final class EmptyRemoteSink implements RemoteSink { + final SubscribableListener future = new SubscribableListener<>(); + + @Override + public void fetchPageAsync(boolean allSourcesFinished, ActionListener listener) { + future.addListener(listener.map(ignored -> new ExchangeResponse(null, true))); + } + + void finish() { + future.onResponse(null); + } + } + + private void startComputeOnDataNodes( + String sessionId, + String clusterAlias, + CancellableTask parentTask, + EsqlConfiguration configuration, + PhysicalPlan dataNodePlan, + Set concreteIndices, + String[] originalIndices, + ExchangeSourceHandler exchangeSource, + Supplier> listener + ) { + // Do not complete the exchange sources until we have linked all remote sinks + final EmptyRemoteSink emptyRemoteSink = new EmptyRemoteSink(); + exchangeSource.addRemoteSink(emptyRemoteSink, 1); + QueryBuilder requestFilter = PlannerUtils.requestFilter(dataNodePlan); + lookupDataNodes(parentTask, clusterAlias, requestFilter, concreteIndices, originalIndices, ActionListener.wrap(dataNodes -> { + try (RefCountingRunnable refs = new RefCountingRunnable(emptyRemoteSink::finish)) { + // 
For each target node, first open a remote exchange on the remote node, then link the exchange source to + // the new remote exchange sink, and initialize the computation on the target node via data-node-request. + for (DataNode node : dataNodes) { + var dataNodeListener = ActionListener.releaseAfter(listener.get(), refs.acquire()); + var queryPragmas = configuration.pragmas(); + ExchangeService.openExchange( + transportService, + node.connection, sessionId, - rootTask, - configuration, - dataNodePlan, - exchangeSource, - targetNodes, - () -> cancelOnFailure(rootTask, cancelled, requestRefs.acquire()).map(response -> { - responseHeadersCollector.collect(); - if (configuration.profile()) { - collectedProfiles.addAll(response.profiles); - } - return null; + queryPragmas.exchangeBufferSize(), + esqlExecutor, + dataNodeListener.delegateFailureAndWrap((delegate, unused) -> { + var remoteSink = exchangeService.newRemoteSink(parentTask, sessionId, transportService, node.connection); + exchangeSource.addRemoteSink(remoteSink, queryPragmas.concurrentExchangeClients()); + transportService.sendChildRequest( + node.connection, + DATA_ACTION_NAME, + new DataNodeRequest(sessionId, configuration, clusterAlias, node.shardIds, node.aliasFilters, dataNodePlan), + parentTask, + TransportRequestOptions.EMPTY, + new ActionListenerResponseHandler<>(delegate, ComputeResponse::new, esqlExecutor) + ); }) ); } - }) - ); + } + }, e -> { + emptyRemoteSink.finish(); + listener.get().onFailure(e); + })); } - private void runComputeOnRemoteNodes( + private void startComputeOnRemoteClusters( String sessionId, CancellableTask rootTask, EsqlConfiguration configuration, - PhysicalPlan dataNodePlan, + PhysicalPlan plan, ExchangeSourceHandler exchangeSource, - List targetNodes, - Supplier> listener + List clusters, + Supplier> listener ) { // Do not complete the exchange sources until we have linked all remote sinks - final SubscribableListener blockingSinkFuture = new SubscribableListener<>(); - exchangeSource.addRemoteSink( - (sourceFinished, l) -> blockingSinkFuture.addListener(l.map(ignored -> new ExchangeResponse(null, true))), - 1 - ); - try (RefCountingRunnable exchangeRefs = new RefCountingRunnable(() -> blockingSinkFuture.onResponse(null))) { - // For each target node, first open a remote exchange on the remote node, then link the exchange source to - // the new remote exchange sink, and initialize the computation on the target node via data-node-request. 
- for (TargetNode targetNode : targetNodes) { - var targetNodeListener = ActionListener.releaseAfter(listener.get(), exchangeRefs.acquire()); + final EmptyRemoteSink emptyRemoteSink = new EmptyRemoteSink(); + exchangeSource.addRemoteSink(emptyRemoteSink, 1); + try (RefCountingRunnable refs = new RefCountingRunnable(emptyRemoteSink::finish)) { + for (RemoteCluster cluster : clusters) { + var targetNodeListener = ActionListener.releaseAfter(listener.get(), refs.acquire()); var queryPragmas = configuration.pragmas(); ExchangeService.openExchange( transportService, - targetNode.node(), + cluster.connection, sessionId, queryPragmas.exchangeBufferSize(), esqlExecutor, - targetNodeListener.delegateFailureAndWrap((delegate, unused) -> { - var remoteSink = exchangeService.newRemoteSink(rootTask, sessionId, transportService, targetNode.node); + targetNodeListener.delegateFailureAndWrap((l, unused) -> { + var remoteSink = exchangeService.newRemoteSink(rootTask, sessionId, transportService, cluster.connection); exchangeSource.addRemoteSink(remoteSink, queryPragmas.concurrentExchangeClients()); + var clusterRequest = new ClusterComputeRequest( + cluster.clusterAlias, + sessionId, + configuration, + plan, + cluster.concreteIndices, + cluster.originalIndices + ); transportService.sendChildRequest( - targetNode.node, - DATA_ACTION_NAME, - new DataNodeRequest(sessionId, configuration, targetNode.shardIds, targetNode.aliasFilters, dataNodePlan), + cluster.connection, + CLUSTER_ACTION_NAME, + clusterRequest, rootTask, TransportRequestOptions.EMPTY, - new ActionListenerResponseHandler<>(delegate, DataNodeResponse::new, esqlExecutor) + new ActionListenerResponseHandler<>(l, ComputeResponse::new, esqlExecutor) ); }) ); @@ -327,6 +431,7 @@ void runCompute(CancellableTask task, ComputeContext context, PhysicalPlan plan, } private void acquireSearchContexts( + String clusterAlias, List shardIds, EsqlConfiguration configuration, Map aliasFilters, @@ -355,7 +460,8 @@ private void acquireSearchContexts( var shardRequest = new ShardSearchRequest( shard.shardId(), configuration.absoluteStartedTimeInMillis(), - aliasFilter + aliasFilter, + clusterAlias ); SearchContext context = searchService.createSearchContext(shardRequest, SearchService.NO_TIMEOUT); searchContexts.add(context); @@ -379,27 +485,28 @@ private void acquireSearchContexts( } } - record TargetNode(DiscoveryNode node, List shardIds, Map aliasFilters) { + record DataNode(Transport.Connection connection, List shardIds, Map aliasFilters) { } - private void computeTargetNodes( + record RemoteCluster(String clusterAlias, Transport.Connection connection, String[] concreteIndices, String[] originalIndices) { + + } + + /** + * Performs can_match and find the target nodes for the given target indices and filter. + *
+ * <p>
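+ * Illustrative call, condensed from {@code startComputeOnDataNodes} above (no new API implied):
+ * <pre>{@code
+ * lookupDataNodes(parentTask, clusterAlias, requestFilter, concreteIndices, originalIndices,
+ *     ActionListener.wrap(dataNodes -> {
+ *         // open one exchange per data node, then send each node a DataNodeRequest
+ *     }, listener::onFailure));
+ * }</pre>
+ * <p>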
+ * Ideally, the search_shards API should be called before the field-caps API; however, this can lead + * to a situation where the column structure (i.e., matched data types) differs depending on the query. + */ + void lookupDataNodes( Task parentTask, + String clusterAlias, QueryBuilder filter, Set<String> concreteIndices, String[] originalIndices, - ActionListener<List<TargetNode>> listener + ActionListener<List<DataNode>> listener ) { - var remoteIndices = transportService.getRemoteClusterService().groupIndices(SearchRequest.DEFAULT_INDICES_OPTIONS, originalIndices); - remoteIndices.remove(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); - if (remoteIndices.isEmpty() == false) { - listener.onFailure( - new IllegalArgumentException("ES|QL does not yet support querying remote indices " + Arrays.toString(originalIndices)) - ); - return; - } - // Ideally, the search_shards API should be called before the field-caps API; however, this can lead - // to a situation where the column structure (i.e., matched data types) differs depending on the query. ThreadContext threadContext = transportService.getThreadPool().getThreadContext(); ActionListener<SearchShardsResponse> preservingContextListener = ContextPreservingActionListener.wrapPreservingContext( listener.map(resp -> { @@ -427,13 +534,13 @@ private void computeTargetNodes( nodeToAliasFilters.computeIfAbsent(targetNode, k -> new HashMap<>()).put(shardId.getIndex(), aliasFilter); } } - List<TargetNode> targetNodes = new ArrayList<>(nodeToShards.size()); + List<DataNode> dataNodes = new ArrayList<>(nodeToShards.size()); for (Map.Entry<String, List<ShardId>> e : nodeToShards.entrySet()) { DiscoveryNode node = nodes.get(e.getKey()); Map<Index, AliasFilter> aliasFilters = nodeToAliasFilters.getOrDefault(e.getKey(), Map.of()); - targetNodes.add(new TargetNode(node, e.getValue(), aliasFilters)); + dataNodes.add(new DataNode(transportService.getConnection(node), e.getValue(), aliasFilters)); } - return targetNodes; + return dataNodes; }), threadContext ); @@ -446,7 +553,7 @@ private void computeTargetNodes( null, null, false, - null + clusterAlias ); transportService.sendChildRequest( transportService.getLocalNode(), @@ -459,39 +566,6 @@ private void computeTargetNodes( } } - private static class DataNodeResponse extends TransportResponse { - private final List<DriverProfile> profiles; - - DataNodeResponse(List<DriverProfile> profiles) { - this.profiles = profiles; - } - - DataNodeResponse(StreamInput in) throws IOException { - super(in); - if (in.getTransportVersion().onOrAfter(TransportVersions.ESQL_PROFILE)) { - if (in.readBoolean()) { - profiles = in.readCollectionAsImmutableList(DriverProfile::new); - } else { - profiles = null; - } - } else { - profiles = null; - } - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_PROFILE)) { - if (profiles == null) { - out.writeBoolean(false); - } else { - out.writeBoolean(true); - out.writeCollection(profiles); - } - } - } - } - // TODO: Use an internal action here public static final String DATA_ACTION_NAME = EsqlQueryAction.NAME + "/data"; @@ -501,30 +575,138 @@ public void messageReceived(DataNodeRequest request, TransportChannel channel, T final var parentTask = (CancellableTask) task; final var sessionId = request.sessionId(); final var exchangeSink = exchangeService.getSinkHandler(sessionId); - parentTask.addListener(() -> exchangeService.finishSinkHandler(sessionId, new TaskCancelledException("task cancelled"))); - final ActionListener<DataNodeResponse> listener = new OwningChannelActionListener<>(channel); + parentTask.addListener( + () -> exchangeService.finishSinkHandler(sessionId, 
new TaskCancelledException(parentTask.getReasonCancelled())) + ); + final ActionListener listener = new ChannelActionListener<>(channel); final EsqlConfiguration configuration = request.configuration(); - acquireSearchContexts(request.shardIds(), configuration, request.aliasFilters(), ActionListener.wrap(searchContexts -> { - var computeContext = new ComputeContext(sessionId, searchContexts, configuration, null, exchangeSink); - runCompute(parentTask, computeContext, request.plan(), ActionListener.wrap(driverProfiles -> { - // don't return until all pages are fetched - exchangeSink.addCompletionListener( - ContextPreservingActionListener.wrapPreservingContext( - ActionListener.releaseAfter( - listener.map(nullValue -> new DataNodeResponse(driverProfiles)), - () -> exchangeService.finishSinkHandler(sessionId, null) - ), - transportService.getThreadPool().getThreadContext() - ) - ); + acquireSearchContexts( + request.clusterAlias(), + request.shardIds(), + configuration, + request.aliasFilters(), + ActionListener.wrap(searchContexts -> { + var computeContext = new ComputeContext(sessionId, searchContexts, configuration, null, exchangeSink); + runCompute(parentTask, computeContext, request.plan(), ActionListener.wrap(driverProfiles -> { + // don't return until all pages are fetched + exchangeSink.addCompletionListener( + ContextPreservingActionListener.wrapPreservingContext( + ActionListener.releaseAfter( + listener.map(nullValue -> new ComputeResponse(driverProfiles)), + () -> exchangeService.finishSinkHandler(sessionId, null) + ), + transportService.getThreadPool().getThreadContext() + ) + ); + }, e -> { + exchangeService.finishSinkHandler(sessionId, e); + listener.onFailure(e); + })); }, e -> { exchangeService.finishSinkHandler(sessionId, e); listener.onFailure(e); - })); - }, e -> { - exchangeService.finishSinkHandler(sessionId, e); - listener.onFailure(e); - })); + }) + ); + } + } + + public static final String CLUSTER_ACTION_NAME = EsqlQueryAction.NAME + "/cluster"; + + private class ClusterRequestHandler implements TransportRequestHandler { + @Override + public void messageReceived(ClusterComputeRequest request, TransportChannel channel, Task task) { + ChannelActionListener listener = new ChannelActionListener<>(channel); + if (request.plan() instanceof ExchangeSinkExec == false) { + listener.onFailure(new IllegalStateException("expected exchange sink for a remote compute; got " + request.plan())); + return; + } + runComputeOnRemoteCluster( + request.clusterAlias(), + request.sessionId(), + (CancellableTask) task, + request.configuration(), + (ExchangeSinkExec) request.plan(), + Set.of(request.indices()), + request.originalIndices(), + listener + ); + } + } + + /** + * Performs a compute on a remote cluster. The output pages are placed in an exchange sink specified by + * {@code globalSessionId}. The coordinator on the main cluster will poll pages from there. + *
* <p>
+ * Currently, the coordinator on the remote cluster simply collects pages from data nodes in the remote cluster + * and places them in the exchange sink. We can achieve this by using a single exchange buffer to minimize overhead. + * However, here we use two exchange buffers so that we can run an actual plan on this coordinator to perform partial + * reduce operations, such as limit, topN, and partial-to-partial aggregation in the future. + */ + void runComputeOnRemoteCluster( + String clusterAlias, + String globalSessionId, + CancellableTask parentTask, + EsqlConfiguration configuration, + ExchangeSinkExec plan, + Set concreteIndices, + String[] originalIndices, + ActionListener listener + ) { + final var exchangeSink = exchangeService.getSinkHandler(globalSessionId); + parentTask.addListener( + () -> exchangeService.finishSinkHandler(globalSessionId, new TaskCancelledException(parentTask.getReasonCancelled())) + ); + ThreadPool threadPool = transportService.getThreadPool(); + final var responseHeadersCollector = new ResponseHeadersCollector(threadPool.getThreadContext()); + listener = ActionListener.runBefore(listener, responseHeadersCollector::finish); + final AtomicBoolean cancelled = new AtomicBoolean(); + final List collectedProfiles = configuration.profile() ? Collections.synchronizedList(new ArrayList<>()) : List.of(); + final String localSessionId = clusterAlias + ":" + globalSessionId; + var exchangeSource = new ExchangeSourceHandler( + configuration.pragmas().exchangeBufferSize(), + transportService.getThreadPool().executor(ESQL_THREAD_POOL_NAME) + ); + try ( + Releasable ignored = exchangeSource::decRef; + RefCountingListener refs = new RefCountingListener(listener.map(unused -> new ComputeResponse(collectedProfiles))) + ) { + exchangeSource.addCompletionListener(refs.acquire()); + exchangeSink.addCompletionListener(refs.acquire()); + PhysicalPlan coordinatorPlan = new ExchangeSinkExec( + plan.source(), + plan.output(), + plan.isIntermediateAgg(), + new ExchangeSourceExec(plan.source(), plan.output(), plan.isIntermediateAgg()) + ); + runCompute( + parentTask, + new ComputeContext(localSessionId, List.of(), configuration, exchangeSource, exchangeSink), + coordinatorPlan, + cancelOnFailure(parentTask, cancelled, refs.acquire()).map(driverProfiles -> { + responseHeadersCollector.collect(); + if (configuration.profile()) { + collectedProfiles.addAll(driverProfiles); + } + return null; + }) + ); + startComputeOnDataNodes( + localSessionId, + clusterAlias, + parentTask, + configuration, + plan, + concreteIndices, + originalIndices, + exchangeSource, + () -> cancelOnFailure(parentTask, cancelled, refs.acquire()).map(r -> { + responseHeadersCollector.collect(); + if (configuration.profile()) { + collectedProfiles.addAll(r.getProfiles()); + } + return null; + }) + ); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/DataNodeRequest.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/DataNodeRequest.java index d8e5e576386e3..5067e62fa6970 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/DataNodeRequest.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/DataNodeRequest.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.esql.plugin; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.common.io.stream.StreamInput; @@ -17,6 +18,7 @@ import 
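/*
 * A simplified, single-threaded model of the two-buffer layout described in the
 * runComputeOnRemoteCluster javadoc above: pages from the remote data nodes arrive in an
 * exchange source, a (currently pass-through) plan runs between the buffers, and results are
 * staged in the exchange sink that the main-cluster coordinator polls. Plain Java with a
 * hypothetical Page record, not the actual Elasticsearch classes:
 *
 *   import java.util.ArrayDeque;
 *   import java.util.Queue;
 *   import java.util.function.UnaryOperator;
 *
 *   record Page(int positionCount) {}
 *
 *   final class RemoteCoordinatorModel {
 *       final Queue<Page> exchangeSource = new ArrayDeque<>(); // filled by local data nodes
 *       final Queue<Page> exchangeSink = new ArrayDeque<>();   // polled by the main cluster
 *
 *       // partialReduce is where limit/topN/partial aggregation could run in the future
 *       void run(UnaryOperator<Page> partialReduce) {
 *           Page page;
 *           while ((page = exchangeSource.poll()) != null) {
 *               exchangeSink.add(partialReduce.apply(page));
 *           }
 *       }
 *   }
 */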
org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; +import org.elasticsearch.transport.RemoteClusterAware; import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.xpack.esql.io.stream.PlanNameRegistry; import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; @@ -33,6 +35,7 @@ final class DataNodeRequest extends TransportRequest implements IndicesRequest { private static final PlanNameRegistry planNameRegistry = new PlanNameRegistry(); private final String sessionId; private final EsqlConfiguration configuration; + private final String clusterAlias; private final List shardIds; private final Map aliasFilters; private final PhysicalPlan plan; @@ -42,12 +45,14 @@ final class DataNodeRequest extends TransportRequest implements IndicesRequest { DataNodeRequest( String sessionId, EsqlConfiguration configuration, + String clusterAlias, List shardIds, Map aliasFilters, PhysicalPlan plan ) { this.sessionId = sessionId; this.configuration = configuration; + this.clusterAlias = clusterAlias; this.shardIds = shardIds; this.aliasFilters = aliasFilters; this.plan = plan; @@ -57,6 +62,11 @@ final class DataNodeRequest extends TransportRequest implements IndicesRequest { super(in); this.sessionId = in.readString(); this.configuration = new EsqlConfiguration(in); + if (in.getTransportVersion().onOrAfter(TransportVersions.ESQL_CLUSTER_ALIAS)) { + this.clusterAlias = in.readString(); + } else { + this.clusterAlias = RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY; + } this.shardIds = in.readCollectionAsList(ShardId::new); this.aliasFilters = in.readMap(Index::new, AliasFilter::readFrom); this.plan = new PlanStreamInput(in, planNameRegistry, in.namedWriteableRegistry(), configuration).readPhysicalPlanNode(); @@ -67,6 +77,9 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeString(sessionId); configuration.writeTo(out); + if (out.getTransportVersion().onOrAfter(TransportVersions.ESQL_CLUSTER_ALIAS)) { + out.writeString(clusterAlias); + } out.writeCollection(shardIds); out.writeMap(aliasFilters); new PlanStreamOutput(out, planNameRegistry).writePhysicalPlanNode(plan); @@ -111,6 +124,10 @@ QueryPragmas pragmas() { return configuration.pragmas(); } + String clusterAlias() { + return clusterAlias; + } + List shardIds() { return shardIds; } @@ -143,6 +160,7 @@ public boolean equals(Object o) { DataNodeRequest request = (DataNodeRequest) o; return sessionId.equals(request.sessionId) && configuration.equals(request.configuration) + && clusterAlias.equals(request.clusterAlias) && shardIds.equals(request.shardIds) && aliasFilters.equals(request.aliasFilters) && plan.equals(request.plan) @@ -151,6 +169,6 @@ public boolean equals(Object o) { @Override public int hashCode() { - return Objects.hash(sessionId, configuration, shardIds, aliasFilters, plan); + return Objects.hash(sessionId, configuration, clusterAlias, shardIds, aliasFilters, plan); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlPlugin.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlPlugin.java index e8a57e5a49808..07ca55aa665eb 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlPlugin.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlPlugin.java @@ -17,6 +17,8 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import 
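/*
 * Reading the new clusterAlias field above is gated on the transport version so mixed-version
 * clusters keep working: a request serialized by an older node simply falls back to the
 * local-cluster alias. A minimal model of that read path (plain Java, with DataInput standing
 * in for StreamInput and a hypothetical version constant):
 *
 *   import java.io.DataInput;
 *   import java.io.IOException;
 *
 *   final class VersionGatedField {
 *       static final int ESQL_CLUSTER_ALIAS = 8_500_000; // hypothetical version id
 *       static final String LOCAL_CLUSTER = "";          // stand-in for LOCAL_CLUSTER_GROUP_KEY
 *
 *       static String readClusterAlias(DataInput in, int peerTransportVersion) throws IOException {
 *           return peerTransportVersion >= ESQL_CLUSTER_ALIAS ? in.readUTF() : LOCAL_CLUSTER;
 *       }
 *   }
 */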
org.elasticsearch.common.settings.SettingsFilter; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BlockFactory; @@ -41,7 +43,11 @@ import org.elasticsearch.xpack.core.action.XPackUsageFeatureAction; import org.elasticsearch.xpack.esql.EsqlInfoTransportAction; import org.elasticsearch.xpack.esql.EsqlUsageTransportAction; +import org.elasticsearch.xpack.esql.action.EsqlAsyncGetResultAction; import org.elasticsearch.xpack.esql.action.EsqlQueryAction; +import org.elasticsearch.xpack.esql.action.RestEsqlAsyncQueryAction; +import org.elasticsearch.xpack.esql.action.RestEsqlDeleteAsyncResultAction; +import org.elasticsearch.xpack.esql.action.RestEsqlGetAsyncResultAction; import org.elasticsearch.xpack.esql.action.RestEsqlQueryAction; import org.elasticsearch.xpack.esql.execution.PlanExecutor; import org.elasticsearch.xpack.esql.querydsl.query.SingleValueQuery; @@ -82,7 +88,13 @@ public class EsqlPlugin extends Plugin implements ActionPlugin { public Collection createComponents(PluginServices services) { CircuitBreaker circuitBreaker = services.indicesService().getBigArrays().breakerService().getBreaker("request"); Objects.requireNonNull(circuitBreaker, "request circuit breaker wasn't set"); - BlockFactory blockFactory = new BlockFactory(circuitBreaker, services.indicesService().getBigArrays().withCircuitBreaking()); + Settings settings = services.clusterService().getSettings(); + ByteSizeValue maxPrimitiveArrayBlockSize = settings.getAsBytesSize( + BlockFactory.MAX_BLOCK_PRIMITIVE_ARRAY_SIZE_SETTING, + BlockFactory.DEFAULT_MAX_BLOCK_PRIMITIVE_ARRAY_SIZE + ); + BigArrays bigArrays = services.indicesService().getBigArrays().withCircuitBreaking(); + BlockFactory blockFactory = new BlockFactory(circuitBreaker, bigArrays, maxPrimitiveArrayBlockSize); return List.of( new PlanExecutor( new IndexResolver( @@ -116,6 +128,7 @@ public List> getSettings() { public List> getActions() { return List.of( new ActionHandler<>(EsqlQueryAction.INSTANCE, TransportEsqlQueryAction.class), + new ActionHandler<>(EsqlAsyncGetResultAction.INSTANCE, TransportEsqlAsyncGetResultsAction.class), new ActionHandler<>(EsqlStatsAction.INSTANCE, TransportEsqlStatsAction.class), new ActionHandler<>(XPackUsageFeatureAction.ESQL, EsqlUsageTransportAction.class), new ActionHandler<>(XPackInfoFeatureAction.ESQL, EsqlInfoTransportAction.class) @@ -132,7 +145,12 @@ public List getRestHandlers( IndexNameExpressionResolver indexNameExpressionResolver, Supplier nodesInCluster ) { - return List.of(new RestEsqlQueryAction()); + return List.of( + new RestEsqlQueryAction(), + new RestEsqlAsyncQueryAction(), + new RestEsqlGetAsyncResultAction(), + new RestEsqlDeleteAsyncResultAction() + ); } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlAsyncGetResultsAction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlAsyncGetResultsAction.java new file mode 100644 index 0000000000000..8785b8f5de887 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlAsyncGetResultsAction.java @@ -0,0 +1,58 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.plugin; + +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.compute.data.BlockFactory; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.esql.action.EsqlAsyncGetResultAction; +import org.elasticsearch.xpack.esql.action.EsqlQueryResponse; +import org.elasticsearch.xpack.esql.action.EsqlQueryTask; +import org.elasticsearch.xpack.ql.plugin.AbstractTransportQlAsyncGetResultsAction; + +public class TransportEsqlAsyncGetResultsAction extends AbstractTransportQlAsyncGetResultsAction { + + private final BlockFactory blockFactory; + + @Inject + public TransportEsqlAsyncGetResultsAction( + TransportService transportService, + ActionFilters actionFilters, + ClusterService clusterService, + NamedWriteableRegistry registry, + Client client, + ThreadPool threadPool, + BigArrays bigArrays, + BlockFactory blockFactory + ) { + super( + EsqlAsyncGetResultAction.NAME, + transportService, + actionFilters, + clusterService, + registry, + client, + threadPool, + bigArrays, + EsqlQueryTask.class + ); + this.blockFactory = blockFactory; + } + + @Override + public Writeable.Reader responseReader() { + return EsqlQueryResponse.reader(blockFactory); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlQueryAction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlQueryAction.java index d272aba26e4e8..baaa4abe23b3d 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlQueryAction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/TransportEsqlQueryAction.java @@ -11,8 +11,11 @@ import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.compute.data.BlockFactory; @@ -23,22 +26,32 @@ import org.elasticsearch.tasks.TaskId; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.XPackPlugin; +import org.elasticsearch.xpack.core.async.AsyncExecutionId; import org.elasticsearch.xpack.esql.action.ColumnInfo; import org.elasticsearch.xpack.esql.action.EsqlQueryAction; import org.elasticsearch.xpack.esql.action.EsqlQueryRequest; import org.elasticsearch.xpack.esql.action.EsqlQueryResponse; +import org.elasticsearch.xpack.esql.action.EsqlQueryTask; import org.elasticsearch.xpack.esql.enrich.EnrichLookupService; import org.elasticsearch.xpack.esql.enrich.EnrichPolicyResolver; import 
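/*
 * On the BlockFactory wiring in EsqlPlugin above: the plugin now sizes primitive-array blocks
 * from cluster settings and passes that limit as the factory's third constructor argument.
 * Contexts without memory accounting instead use the two-argument getInstance call, as
 * TestBlockFactory does later in this change; for example (variable name illustrative):
 *
 *   import org.elasticsearch.common.breaker.NoopCircuitBreaker;
 *   import org.elasticsearch.common.util.BigArrays;
 *   import org.elasticsearch.compute.data.BlockFactory;
 *
 *   BlockFactory unaccounted = BlockFactory.getInstance(
 *       new NoopCircuitBreaker("noop"),
 *       BigArrays.NON_RECYCLING_INSTANCE
 *   );
 */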
org.elasticsearch.xpack.esql.execution.PlanExecutor; import org.elasticsearch.xpack.esql.session.EsqlConfiguration; import org.elasticsearch.xpack.esql.type.EsqlDataTypes; +import org.elasticsearch.xpack.ql.async.AsyncTaskManagementService; +import java.io.IOException; import java.time.ZoneOffset; import java.util.List; import java.util.Locale; +import java.util.Map; import java.util.concurrent.Executor; -public class TransportEsqlQueryAction extends HandledTransportAction { +import static org.elasticsearch.xpack.core.ClientHelper.ASYNC_SEARCH_ORIGIN; + +public class TransportEsqlQueryAction extends HandledTransportAction + implements + AsyncTaskManagementService.AsyncOperation { private final PlanExecutor planExecutor; private final ComputeService computeService; @@ -47,8 +60,10 @@ public class TransportEsqlQueryAction extends HandledTransportAction asyncTaskManagementService; @Inject + @SuppressWarnings("this-escape") public TransportEsqlQueryAction( TransportService transportService, ActionFilters actionFilters, @@ -58,7 +73,10 @@ public TransportEsqlQueryAction( ClusterService clusterService, ThreadPool threadPool, BigArrays bigArrays, - BlockFactory blockFactory + BlockFactory blockFactory, + Client client, + NamedWriteableRegistry registry + ) { // TODO replace SAME when removing workaround for https://github.com/elastic/elasticsearch/issues/97916 super(EsqlQueryAction.NAME, transportService, actionFilters, EsqlQueryRequest::new, EsExecutors.DIRECT_EXECUTOR_SERVICE); @@ -79,15 +97,53 @@ public TransportEsqlQueryAction( bigArrays, blockFactory ); + this.asyncTaskManagementService = new AsyncTaskManagementService<>( + XPackPlugin.ASYNC_RESULTS_INDEX, + client, + ASYNC_SEARCH_ORIGIN, + registry, + taskManager, + EsqlQueryAction.INSTANCE.name(), + this, + EsqlQueryTask.class, + clusterService, + threadPool, + bigArrays + ); } @Override protected void doExecute(Task task, EsqlQueryRequest request, ActionListener listener) { // workaround for https://github.com/elastic/elasticsearch/issues/97916 - TODO remove this when we can - requestExecutor.execute(ActionRunnable.wrap(listener, l -> doExecuteForked(task, request, l))); + requestExecutor.execute( + ActionRunnable.wrap( + listener.delegateFailureAndWrap(ActionListener::respondAndRelease), + l -> doExecuteForked(task, request, l) + ) + ); } private void doExecuteForked(Task task, EsqlQueryRequest request, ActionListener listener) { + assert ThreadPool.assertCurrentThreadPool(EsqlPlugin.ESQL_THREAD_POOL_NAME); + if (requestIsAsync(request)) { + asyncTaskManagementService.asyncExecute( + request, + request.waitForCompletionTimeout(), + request.keepAlive(), + request.keepOnCompletion(), + listener + ); + } else { + innerExecute(task, request, listener); + } + } + + @Override + public void execute(EsqlQueryRequest request, EsqlQueryTask task, ActionListener listener) { + ActionListener.run(listener, l -> innerExecute(task, request, l)); + } + + private void innerExecute(Task task, EsqlQueryRequest request, ActionListener listener) { EsqlConfiguration configuration = new EsqlConfiguration( ZoneOffset.UTC, request.locale() != null ? request.locale() : Locale.US, @@ -120,7 +176,12 @@ private void doExecuteForked(Task task, EsqlQueryRequest request, ActionListener EsqlQueryResponse.Profile profile = configuration.profile() ? 
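/*
 * The async execution id is attached to the response only when the task ran async and the
 * caller asked to keep the result, which is the branch visible just below. A compact model of
 * that decision (plain Java, hypothetical Response type, not the actual classes):
 *
 *   record Response(String asyncExecutionId, boolean isRunning) {}
 *
 *   final class CompletionModel {
 *       static Response onComplete(boolean isAsyncTask, boolean keepOnCompletion, String executionId) {
 *           if (isAsyncTask && keepOnCompletion) {
 *               return new Response(executionId, false); // finished, but retrievable by id
 *           }
 *           return new Response(null, false); // plain synchronous-style response
 *       }
 *   }
 */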
new EsqlQueryResponse.Profile(result.profiles()) : null; - return new EsqlQueryResponse(columns, result.pages(), profile, request.columnar()); + if (task instanceof EsqlQueryTask asyncTask && request.keepOnCompletion()) { + String id = asyncTask.getExecutionId().getEncoded(); + return new EsqlQueryResponse(columns, result.pages(), profile, request.columnar(), id, false, request.async()); + } else { + return new EsqlQueryResponse(columns, result.pages(), profile, request.columnar(), request.async()); + } }) ) ) @@ -143,4 +204,50 @@ public ExchangeService exchangeService() { public EnrichLookupService enrichLookupService() { return enrichLookupService; } + + @Override + public EsqlQueryTask createTask( + EsqlQueryRequest request, + long id, + String type, + String action, + TaskId parentTaskId, + Map headers, + Map originHeaders, + AsyncExecutionId asyncExecutionId + ) { + return new EsqlQueryTask( + id, + type, + action, + request.getDescription(), + parentTaskId, + headers, + originHeaders, + asyncExecutionId, + request.keepAlive() + ); + } + + @Override + public EsqlQueryResponse initialResponse(EsqlQueryTask task) { + return new EsqlQueryResponse( + List.of(), + List.of(), + null, + false, + task.getExecutionId().getEncoded(), + true, // is_running + true // isAsync + ); + } + + @Override + public EsqlQueryResponse readResponse(StreamInput inputStream) throws IOException { + throw new AssertionError("should not reach here"); + } + + private static boolean requestIsAsync(EsqlQueryRequest request) { + return request.async(); + } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/stats/SearchStats.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/stats/SearchStats.java index b5d75a1528493..7300b05a2081f 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/stats/SearchStats.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/stats/SearchStats.java @@ -18,6 +18,7 @@ import org.apache.lucene.index.Term; import org.apache.lucene.index.Terms; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.index.mapper.AbstractScriptFieldType; import org.elasticsearch.index.mapper.ConstantFieldType; import org.elasticsearch.index.mapper.DocCountFieldMapper.DocCountFieldType; import org.elasticsearch.index.mapper.IdFieldMapper; @@ -43,9 +44,12 @@ public class SearchStats { private static class FieldStat { private Long count; - private Boolean exists; private Object min, max; + // TODO: use a multi-bitset instead + private Boolean exists; private Boolean singleValue; + private Boolean indexed; + private Boolean runtime; } private static final int CACHE_SIZE = 32; @@ -106,6 +110,12 @@ public boolean exists(String field) { break; } } + + // populate additional properties to save on the lookups + if (stat.exists == false) { + stat.indexed = false; + stat.singleValue = true; + } } return stat.exists; } @@ -161,14 +171,17 @@ public boolean isSingleValue(String field) { if (exists(field) == false) { stat.singleValue = true; } else { - var sv = new boolean[] { false }; + var sv = new boolean[] { true }; for (SearchContext context : contexts) { - MappedFieldType mappedType = context.getSearchExecutionContext().getFieldType(field); - doWithContexts(r -> { - sv[0] &= detectSingleValue(r, mappedType, field); - return sv[0]; - }, true); - break; + var sec = context.getSearchExecutionContext(); + MappedFieldType mappedType = sec.isFieldMapped(field) ? 
sec.getFieldType(field) : null; + if (mappedType != null) { + doWithContexts(r -> { + sv[0] &= detectSingleValue(r, mappedType, field); + return sv[0]; + }, true); + break; + } } stat.singleValue = sv[0]; } @@ -176,6 +189,46 @@ public boolean isSingleValue(String field) { return stat.singleValue; } + public boolean isRuntimeField(String field) { + var stat = cache.computeIfAbsent(field, s -> new FieldStat()); + if (stat.runtime == null) { + stat.runtime = false; + if (exists(field)) { + for (SearchContext context : contexts) { + var sec = context.getSearchExecutionContext(); + if (sec.isFieldMapped(field)) { + if (sec.getFieldType(field) instanceof AbstractScriptFieldType) { + stat.runtime = true; + break; + } + } + } + } + } + return stat.runtime; + } + + public boolean isIndexed(String field) { + var stat = cache.computeIfAbsent(field, s -> new FieldStat()); + if (stat.indexed == null) { + stat.indexed = false; + if (exists(field)) { + boolean indexed = true; + for (SearchContext context : contexts) { + var sec = context.getSearchExecutionContext(); + if (sec.isFieldMapped(field)) { + if (sec.getFieldType(field).isIndexed() == false) { + indexed = false; + break; + } + } + } + stat.indexed = indexed; + } + } + return stat.indexed; + } + private boolean detectSingleValue(IndexReader r, MappedFieldType fieldType, String name) throws IOException { // types that are always single value (and are accessible through instanceof) if (fieldType instanceof ConstantFieldType || fieldType instanceof DocCountFieldType || fieldType instanceof TimestampFieldType) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeRegistry.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeRegistry.java index 3b5aa5dbecc3d..2910a690bf8a0 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeRegistry.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypeRegistry.java @@ -16,7 +16,11 @@ import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.DATE_PERIOD; import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.TIME_DURATION; -import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.isTemporalAmount; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.isDateTimeOrTemporal; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.isNullOrDatePeriod; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.isNullOrTemporalAmount; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.isNullOrTimeDuration; +import static org.elasticsearch.xpack.ql.type.DataTypes.DATETIME; import static org.elasticsearch.xpack.ql.type.DataTypes.isDateTime; public class EsqlDataTypeRegistry implements DataTypeRegistry { @@ -61,14 +65,16 @@ public Object convert(Object value, DataType type) { @Override public DataType commonType(DataType left, DataType right) { - if (isDateTime(left) && isTemporalAmount(right) || isTemporalAmount(left) && isDateTime(right)) { - return DataTypes.DATETIME; - } - if (left == TIME_DURATION && right == TIME_DURATION) { - return TIME_DURATION; - } - if (left == DATE_PERIOD && right == DATE_PERIOD) { - return DATE_PERIOD; + if (isDateTimeOrTemporal(left) || isDateTimeOrTemporal(right)) { + if ((isDateTime(left) && isNullOrTemporalAmount(right)) || (isNullOrTemporalAmount(left) && isDateTime(right))) { + return DATETIME; + } + if (isNullOrTimeDuration(left) && isNullOrTimeDuration(right)) { + return TIME_DURATION; + } + if
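/*
 * The SearchStats accessors above all share one memoization pattern: each FieldStat property
 * is a nullable Boolean computed at most once per field, so repeated planner lookups avoid
 * rescanning the search contexts. Minimal model of the pattern (plain Java, not the actual
 * classes):
 *
 *   import java.util.HashMap;
 *   import java.util.Map;
 *   import java.util.function.BooleanSupplier;
 *
 *   final class FieldStatCache {
 *       private static final class Stat { Boolean indexed; }
 *       private final Map<String, Stat> cache = new HashMap<>();
 *
 *       boolean isIndexed(String field, BooleanSupplier computeOnce) {
 *           Stat stat = cache.computeIfAbsent(field, f -> new Stat());
 *           if (stat.indexed == null) {
 *               stat.indexed = computeOnce.getAsBoolean(); // computed once, cached thereafter
 *           }
 *           return stat.indexed;
 *       }
 *   }
 */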
(isNullOrDatePeriod(left) && isNullOrDatePeriod(right)) { + return DATE_PERIOD; + } } return EsqlDataTypeConverter.commonType(left, right); } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypes.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypes.java index 03e2d40c8cb48..eae808abb5037 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypes.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/type/EsqlDataTypes.java @@ -39,6 +39,7 @@ import static org.elasticsearch.xpack.ql.type.DataTypes.UNSIGNED_LONG; import static org.elasticsearch.xpack.ql.type.DataTypes.UNSUPPORTED; import static org.elasticsearch.xpack.ql.type.DataTypes.VERSION; +import static org.elasticsearch.xpack.ql.type.DataTypes.isNull; public final class EsqlDataTypes { @@ -153,6 +154,18 @@ public static boolean isTemporalAmount(DataType t) { return t == DATE_PERIOD || t == TIME_DURATION; } + public static boolean isNullOrTemporalAmount(DataType t) { + return isTemporalAmount(t) || isNull(t); + } + + public static boolean isNullOrDatePeriod(DataType t) { + return t == DATE_PERIOD || isNull(t); + } + + public static boolean isNullOrTimeDuration(DataType t) { + return t == TIME_DURATION || isNull(t); + } + public static boolean isSpatial(DataType t) { return t == GEO_POINT || t == CARTESIAN_POINT; } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java index a5e3033b6e1e3..d19922afd2815 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java @@ -331,11 +331,16 @@ private ActualResults executePlan(BigArrays bigArrays) throws Exception { ExchangeSinkHandler exchangeSink = new ExchangeSinkHandler(between(1, 64), threadPool::relativeTimeInMillis); Settings.Builder settings = Settings.builder(); + BlockFactory blockFactory = new BlockFactory( + bigArrays.breakerService().getBreaker(CircuitBreaker.REQUEST), + bigArrays, + ByteSizeValue.ofBytes(randomLongBetween(1, BlockFactory.DEFAULT_MAX_BLOCK_PRIMITIVE_ARRAY_SIZE.getBytes() * 2)) + ); LocalExecutionPlanner executionPlanner = new LocalExecutionPlanner( sessionId, new CancellableTask(1, "transport", "esql", null, TaskId.EMPTY_TASK_ID, Map.of()), bigArrays, - new BlockFactory(bigArrays.breakerService().getBreaker(CircuitBreaker.REQUEST), bigArrays), + blockFactory, randomNodeSettings(), configuration, exchangeSource, diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/SerializationTestUtils.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/SerializationTestUtils.java index c8da2792c7565..4be95b95afe54 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/SerializationTestUtils.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/SerializationTestUtils.java @@ -14,6 +14,7 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.index.query.BoolQueryBuilder; +import org.elasticsearch.index.query.ExistsQueryBuilder; import org.elasticsearch.index.query.MatchAllQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.RangeQueryBuilder; @@ -29,6 +30,7 @@ import 
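/*
 * The reworked commonType rules above read as a small decision table: a datetime plus a
 * (possibly null) temporal amount stays a datetime; two (possibly null) time durations stay a
 * time duration, and likewise for date periods; anything else falls through to the general
 * converter. A self-contained sketch over a hypothetical enum (not the real DataType):
 *
 *   enum T { DATETIME, TIME_DURATION, DATE_PERIOD, NULL, OTHER }
 *
 *   final class CommonTypeModel {
 *       static T commonType(T left, T right) {
 *           if (isDateTimeOrTemporal(left) || isDateTimeOrTemporal(right)) {
 *               if (left == T.DATETIME && isNullOrTemporal(right) || isNullOrTemporal(left) && right == T.DATETIME) {
 *                   return T.DATETIME;
 *               }
 *               if (isNullOr(left, T.TIME_DURATION) && isNullOr(right, T.TIME_DURATION)) {
 *                   return T.TIME_DURATION;
 *               }
 *               if (isNullOr(left, T.DATE_PERIOD) && isNullOr(right, T.DATE_PERIOD)) {
 *                   return T.DATE_PERIOD;
 *               }
 *           }
 *           return T.OTHER; // delegate to EsqlDataTypeConverter.commonType in the real code
 *       }
 *
 *       static boolean isDateTimeOrTemporal(T t) { return t == T.DATETIME || t == T.TIME_DURATION || t == T.DATE_PERIOD; }
 *       static boolean isNullOrTemporal(T t) { return t == T.NULL || t == T.TIME_DURATION || t == T.DATE_PERIOD; }
 *       static boolean isNullOr(T t, T other) { return t == T.NULL || t == other; }
 *   }
 */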
org.elasticsearch.xpack.esql.querydsl.query.SingleValueQuery; import org.elasticsearch.xpack.esql.session.EsqlConfiguration; import org.elasticsearch.xpack.ql.expression.Expression; +import org.elasticsearch.xpack.ql.plan.logical.LogicalPlan; import java.io.IOException; import java.io.UncheckedIOException; @@ -43,6 +45,11 @@ public static void assertSerialization(PhysicalPlan plan) { EqualsHashCodeTestUtils.checkEqualsAndHashCode(plan, unused -> deserPlan); } + public static void assertSerialization(LogicalPlan plan) { + var deserPlan = serializeDeserialize(plan, PlanStreamOutput::writeLogicalPlanNode, PlanStreamInput::readLogicalPlanNode); + EqualsHashCodeTestUtils.checkEqualsAndHashCode(plan, unused -> deserPlan); + } + public static void assertSerialization(Expression expression) { Expression deserExpression = serializeDeserialize(expression, PlanStreamOutput::writeExpression, PlanStreamInput::readExpression); EqualsHashCodeTestUtils.checkEqualsAndHashCode(expression, unused -> deserExpression); @@ -85,6 +92,7 @@ public static NamedWriteableRegistry writableRegistry() { new NamedWriteableRegistry.Entry(QueryBuilder.class, BoolQueryBuilder.NAME, BoolQueryBuilder::new), new NamedWriteableRegistry.Entry(QueryBuilder.class, WildcardQueryBuilder.NAME, WildcardQueryBuilder::new), new NamedWriteableRegistry.Entry(QueryBuilder.class, RegexpQueryBuilder.NAME, RegexpQueryBuilder::new), + new NamedWriteableRegistry.Entry(QueryBuilder.class, ExistsQueryBuilder.NAME, ExistsQueryBuilder::new), SingleValueQuery.ENTRY ) ); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/TestBlockFactory.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/TestBlockFactory.java new file mode 100644 index 0000000000000..99cf8be307054 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/TestBlockFactory.java @@ -0,0 +1,27 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql; + +import org.elasticsearch.common.breaker.NoopCircuitBreaker; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.compute.data.BlockFactory; + +public class TestBlockFactory { + + private static final BlockFactory NON_BREAKING = BlockFactory.getInstance( + new NoopCircuitBreaker("noop-esql-breaker"), + BigArrays.NON_RECYCLING_INSTANCE + ); + + /** + * Returns the Non-Breaking block factory. 
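* <p>
* Typical use in tests, mirroring calls made elsewhere in this change (variable names are
* illustrative):
* <pre>{@code
* BlockFactory blockFactory = TestBlockFactory.getNonBreakingInstance();
* Block ints = blockFactory.newIntArrayVector(new int[] { 40, 80 }, 2).asBlock();
* }</pre>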
+ */ + public static BlockFactory getNonBreakingInstance() { + return NON_BREAKING; + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequestTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequestTests.java index b1b492b28076e..b7ea867f82cde 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequestTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequestTests.java @@ -31,6 +31,7 @@ import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.function.Function; import java.util.function.Supplier; import static org.hamcrest.Matchers.containsString; @@ -55,7 +56,7 @@ public void testParseFields() throws IOException { "filter": %s %s""", query, columnar, locale.toLanguageTag(), filter, paramsString); - EsqlQueryRequest request = parseEsqlQueryRequest(json); + EsqlQueryRequest request = parseEsqlQueryRequestSync(json); assertEquals(query, request.query()); assertEquals(columnar, request.columnar()); @@ -69,6 +70,57 @@ public void testParseFields() throws IOException { } } + public void testParseFieldsForAsync() throws IOException { + String query = randomAlphaOfLengthBetween(1, 100); + boolean columnar = randomBoolean(); + Locale locale = randomLocale(random()); + QueryBuilder filter = randomQueryBuilder(); + + List params = randomParameters(); + boolean hasParams = params.isEmpty() == false; + StringBuilder paramsString = paramsString(params, hasParams); + boolean keepOnCompletion = randomBoolean(); + TimeValue waitForCompletion = TimeValue.parseTimeValue(randomTimeValue(), "test"); + TimeValue keepAlive = TimeValue.parseTimeValue(randomTimeValue(), "test"); + String json = String.format( + Locale.ROOT, + """ + { + "query": "%s", + "columnar": %s, + "locale": "%s", + "filter": %s, + "keep_on_completion": %s, + "wait_for_completion_timeout": "%s", + "keep_alive": "%s" + %s""", + query, + columnar, + locale.toLanguageTag(), + filter, + keepOnCompletion, + waitForCompletion.getStringRep(), + keepAlive.getStringRep(), + paramsString + ); + + EsqlQueryRequest request = parseEsqlQueryRequestAsync(json); + + assertEquals(query, request.query()); + assertEquals(columnar, request.columnar()); + assertEquals(locale.toLanguageTag(), request.locale().toLanguageTag()); + assertEquals(locale, request.locale()); + assertEquals(filter, request.filter()); + assertEquals(keepOnCompletion, request.keepOnCompletion()); + assertEquals(waitForCompletion, request.waitForCompletionTimeout()); + assertEquals(keepAlive, request.keepAlive()); + + assertEquals(params.size(), request.params().size()); + for (int i = 0; i < params.size(); i++) { + assertEquals(params.get(i), request.params().get(i)); + } + } + public void testRejectUnknownFields() { assertParserErrorMessage(""" { @@ -84,10 +136,15 @@ public void testRejectUnknownFields() { } public void testMissingQueryIsNotValidation() throws IOException { - EsqlQueryRequest request = parseEsqlQueryRequest(""" + String json = """ { "columnar": true - }"""); + }"""; + EsqlQueryRequest request = parseEsqlQueryRequestSync(json); + assertNotNull(request.validate()); + assertThat(request.validate().getMessage(), containsString("[query] is required")); + + request = parseEsqlQueryRequestAsync(json); assertNotNull(request.validate()); assertThat(request.validate().getMessage(), containsString("[query] is required")); } @@ -96,10 +153,12 @@ public void testTask() throws 
IOException { String query = randomAlphaOfLength(10); int id = randomInt(); - EsqlQueryRequest request = parseEsqlQueryRequest(""" + String requestJson = """ { "query": "QUERY" - }""".replace("QUERY", query)); + }""".replace("QUERY", query); + + EsqlQueryRequest request = parseEsqlQueryRequestSync(requestJson); Task task = request.createTask(id, "transport", EsqlQueryAction.NAME, TaskId.EMPTY_TASK_ID, Map.of()); assertThat(task.getDescription(), equalTo(query)); @@ -180,17 +239,29 @@ private StringBuilder paramsString(List params, boolean hasPara } private static void assertParserErrorMessage(String json, String message) { - Exception e = expectThrows(IllegalArgumentException.class, () -> parseEsqlQueryRequest(json)); + Exception e = expectThrows(IllegalArgumentException.class, () -> parseEsqlQueryRequestSync(json)); + assertThat(e.getMessage(), containsString(message)); + + e = expectThrows(IllegalArgumentException.class, () -> parseEsqlQueryRequestAsync(json)); assertThat(e.getMessage(), containsString(message)); } - private static EsqlQueryRequest parseEsqlQueryRequest(String json) throws IOException { + static EsqlQueryRequest parseEsqlQueryRequestSync(String json) throws IOException { + return parseEsqlQueryRequest(json, EsqlQueryRequest::fromXContentSync); + } + + static EsqlQueryRequest parseEsqlQueryRequestAsync(String json) throws IOException { + return parseEsqlQueryRequest(json, EsqlQueryRequest::fromXContentAsync); + } + + static EsqlQueryRequest parseEsqlQueryRequest(String json, Function fromXContentFunc) + throws IOException { SearchModule searchModule = new SearchModule(Settings.EMPTY, Collections.emptyList()); XContentParserConfiguration config = XContentParserConfiguration.EMPTY.withRegistry( new NamedXContentRegistry(searchModule.getNamedXContents()) ); try (XContentParser parser = XContentType.JSON.xContent().createParser(config, json)) { - return EsqlQueryRequest.fromXContent(parser); + return fromXContentFunc.apply(parser); } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java index 25083268a3761..debcb5345bfa9 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponseTests.java @@ -24,7 +24,6 @@ import org.elasticsearch.compute.data.BooleanBlock; import org.elasticsearch.compute.data.BytesRefBlock; import org.elasticsearch.compute.data.DoubleBlock; -import org.elasticsearch.compute.data.IntArrayVector; import org.elasticsearch.compute.data.IntBlock; import org.elasticsearch.compute.data.LongBlock; import org.elasticsearch.compute.data.Page; @@ -32,11 +31,17 @@ import org.elasticsearch.compute.operator.AbstractPageMappingOperator; import org.elasticsearch.compute.operator.DriverProfile; import org.elasticsearch.compute.operator.DriverStatus; +import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Releasables; import org.elasticsearch.test.AbstractChunkedSerializingTestCase; +import org.elasticsearch.xcontent.InstantiatingObjectParser; +import org.elasticsearch.xcontent.ObjectParser; +import org.elasticsearch.xcontent.ParseField; +import org.elasticsearch.xcontent.ParserConstructor; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xcontent.json.JsonXContent; +import 
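/*
 * The async-specific fields exercised by testParseFieldsForAsync above are plain JSON body
 * fields; a request such as the following (query text illustrative) parses through
 * EsqlQueryRequest.fromXContentAsync, while fromXContentSync accepts only the sync subset:
 *
 *   {
 *     "query": "from idx | limit 1",
 *     "keep_on_completion": true,
 *     "wait_for_completion_timeout": "30s",
 *     "keep_alive": "5m"
 *   }
 */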
org.elasticsearch.xpack.esql.TestBlockFactory; import org.elasticsearch.xpack.esql.planner.PlannerUtils; import org.elasticsearch.xpack.esql.type.EsqlDataTypes; import org.elasticsearch.xpack.ql.type.DataType; @@ -51,6 +56,9 @@ import java.util.List; import java.util.stream.Stream; +import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; +import static org.elasticsearch.xpack.esql.action.ResponseValueUtils.valuesToPage; import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.CARTESIAN; import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.GEO; import static org.hamcrest.Matchers.equalTo; @@ -89,11 +97,21 @@ protected EsqlQueryResponse createTestInstance() { } EsqlQueryResponse randomResponse(boolean columnar, EsqlQueryResponse.Profile profile) { + return randomResponseAsync(columnar, profile, false); + } + + EsqlQueryResponse randomResponseAsync(boolean columnar, EsqlQueryResponse.Profile profile, boolean async) { int noCols = randomIntBetween(1, 10); List columns = randomList(noCols, noCols, this::randomColumnInfo); int noPages = randomIntBetween(1, 20); List values = randomList(noPages, noPages, () -> randomPage(columns)); - return new EsqlQueryResponse(columns, values, profile, columnar); + String id = null; + boolean isRunning = false; + if (async) { + id = randomAlphaOfLengthBetween(1, 16); + isRunning = randomBoolean(); + } + return new EsqlQueryResponse(columns, values, profile, columnar, id, isRunning, async); } private ColumnInfo randomColumnInfo() { @@ -167,19 +185,21 @@ protected EsqlQueryResponse mutateInstance(EsqlQueryResponse instance) { List cols = new ArrayList<>(instance.columns()); // keep the type the same so the values are still valid but change the name cols.set(mutCol, new ColumnInfo(cols.get(mutCol).name() + "mut", cols.get(mutCol).type())); - yield new EsqlQueryResponse(cols, deepCopyOfPages(instance), instance.profile(), instance.columnar()); + yield new EsqlQueryResponse(cols, deepCopyOfPages(instance), instance.profile(), instance.columnar(), instance.isAsync()); } case 1 -> new EsqlQueryResponse( instance.columns(), deepCopyOfPages(instance), instance.profile(), - false == instance.columnar() + false == instance.columnar(), + instance.isAsync() ); case 2 -> new EsqlQueryResponse( instance.columns(), deepCopyOfPages(instance), randomValueOtherThan(instance.profile(), this::randomProfile), - instance.columnar() + instance.columnar(), + instance.isAsync() ); case 3 -> { int noPages = instance.pages().size(); @@ -188,7 +208,13 @@ protected EsqlQueryResponse mutateInstance(EsqlQueryResponse instance) { differentPages.forEach(p -> Releasables.closeExpectNoException(p::releaseBlocks)); differentPages = randomList(noPages, noPages, () -> randomPage(instance.columns())); } while (differentPages.equals(instance.pages())); - yield new EsqlQueryResponse(instance.columns(), differentPages, instance.profile(), instance.columnar()); + yield new EsqlQueryResponse( + instance.columns(), + differentPages, + instance.profile(), + instance.columnar(), + instance.isAsync() + ); } default -> throw new IllegalArgumentException(); }; @@ -214,7 +240,58 @@ protected Writeable.Reader instanceReader() { @Override protected EsqlQueryResponse doParseInstance(XContentParser parser) { - return EsqlQueryResponse.fromXContent(parser); + return ResponseBuilder.fromXContent(parser); + } + + public static class ResponseBuilder { + private static final 
ParseField ID = new ParseField("id"); + private static final ParseField IS_RUNNING = new ParseField("is_running"); + private static final InstantiatingObjectParser PARSER; + + static { + InstantiatingObjectParser.Builder parser = InstantiatingObjectParser.builder( + "esql/query_response", + true, + ResponseBuilder.class + ); + parser.declareString(optionalConstructorArg(), ID); + parser.declareField( + optionalConstructorArg(), + p -> p.currentToken() == XContentParser.Token.VALUE_NULL ? false : p.booleanValue(), + IS_RUNNING, + ObjectParser.ValueType.BOOLEAN_OR_NULL + ); + parser.declareObjectArray(constructorArg(), (p, c) -> ColumnInfo.fromXContent(p), new ParseField("columns")); + parser.declareField(constructorArg(), (p, c) -> p.list(), new ParseField("values"), ObjectParser.ValueType.OBJECT_ARRAY); + PARSER = parser.build(); + } + + // Used for XContent reconstruction + private final EsqlQueryResponse response; + + @ParserConstructor + public ResponseBuilder(@Nullable String asyncExecutionId, Boolean isRunning, List columns, List> values) { + this.response = new EsqlQueryResponse( + columns, + List.of(valuesToPage(TestBlockFactory.getNonBreakingInstance(), columns, values)), + null, + false, + asyncExecutionId, + isRunning != null, + isAsync(asyncExecutionId, isRunning) + ); + } + + static boolean isAsync(@Nullable String asyncExecutionId, Boolean isRunning) { + if (asyncExecutionId != null || isRunning != null) { + return true; + } + return false; + } + + static EsqlQueryResponse fromXContent(XContentParser parser) { + return PARSER.apply(parser, null).response; + } } public void testChunkResponseSizeColumnar() { @@ -223,6 +300,12 @@ public void testChunkResponseSizeColumnar() { int bodySize = resp.pages().stream().mapToInt(p -> p.getPositionCount() * p.getBlockCount()).sum() + columnCount * 2; assertChunkCount(resp, r -> 5 + bodySize); } + + try (EsqlQueryResponse resp = randomResponseAsync(true, null, true)) { + int columnCount = resp.pages().get(0).getBlockCount(); + int bodySize = resp.pages().stream().mapToInt(p -> p.getPositionCount() * p.getBlockCount()).sum() + columnCount * 2; + assertChunkCount(resp, r -> 6 + bodySize); // is_running + } } public void testChunkResponseSizeRows() { @@ -230,6 +313,10 @@ public void testChunkResponseSizeRows() { int bodySize = resp.pages().stream().mapToInt(p -> p.getPositionCount()).sum(); assertChunkCount(resp, r -> 5 + bodySize); } + try (EsqlQueryResponse resp = randomResponseAsync(false, null, true)) { + int bodySize = resp.pages().stream().mapToInt(p -> p.getPositionCount()).sum(); + assertChunkCount(resp, r -> 6 + bodySize); + } } public void testSimpleXContentColumnar() { @@ -239,6 +326,13 @@ public void testSimpleXContentColumnar() { } } + public void testSimpleXContentColumnarAsync() { + try (EsqlQueryResponse response = simple(true, true)) { + assertThat(Strings.toString(response), equalTo(""" + {"is_running":false,"columns":[{"name":"foo","type":"integer"}],"values":[[40,80]]}""")); + } + } + public void testSimpleXContentRows() { try (EsqlQueryResponse response = simple(false)) { assertThat(Strings.toString(response), equalTo(""" @@ -246,12 +340,41 @@ public void testSimpleXContentRows() { } } + public void testSimpleXContentRowsAsync() { + try (EsqlQueryResponse response = simple(false, true)) { + assertThat(Strings.toString(response), equalTo(""" + {"is_running":false,"columns":[{"name":"foo","type":"integer"}],"values":[[40],[80]]}""")); + } + } + + public void testBasicXContentIdAndRunning() { + try ( + EsqlQueryResponse 
response = new EsqlQueryResponse( + List.of(new ColumnInfo("foo", "integer")), + List.of(new Page(blockFactory.newIntArrayVector(new int[] { 40, 80 }, 2).asBlock())), + null, + false, + "id-123", + true, + true + ) + ) { + assertThat(Strings.toString(response), equalTo(""" + {"id":"id-123","is_running":true,"columns":[{"name":"foo","type":"integer"}],"values":[[40],[80]]}""")); + } + } + private EsqlQueryResponse simple(boolean columnar) { + return simple(columnar, false); + } + + private EsqlQueryResponse simple(boolean columnar, boolean async) { return new EsqlQueryResponse( List.of(new ColumnInfo("foo", "integer")), - List.of(new Page(new IntArrayVector(new int[] { 40, 80 }, 2).asBlock())), + List.of(new Page(blockFactory.newIntArrayVector(new int[] { 40, 80 }, 2).asBlock())), null, - columnar + columnar, + async ); } @@ -259,10 +382,11 @@ public void testProfileXContent() { try ( EsqlQueryResponse response = new EsqlQueryResponse( List.of(new ColumnInfo("foo", "integer")), - List.of(new Page(new IntArrayVector(new int[] { 40, 80 }, 2).asBlock())), + List.of(new Page(blockFactory.newIntArrayVector(new int[] { 40, 80 }, 2).asBlock())), new EsqlQueryResponse.Profile( List.of(new DriverProfile(List.of(new DriverStatus.OperatorStatus("asdf", new AbstractPageMappingOperator.Status(10))))) ), + false, false ); ) { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java index 500a7a1b14195..db4b50dc2a5ba 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java @@ -32,6 +32,7 @@ import org.elasticsearch.logging.LogManager; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xcontent.json.JsonXContent; +import org.elasticsearch.xpack.esql.TestBlockFactory; import org.elasticsearch.xpack.esql.evaluator.EvalMapper; import org.elasticsearch.xpack.esql.expression.function.scalar.conditional.Greatest; import org.elasticsearch.xpack.esql.expression.function.scalar.nulls.Coalesce; @@ -40,6 +41,7 @@ import org.elasticsearch.xpack.esql.planner.Layout; import org.elasticsearch.xpack.esql.planner.PlannerUtils; import org.elasticsearch.xpack.esql.type.EsqlDataTypes; +import org.elasticsearch.xpack.ql.expression.Attribute; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.expression.FieldAttribute; import org.elasticsearch.xpack.ql.expression.Literal; @@ -150,11 +152,17 @@ protected static Iterable parameterSuppliersFromTypedData(List values) { - return new Page(BlockUtils.fromListRow(BlockFactory.getNonBreakingInstance(), values)); + return new Page(BlockUtils.fromListRow(TestBlockFactory.getNonBreakingInstance(), values)); } /** * Hack together a layout by scanning for Fields. * Those will show up in the layout in whatever order a depth first traversal finds them. 
*/ - protected void buildLayout(Layout.Builder builder, Expression e) { + protected static void buildLayout(Layout.Builder builder, Expression e) { if (e instanceof FieldAttribute f) { builder.append(f); return; @@ -434,13 +446,14 @@ public final void testSimpleWithNulls() { // TODO replace this with nulls insert assumeTrue("All test data types must be representable in order to build fields", testCase.allTypesAreRepresentable()); List simpleData = testCase.getDataValues(); try (EvalOperator.ExpressionEvaluator eval = evaluator(buildFieldExpression(testCase)).get(driverContext())) { - Block[] orig = BlockUtils.fromListRow(BlockFactory.getNonBreakingInstance(), simpleData); + BlockFactory blockFactory = TestBlockFactory.getNonBreakingInstance(); + Block[] orig = BlockUtils.fromListRow(blockFactory, simpleData); for (int i = 0; i < orig.length; i++) { List data = new ArrayList<>(); Block[] blocks = new Block[orig.length]; for (int b = 0; b < blocks.length; b++) { if (b == i) { - blocks[b] = orig[b].elementType().newBlockBuilder(1).appendNull().build(); + blocks[b] = orig[b].elementType().newBlockBuilder(1, blockFactory).appendNull().build(); data.add(null); } else { blocks[b] = orig[b]; @@ -664,13 +677,9 @@ protected static List anyNullIsNull(boolean entirelyNullPreser if (newSignature) { suppliers.add(new TestCaseSupplier(typesWithNull, () -> { TestCaseSupplier.TestCase oc = original.get(); - List data = IntStream.range(0, oc.getData().size()).mapToObj(i -> { - TestCaseSupplier.TypedData od = oc.getData().get(i); - if (i == finalNullPosition) { - return new TestCaseSupplier.TypedData(null, DataTypes.NULL, od.name()); - } - return od; - }).toList(); + List data = IntStream.range(0, oc.getData().size()) + .mapToObj(i -> i == finalNullPosition ? TestCaseSupplier.TypedData.NULL : oc.getData().get(i)) + .toList(); return new TestCaseSupplier.TestCase( data, "LiteralsEvaluator[lit=null]", @@ -891,6 +900,21 @@ private static String typeErrorMessage(boolean includeOrdinal, List forBinaryCastingToDouble( + "]", warnings, suppliers, - DataTypes.DOUBLE + DataTypes.DOUBLE, + false ); return suppliers; } @@ -200,40 +201,55 @@ private static void casesCrossProduct( BiFunction evaluatorToString, List warnings, List suppliers, - DataType expectedType + DataType expectedType, + boolean symmetric ) { for (TypedDataSupplier lhsSupplier : lhsSuppliers) { for (TypedDataSupplier rhsSupplier : rhsSuppliers) { - String caseName = lhsSupplier.name() + ", " + rhsSupplier.name(); - suppliers.add(new TestCaseSupplier(caseName, List.of(lhsSupplier.type(), rhsSupplier.type()), () -> { - Object lhs = lhsSupplier.supplier().get(); - Object rhs = rhsSupplier.supplier().get(); - TypedData lhsTyped = new TypedData( - // TODO there has to be a better way to handle unsigned long - lhs instanceof BigInteger b ? NumericUtils.asLongUnsigned(b) : lhs, - lhsSupplier.type(), - "lhs" - ); - TypedData rhsTyped = new TypedData( - rhs instanceof BigInteger b ? 
NumericUtils.asLongUnsigned(b) : rhs, - rhsSupplier.type(), - "rhs" - ); - TestCase testCase = new TestCase( - List.of(lhsTyped, rhsTyped), - evaluatorToString.apply(lhsSupplier.type(), rhsSupplier.type()), - expectedType, - equalTo(expected.apply(lhs, rhs)) - ); - for (String warning : warnings) { - testCase = testCase.withWarning(warning); - } - return testCase; - })); + suppliers.add(testCaseSupplier(lhsSupplier, rhsSupplier, evaluatorToString, expectedType, expected, warnings)); + if (symmetric) { + suppliers.add(testCaseSupplier(rhsSupplier, lhsSupplier, evaluatorToString, expectedType, expected, warnings)); + } } } } + private static TestCaseSupplier testCaseSupplier( + TypedDataSupplier lhsSupplier, + TypedDataSupplier rhsSupplier, + BiFunction evaluatorToString, + DataType expectedType, + BinaryOperator expectedValue, + List warnings + ) { + String caseName = lhsSupplier.name() + ", " + rhsSupplier.name(); + return new TestCaseSupplier(caseName, List.of(lhsSupplier.type(), rhsSupplier.type()), () -> { + Object lhs = lhsSupplier.supplier().get(); + Object rhs = rhsSupplier.supplier().get(); + TypedData lhsTyped = new TypedData( + // TODO there has to be a better way to handle unsigned long + lhs instanceof BigInteger b ? NumericUtils.asLongUnsigned(b) : lhs, + lhsSupplier.type(), + "lhs" + ); + TypedData rhsTyped = new TypedData( + rhs instanceof BigInteger b ? NumericUtils.asLongUnsigned(b) : rhs, + rhsSupplier.type(), + "rhs" + ); + TestCase testCase = new TestCase( + List.of(lhsTyped, rhsTyped), + evaluatorToString.apply(lhsSupplier.type(), rhsSupplier.type()), + expectedType, + equalTo(expectedValue.apply(lhs, rhs)) + ); + for (String warning : warnings) { + testCase = testCase.withWarning(warning); + } + return testCase; + }); + } + public static List castToDoubleSuppliersFromRange(Double Min, Double Max) { List suppliers = new ArrayList<>(); suppliers.addAll(intCases(Min.intValue(), Max.intValue())); @@ -243,30 +259,6 @@ public static List castToDoubleSuppliersFromRange(Double Min, return suppliers; } - public static List forBinaryNumericNotCasting( - String name, - String lhsName, - String rhsName, - BinaryOperator expected, - DataType expectedType, - List lhsSuppliers, - List rhsSuppliers, - List warnings, - boolean symetric - ) { - return forBinaryNotCasting( - name, - lhsName, - rhsName, - (lhs, rhs) -> expected.apply((Number) lhs, (Number) rhs), - expectedType, - lhsSuppliers, - rhsSuppliers, - warnings, - symetric - ); - } - public record NumericTypeTestConfig(Number min, Number max, BinaryOperator expected, String evaluatorName) {} public record NumericTypeTestConfigs( @@ -333,25 +325,25 @@ public static List forBinaryWithWidening( for (DataType rhsType : numericTypes) { DataType expected = widen(lhsType, rhsType); NumericTypeTestConfig expectedTypeStuff = typeStuff.get(expected); - String evaluator = expectedTypeStuff.evaluatorName() + BiFunction evaluatorToString = (lhs, rhs) -> expectedTypeStuff.evaluatorName() + "[" + lhsName + "=" - + getCastEvaluator("Attribute[channel=0]", lhsType, expected) + + getCastEvaluator("Attribute[channel=0]", lhs, expected) + ", " + rhsName + "=" - + getCastEvaluator("Attribute[channel=1]", rhsType, expected) + + getCastEvaluator("Attribute[channel=1]", rhs, expected) + "]"; casesCrossProduct( (l, r) -> expectedTypeStuff.expected().apply((Number) l, (Number) r), getSuppliersForNumericType(lhsType, expectedTypeStuff.min(), expectedTypeStuff.max()), getSuppliersForNumericType(rhsType, expectedTypeStuff.min(), expectedTypeStuff.max()), - 
// TODO: This doesn't really need to be a function - (lt, rt) -> evaluator, + evaluatorToString, warnings, suppliers, - expected + expected, + true ); } } @@ -367,8 +359,7 @@ public static List forBinaryNotCasting( DataType expectedType, List lhsSuppliers, List rhsSuppliers, - List warnings, - boolean symetric + List warnings ) { List suppliers = new ArrayList<>(); casesCrossProduct( @@ -378,7 +369,8 @@ public static List forBinaryNotCasting( (lhsType, rhsType) -> name + "[" + lhsName + "=Attribute[channel=0], " + rhsName + "=Attribute[channel=1]]", warnings, suppliers, - expectedType + expectedType, + true ); return suppliers; } @@ -934,11 +926,11 @@ public static List timeDurationCases() { ); } - private static List geoPointCases() { + public static List geoPointCases() { return List.of(new TypedDataSupplier("", () -> GEO.pointAsLong(randomGeoPoint()), EsqlDataTypes.GEO_POINT)); } - private static List cartesianPointCases() { + public static List cartesianPointCases() { return List.of( new TypedDataSupplier("", () -> CARTESIAN.pointAsLong(randomCartesianPoint()), EsqlDataTypes.CARTESIAN_POINT) ); @@ -1184,14 +1176,22 @@ public TestCase withWarning(String warning) { * exists because we can't generate random values from the test parameter generation functions, and instead need to return * suppliers which generate the random values at test execution time. */ - public record TypedDataSupplier(String name, Supplier supplier, DataType type) {} + public record TypedDataSupplier(String name, Supplier supplier, DataType type) { + public TypedData get() { + return new TypedData(supplier.get(), type, name); + } + } /** * Holds a data value and the intended parse type of that value * @param data - value to test against * @param type - type of the value, for building expressions + * @param name - a name for the value, used for generating test case names */ public record TypedData(Object data, DataType type, String name) { + + public static final TypedData NULL = new TypedData(null, DataTypes.NULL, ""); + public TypedData(Object data, String name) { this(data, EsqlDataTypes.fromJava(data), name); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/CaseTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/CaseTests.java index 838044c8b90f6..90692d5b19df1 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/CaseTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/CaseTests.java @@ -12,8 +12,8 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.compute.data.Block; -import org.elasticsearch.compute.data.IntBlock; import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; import org.elasticsearch.compute.operator.EvalOperator; import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; @@ -88,11 +88,15 @@ protected Expression build(Source source, List args) { public void testEvalCase() { testCase(caseExpr -> { + DriverContext driverContext = driverContext(); + Page page = new Page(driverContext.blockFactory().newConstantIntBlockWith(0, 1)); try ( - EvalOperator.ExpressionEvaluator eval = caseExpr.toEvaluator(child -> evaluator(child)).get(driverContext()); - Block block = eval.eval(new 
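/*
 * With the symmetric flag above, every (lhs, rhs) supplier pairing also emits the mirrored
 * (rhs, lhs) case, so commutative operators are exercised with both argument orders. Minimal
 * model of casesCrossProduct's iteration (plain Java, hypothetical Pair record):
 *
 *   import java.util.ArrayList;
 *   import java.util.List;
 *
 *   record Pair<T>(T lhs, T rhs) {}
 *
 *   final class CrossProduct {
 *       static <T> List<Pair<T>> cases(List<T> lhsSuppliers, List<T> rhsSuppliers, boolean symmetric) {
 *           List<Pair<T>> out = new ArrayList<>();
 *           for (T lhs : lhsSuppliers) {
 *               for (T rhs : rhsSuppliers) {
 *                   out.add(new Pair<>(lhs, rhs));
 *                   if (symmetric) {
 *                       out.add(new Pair<>(rhs, lhs)); // mirrored case
 *                   }
 *               }
 *           }
 *           return out;
 *       }
 *   }
 */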
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/CaseTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/CaseTests.java
index 838044c8b90f6..90692d5b19df1 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/CaseTests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/CaseTests.java
@@ -12,8 +12,8 @@
 import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.compute.data.Block;
-import org.elasticsearch.compute.data.IntBlock;
 import org.elasticsearch.compute.data.Page;
+import org.elasticsearch.compute.operator.DriverContext;
 import org.elasticsearch.compute.operator.EvalOperator;
 import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase;
 import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier;
@@ -88,11 +88,15 @@ protected Expression build(Source source, List<Expression> args) {

     public void testEvalCase() {
         testCase(caseExpr -> {
+            DriverContext driverContext = driverContext();
+            Page page = new Page(driverContext.blockFactory().newConstantIntBlockWith(0, 1));
             try (
-                EvalOperator.ExpressionEvaluator eval = caseExpr.toEvaluator(child -> evaluator(child)).get(driverContext());
-                Block block = eval.eval(new Page(IntBlock.newConstantBlockWith(0, 1)))
+                EvalOperator.ExpressionEvaluator eval = caseExpr.toEvaluator(child -> evaluator(child)).get(driverContext);
+                Block block = eval.eval(page)
             ) {
                 return toJavaObject(block, 0);
+            } finally {
+                page.releaseBlocks();
             }
         });
     }
@@ -148,7 +152,8 @@ public void testCaseWithIncompatibleTypes() {

     public void testCaseIsLazy() {
         Case caseExpr = caseExpr(true, 1, true, 2);
-        try (Block block = caseExpr.toEvaluator(child -> {
+        DriverContext driverContext = driverContext();
+        EvalOperator.ExpressionEvaluator evaluator = caseExpr.toEvaluator(child -> {
             Object value = child.fold();
             if (value != null && value.equals(2)) {
                 return dvrCtx -> new EvalOperator.ExpressionEvaluator() {
@@ -163,8 +168,12 @@ public void close() {}
                 };
             }
             return evaluator(child);
-        }).get(driverContext()).eval(new Page(IntBlock.newConstantBlockWith(0, 1)))) {
+        }).get(driverContext);
+        Page page = new Page(driverContext.blockFactory().newConstantIntBlockWith(0, 1));
+        try (Block block = evaluator.eval(page)) {
             assertEquals(1, toJavaObject(block, 0));
+        } finally {
+            page.releaseBlocks();
+        }
     }
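The recurring pattern in these test changes: input `Page`s are now allocated from the `DriverContext`'s breaker-tracked `BlockFactory` and must be released explicitly, while the output `Block` is closed by try-with-resources. A condensed sketch of that discipline; the `evalSingleValue` helper is hypothetical, the calls inside it are the ones used above:

```java
// Hypothetical helper condensing the allocate/evaluate/release pattern used above.
static Object evalSingleValue(EvalOperator.ExpressionEvaluator eval, DriverContext ctx) {
    Page page = new Page(ctx.blockFactory().newConstantIntBlockWith(0, 1));
    try (Block block = eval.eval(page)) {
        return toJavaObject(block, 0);   // test utility from AbstractFunctionTestCase
    } finally {
        page.releaseBlocks();            // return the input's bytes to the circuit breaker
    }
}
```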
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianPointTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianPointTests.java
new file mode 100644
index 0000000000000..b0e9a79698f90
--- /dev/null
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToCartesianPointTests.java
@@ -0,0 +1,99 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.esql.expression.function.scalar.convert;
+
+import com.carrotsearch.randomizedtesting.annotations.Name;
+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase;
+import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier;
+import org.elasticsearch.xpack.esql.type.EsqlDataTypes;
+import org.elasticsearch.xpack.ql.expression.Expression;
+import org.elasticsearch.xpack.ql.tree.Source;
+import org.elasticsearch.xpack.ql.type.DataTypes;
+import org.elasticsearch.xpack.ql.util.NumericUtils;
+
+import java.math.BigInteger;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.function.Function;
+import java.util.function.Supplier;
+
+import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.CARTESIAN;
+
+public class ToCartesianPointTests extends AbstractFunctionTestCase {
+    public ToCartesianPointTests(@Name("TestCase") Supplier<TestCaseSupplier.TestCase> testCaseSupplier) {
+        this.testCase = testCaseSupplier.get();
+    }
+
+    @ParametersFactory
+    public static Iterable<Object[]> parameters() {
+        // TODO multivalue fields
+        final String attribute = "Attribute[channel=0]";
+        final Function<String, String> evaluatorName = s -> "ToCartesianPoint" + s + "Evaluator[field=" + attribute + "]";
+        final List<TestCaseSupplier> suppliers = new ArrayList<>();
+
+        TestCaseSupplier.forUnaryCartesianPoint(suppliers, attribute, EsqlDataTypes.CARTESIAN_POINT, l -> l, List.of());
+        TestCaseSupplier.forUnaryLong(
+            suppliers,
+            attribute,
+            EsqlDataTypes.CARTESIAN_POINT,
+            l -> l,
+            Long.MIN_VALUE,
+            Long.MAX_VALUE,
+            List.of()
+        );
+        TestCaseSupplier.forUnaryUnsignedLong(
+            suppliers,
+            attribute,
+            EsqlDataTypes.CARTESIAN_POINT,
+            NumericUtils::asLongUnsigned,
+            BigInteger.ZERO,
+            UNSIGNED_LONG_MAX,
+            List.of()
+        );
+
+        // random strings that don't look like a cartesian point
+        TestCaseSupplier.forUnaryStrings(
+            suppliers,
+            evaluatorName.apply("FromString"),
+            EsqlDataTypes.CARTESIAN_POINT,
+            bytesRef -> null,
+            bytesRef -> {
+                var exception = expectThrows(Exception.class, () -> CARTESIAN.stringAsPoint(bytesRef.utf8ToString()));
+                return List.of(
+                    "Line -1:-1: evaluation of [] failed, treating result as null. Only first 20 failures recorded.",
+                    "Line -1:-1: " + exception
+                );
+            }
+        );
+        // strings that are cartesian point representations
+        TestCaseSupplier.unary(
+            suppliers,
+            evaluatorName.apply("FromString"),
+            List.of(
+                new TestCaseSupplier.TypedDataSupplier(
+                    "",
+                    () -> new BytesRef(CARTESIAN.pointAsString(randomCartesianPoint())),
+                    DataTypes.KEYWORD
+                )
+            ),
+            EsqlDataTypes.CARTESIAN_POINT,
+            bytesRef -> CARTESIAN.pointAsLong(CARTESIAN.stringAsPoint(((BytesRef) bytesRef).utf8ToString())),
+            List.of()
+        );
+
+        return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers)));
+    }
+
+    @Override
+    protected Expression build(Source source, List<Expression> args) {
+        return new ToCartesianPoint(source, args.get(0));
+    }
+}
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoPointTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoPointTests.java
new file mode 100644
index 0000000000000..6a8198ca12b4c
--- /dev/null
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToGeoPointTests.java
@@ -0,0 +1,91 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.esql.expression.function.scalar.convert;
+
+import com.carrotsearch.randomizedtesting.annotations.Name;
+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase;
+import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier;
+import org.elasticsearch.xpack.esql.type.EsqlDataTypes;
+import org.elasticsearch.xpack.ql.expression.Expression;
+import org.elasticsearch.xpack.ql.tree.Source;
+import org.elasticsearch.xpack.ql.type.DataTypes;
+import org.elasticsearch.xpack.ql.util.NumericUtils;
+
+import java.math.BigInteger;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.function.Function;
+import java.util.function.Supplier;
+
+import static org.elasticsearch.xpack.ql.util.SpatialCoordinateTypes.GEO;
+
+public class ToGeoPointTests extends AbstractFunctionTestCase {
+    public ToGeoPointTests(@Name("TestCase") Supplier<TestCaseSupplier.TestCase> testCaseSupplier) {
+        this.testCase = testCaseSupplier.get();
+    }
+
+    @ParametersFactory
+    public static Iterable<Object[]> parameters() {
+        // TODO multivalue fields
+        final String attribute = "Attribute[channel=0]";
+        final Function<String, String> evaluatorName = s -> "ToGeoPoint" + s + "Evaluator[field=" + attribute + "]";
+        final List<TestCaseSupplier> suppliers = new ArrayList<>();
+
+        TestCaseSupplier.forUnaryGeoPoint(suppliers, attribute, EsqlDataTypes.GEO_POINT, l -> l, List.of());
+        TestCaseSupplier.forUnaryLong(suppliers, attribute, EsqlDataTypes.GEO_POINT, l -> l, Long.MIN_VALUE, Long.MAX_VALUE, List.of());
+        TestCaseSupplier.forUnaryUnsignedLong(
+            suppliers,
+            attribute,
+            EsqlDataTypes.GEO_POINT,
+            NumericUtils::asLongUnsigned,
+            BigInteger.ZERO,
+            UNSIGNED_LONG_MAX,
+            List.of()
+        );
+
+        // random strings that don't look like a geo point
+        TestCaseSupplier.forUnaryStrings(
+            suppliers,
+            evaluatorName.apply("FromString"),
+            EsqlDataTypes.GEO_POINT,
+            bytesRef -> null,
+            bytesRef -> {
+                var exception = expectThrows(Exception.class, () -> GEO.stringAsPoint(bytesRef.utf8ToString()));
+                return List.of(
+                    "Line -1:-1: evaluation of [] failed, treating result as null. Only first 20 failures recorded.",
+                    "Line -1:-1: " + exception
+                );
+            }
+        );
+        // strings that are geo point representations
+        TestCaseSupplier.unary(
+            suppliers,
+            evaluatorName.apply("FromString"),
+            List.of(
+                new TestCaseSupplier.TypedDataSupplier(
+                    "",
+                    () -> new BytesRef(GEO.pointAsString(randomGeoPoint())),
+                    DataTypes.KEYWORD
+                )
+            ),
+            EsqlDataTypes.GEO_POINT,
+            bytesRef -> GEO.pointAsLong(GEO.stringAsPoint(((BytesRef) bytesRef).utf8ToString())),
+            List.of()
+        );
+
+        return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers)));
+    }
+
+    @Override
+    protected Expression build(Source source, List<Expression> args) {
+        return new ToGeoPoint(source, args.get(0));
+    }
+}
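Both conversion test classes assert the same string round-trip: render a random point to its string form, parse it back, and re-encode it as the packed long carried in blocks. Roughly, using only the calls that appear in the tests above (comment values illustrative):

```java
// Valid strings: the expected block value is parse-then-encode.
String text = CARTESIAN.pointAsString(randomCartesianPoint());          // e.g. a WKT-style "POINT (x y)"
long expected = CARTESIAN.pointAsLong(CARTESIAN.stringAsPoint(text));   // packed long stored in the block
// Malformed strings instead yield a null result plus two warnings: the generic
// "treating result as null" header and the parser's exception message.
```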
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/SplitTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/SplitTests.java
index abaa382637882..9854dfbe11460 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/SplitTests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/SplitTests.java
@@ -12,8 +12,9 @@
 import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.compute.data.Block;
-import org.elasticsearch.compute.data.BytesRefBlock;
+import org.elasticsearch.compute.data.BlockFactory;
 import org.elasticsearch.compute.data.Page;
+import org.elasticsearch.compute.operator.DriverContext;
 import org.elasticsearch.compute.operator.EvalOperator;
 import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier;
 import org.elasticsearch.xpack.esql.expression.function.scalar.AbstractScalarFunctionTestCase;
@@ -84,10 +85,11 @@ protected Expression build(Source source, List<Expression> args) {
     }

     public void testConstantDelimiter() {
+        DriverContext driverContext = driverContext();
         try (
             EvalOperator.ExpressionEvaluator eval = evaluator(
                 new Split(Source.EMPTY, field("str", DataTypes.KEYWORD), new Literal(Source.EMPTY, new BytesRef(":"), DataTypes.KEYWORD))
-            ).get(driverContext())
+            ).get(driverContext)
         ) {
             /*
              * 58 is ascii for : and appears in the toString below. We don't convert the delimiter to a
@@ -96,8 +98,12 @@
              */
             assert ':' == 58;
             assertThat(eval.toString(), equalTo("SplitSingleByteEvaluator[str=Attribute[channel=0], delim=58]"));
-            try (Block block = eval.eval(new Page(BytesRefBlock.newConstantBlockWith(new BytesRef("foo:bar"), 1)))) {
+            BlockFactory blockFactory = driverContext.blockFactory();
+            Page page = new Page(blockFactory.newConstantBytesRefBlockWith(new BytesRef("foo:bar"), 1));
+            try (Block block = eval.eval(page)) {
                 assertThat(toJavaObject(block, 0), equalTo(List.of(new BytesRef("foo"), new BytesRef("bar"))));
+            } finally {
+                page.releaseBlocks();
             }
         }
     }
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/AbstractBinaryOperatorTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/AbstractBinaryOperatorTestCase.java
index cc677787c50c6..22c3bb6e515df 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/AbstractBinaryOperatorTestCase.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/AbstractBinaryOperatorTestCase.java
@@ -22,6 +22,7 @@
 import org.elasticsearch.xpack.ql.type.DataTypes;
 import org.hamcrest.Matcher;

+import java.util.Arrays;
 import java.util.List;
 import java.util.Locale;

@@ -30,6 +31,7 @@
 import static org.elasticsearch.xpack.ql.type.DataTypeConverter.commonType;
 import static org.elasticsearch.xpack.ql.type.DataTypes.isNull;
 import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
 import static org.hamcrest.Matchers.not;
 import static org.hamcrest.Matchers.nullValue;

@@ -70,9 +72,6 @@ protected Expression build(Source source, List<Expression> args) {
      * @return True if the type combination is supported by the respective function.
      */
     protected boolean supportsTypes(DataType lhsType, DataType rhsType) {
-        if (isNull(lhsType) || isNull(rhsType)) {
-            return false;
-        }
         if ((lhsType == DataTypes.UNSIGNED_LONG || rhsType == DataTypes.UNSIGNED_LONG) && lhsType != rhsType) {
             // UL can only be operated on together with another UL, so skip non-UL&UL combinations
             return false;
@@ -94,14 +93,16 @@ public final void testApplyToAllTypes() {
                 Source src = new Source(Location.EMPTY, lhsType.typeName() + " " + rhsType.typeName());
                 if (isRepresentable(lhsType) && isRepresentable(rhsType)) {
                     op = build(src, field("lhs", lhsType), field("rhs", rhsType));
-                    try (Block block = evaluator(op).get(driverContext()).eval(row(List.of(lhs.value(), rhs.value())))) {
+                    try (Block block = evaluator(op).get(driverContext()).eval(row(Arrays.asList(lhs.value(), rhs.value())))) {
                         result = toJavaObject(block, 0);
                     }
                 } else {
                     op = build(src, lhs, rhs);
                     result = op.fold();
                 }
-                if (result == null) {
+                if (isNull(lhsType) || isNull(rhsType)) {
+                    assertThat(op.toString(), result, is(nullValue()));
+                } else if (result == null) {
                     assertCriticalWarnings(
                         "Line -1:-1: evaluation of [" + op + "] failed, treating result as null. Only first 20 failures recorded.",
                         "Line -1:-1: java.lang.ArithmeticException: " + commonType(lhsType, rhsType).typeName() + " overflow"
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/BreakerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/BreakerTests.java
new file mode 100644
index 0000000000000..a09cb68c893e0
--- /dev/null
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/BreakerTests.java
@@ -0,0 +1,92 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.esql.expression.predicate.operator;
+
+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+
+import org.elasticsearch.common.breaker.CircuitBreaker;
+import org.elasticsearch.common.breaker.CircuitBreakingException;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.common.util.MockBigArrays;
+import org.elasticsearch.common.util.PageCacheRecycler;
+import org.elasticsearch.compute.data.Block;
+import org.elasticsearch.compute.data.BlockFactory;
+import org.elasticsearch.compute.data.Page;
+import org.elasticsearch.compute.operator.DriverContext;
+import org.elasticsearch.compute.operator.EvalOperator;
+import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase;
+import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Div;
+import org.elasticsearch.xpack.ql.expression.Expression;
+import org.elasticsearch.xpack.ql.expression.Literal;
+import org.elasticsearch.xpack.ql.tree.Source;
+import org.elasticsearch.xpack.ql.type.DataTypes;
+import org.junit.After;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class BreakerTests extends ESTestCase {
+    @ParametersFactory
+    public static Iterable<Object[]> parameters() {
+        List<Object[]> params = new ArrayList<>();
+
+        Expression expression = new Div(
+            Source.synthetic("[1] / (long) 2"),
+            AbstractFunctionTestCase.field("f", DataTypes.LONG),
+            new Literal(Source.EMPTY, 2, DataTypes.INTEGER)
+        );
+        for (int b = 0; b < 136; b++) {
+            params.add(new Object[] { ByteSizeValue.ofBytes(b), expression });
+        }
+        return params;
+    }
+
+    private final List<CircuitBreaker> breakers = new ArrayList<>();
+
+    private final ByteSizeValue limit;
+    private final Expression expression;
+
+    public BreakerTests(ByteSizeValue limit, Expression expression) {
+        this.limit = limit;
+        this.expression = expression;
+    }
+
+    public void testBreaker() {
+        DriverContext unlimited = driverContext(ByteSizeValue.ofGb(1));
+        DriverContext context = driverContext(limit);
+        EvalOperator.ExpressionEvaluator eval = AbstractFunctionTestCase.evaluator(expression).get(context);
+        try (Block b = unlimited.blockFactory().newConstantNullBlock(1)) {
+            Exception e = expectThrows(CircuitBreakingException.class, () -> eval.eval(new Page(b)));
+            assertThat(e.getMessage(), equalTo("over test limit"));
+        }
+    }
+
+    /**
+     * A {@link DriverContext} that won't throw {@link CircuitBreakingException}.
+     */
+    private DriverContext driverContext(ByteSizeValue limit) {
+        BigArrays bigArrays = new MockBigArrays(PageCacheRecycler.NON_RECYCLING_INSTANCE, limit).withCircuitBreaking();
+        CircuitBreaker breaker = bigArrays.breakerService().getBreaker(CircuitBreaker.REQUEST);
+        breakers.add(breaker);
+        return new DriverContext(bigArrays, new BlockFactory(breaker, bigArrays));
+    }
+
+    @After
+    public void allBreakersEmpty() throws Exception {
+        // first check that all big arrays are released, which can affect breakers
+        MockBigArrays.ensureAllArraysAreReleased();
+
+        for (CircuitBreaker breaker : breakers) {
+            assertThat("Unexpected used in breaker: " + breaker, breaker.getUsed(), equalTo(0L));
+        }
+    }
+}
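The sweep over byte budgets (0 through 135 here) interrupts the evaluator at every possible allocation point, and the `@After` hook then proves nothing leaked at any of them. The two assertions that carry the test, pulled together (names from the file above):

```java
// Inside the test: the limited context must trip its breaker while evaluating.
Exception e = expectThrows(CircuitBreakingException.class, () -> eval.eval(new Page(b)));
assertThat(e.getMessage(), equalTo("over test limit"));
// After the test: every breaker the sweep created must be back to zero bytes used.
assertThat("Unexpected used in breaker: " + breaker, breaker.getUsed(), equalTo(0L));
```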
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AbstractDateTimeArithmeticTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AbstractDateTimeArithmeticTestCase.java
index a620a95ea3c0f..bb462dc00463c 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AbstractDateTimeArithmeticTestCase.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AbstractDateTimeArithmeticTestCase.java
@@ -13,15 +13,17 @@
 import org.elasticsearch.xpack.ql.type.DataTypes;
 import org.hamcrest.Matcher;

+import java.time.Duration;
+import java.time.Period;
 import java.time.temporal.TemporalAmount;
 import java.util.List;
 import java.util.Locale;

-import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.isDateTimeOrTemporal;
+import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.isNullOrTemporalAmount;
 import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.isTemporalAmount;
 import static org.elasticsearch.xpack.ql.type.DataTypes.isDateTime;
+import static org.elasticsearch.xpack.ql.type.DataTypes.isNull;
 import static org.hamcrest.Matchers.equalTo;
-import static org.hamcrest.Matchers.oneOf;

 public abstract class AbstractDateTimeArithmeticTestCase extends AbstractArithmeticTestCase {

@@ -30,15 +32,30 @@ protected Matcher<Object> resultMatcher(List<Object> data, DataType dataType) {
         Object lhs = data.get(0);
         Object rhs = data.get(1);
         if (lhs instanceof TemporalAmount || rhs instanceof TemporalAmount) {
-            TemporalAmount temporal = lhs instanceof TemporalAmount leftTemporal ? leftTemporal : (TemporalAmount) rhs;
-            long datetime = temporal == lhs ? (Long) rhs : (Long) lhs;
-            return equalTo(expectedValue(datetime, temporal));
+            Object expectedValue;
+            if (lhs instanceof TemporalAmount && rhs instanceof TemporalAmount) {
+                assertThat("temporal amounts of different kinds", lhs.getClass(), equalTo(rhs.getClass()));
+                if (lhs instanceof Period) {
+                    expectedValue = expectedValue((Period) lhs, (Period) rhs);
+                } else {
+                    expectedValue = expectedValue((Duration) lhs, (Duration) rhs);
+                }
+            } else if (lhs instanceof TemporalAmount lhsTemporal) {
+                expectedValue = expectedValue((long) rhs, lhsTemporal);
+            } else { // rhs instanceof TemporalAmount
+                expectedValue = expectedValue((long) lhs, (TemporalAmount) rhs);
+            }
+            return equalTo(expectedValue);
         }
         return super.resultMatcher(data, dataType);
     }

     protected abstract long expectedValue(long datetime, TemporalAmount temporalAmount);

+    protected abstract Period expectedValue(Period lhs, Period rhs);
+
+    protected abstract Duration expectedValue(Duration lhs, Duration rhs);
+
     @Override
     protected final boolean supportsType(DataType type) {
         return EsqlDataTypes.isDateTimeOrTemporal(type) || super.supportsType(type);
@@ -46,28 +63,61 @@ protected final boolean supportsType(DataType type) {

     @Override
     protected void validateType(BinaryOperator op, DataType lhsType, DataType rhsType) {
-        if (isDateTime(lhsType) && isTemporalAmount(rhsType) || isTemporalAmount(lhsType) && isDateTime(rhsType)) {
-            assertTrue(op.toString(), op.typeResolved().resolved());
-            assertTrue(op.toString(), isTemporalAmount(lhsType) || isTemporalAmount(rhsType));
-            assertFalse(op.toString(), isTemporalAmount(lhsType) && isTemporalAmount(rhsType));
-            assertThat(op.toString(), op.dataType(), equalTo(expectedType(lhsType, rhsType)));
-            assertThat(op.toString(), op.getClass(), oneOf(Add.class, Sub.class));
-        } else if (isDateTimeOrTemporal(lhsType) || isDateTimeOrTemporal(rhsType)) {
-            assertFalse(op.toString(), op.typeResolved().resolved());
-            assertThat(
-                op.toString(),
-                op.typeResolved().message(),
-                equalTo(
-                    String.format(Locale.ROOT, "[%s] has arguments with incompatible types [%s] and [%s]", op.symbol(), lhsType, rhsType)
-                )
-            );
+        if (isDateTime(lhsType) || isDateTime(rhsType)) {
+            String failureMessage = null;
+            if (isDateTime(lhsType) && isDateTime(rhsType)
+                || isNullOrTemporalAmount(lhsType) == false && isNullOrTemporalAmount(rhsType) == false) {
+                failureMessage = String.format(
+                    Locale.ROOT,
+                    "[%s] has arguments with incompatible types [%s] and [%s]",
+                    op.symbol(),
+                    lhsType,
+                    rhsType
+                );
+            } else if (op instanceof Sub && isDateTime(rhsType)) {
+                failureMessage = String.format(
+                    Locale.ROOT,
+                    "[%s] arguments are in unsupported order: cannot subtract a [DATETIME] value [%s] from a [%s] amount [%s]",
+                    op.symbol(),
+                    op.right().sourceText(),
+                    lhsType,
+                    op.left().sourceText()
+                );
+            }
+            assertTypeResolution(failureMessage, op, lhsType, rhsType);
+        } else if (isTemporalAmount(lhsType) || isTemporalAmount(rhsType)) {
+            String failureMessage = isNull(lhsType) || isNull(rhsType) || lhsType == rhsType
+                ? null
+                : String.format(Locale.ROOT, "[%s] has arguments with incompatible types [%s] and [%s]", op.symbol(), lhsType, rhsType);
+            assertTypeResolution(failureMessage, op, lhsType, rhsType);
         } else {
             super.validateType(op, lhsType, rhsType);
         }
     }

+    private void assertTypeResolution(String failureMessage, BinaryOperator op, DataType lhsType, DataType rhsType) {
+        if (failureMessage != null) {
+            assertFalse(op.toString(), op.typeResolved().resolved());
+            assertThat(op.toString(), op.typeResolved().message(), equalTo(failureMessage));
+        } else {
+            assertTrue(op.toString(), op.typeResolved().resolved());
+            assertThat(op.toString(), op.dataType(), equalTo(expectedType(lhsType, rhsType)));
+        }
+    }
+
     @Override
     protected DataType expectedType(DataType lhsType, DataType rhsType) {
-        return isDateTimeOrTemporal(lhsType) ? DataTypes.DATETIME : super.expectedType(lhsType, rhsType);
+        if (isDateTime(lhsType) || isDateTime(rhsType)) {
+            return DataTypes.DATETIME;
+        } else if (isNullOrTemporalAmount(lhsType) || isNullOrTemporalAmount(rhsType)) {
+            if (isNull(lhsType)) {
+                return rhsType;
+            } else if (isNull(rhsType)) {
+                return lhsType;
+            } else if (lhsType == rhsType) {
+                return lhsType;
+            } // else: UnsupportedOperationException
+        }
+        return super.expectedType(lhsType, rhsType);
     }
 }
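The two new abstract `expectedValue` overloads lean on `java.time` arithmetic, which is exact, so expected results need no special matcher logic. For intuition:

```java
// Period + Period stays a Period; Duration + Duration stays a Duration.
Period p = Period.ofMonths(1).plus(Period.ofDays(2));          // P1M2D
Duration d = Duration.ofHours(3).plus(Duration.ofMinutes(4));  // PT3H4M
// Mixing the two kinds is rejected up front by the lhs.getClass() == rhs.getClass() guard above.
```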
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AddTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AddTests.java
index 91f5a80076626..2280ad9a2b1fe 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AddTests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AddTests.java
@@ -28,6 +28,7 @@
 import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.isDateTimeOrTemporal;
 import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.isTemporalAmount;
 import static org.elasticsearch.xpack.ql.type.DataTypes.isDateTime;
+import static org.elasticsearch.xpack.ql.type.DataTypes.isNull;
 import static org.elasticsearch.xpack.ql.type.DateUtils.asDateTime;
 import static org.elasticsearch.xpack.ql.type.DateUtils.asMillis;
 import static org.elasticsearch.xpack.ql.util.NumericUtils.asLongUnsigned;
@@ -83,16 +84,41 @@ public static Iterable<Object[]> parameters() {
                 DataTypes.UNSIGNED_LONG,
                 TestCaseSupplier.ulongCases(BigInteger.ZERO, BigInteger.valueOf(Long.MAX_VALUE)),
                 TestCaseSupplier.ulongCases(BigInteger.ZERO, BigInteger.valueOf(Long.MAX_VALUE)),
-                List.of(),
-                false
+                List.of()
             )
         );
+
+        // Datetime, Period/Duration Cases
+
+        suppliers.addAll(
+            TestCaseSupplier.forBinaryNotCasting(
+                "No evaluator, the tests only trigger the folding code since Period is not representable",
+                "lhs",
+                "rhs",
+                (lhs, rhs) -> ((Period) lhs).plus((Period) rhs),
+                EsqlDataTypes.DATE_PERIOD,
+                TestCaseSupplier.datePeriodCases(),
+                TestCaseSupplier.datePeriodCases(),
+                List.of()
+            )
+        );
+        suppliers.addAll(
+            TestCaseSupplier.forBinaryNotCasting(
+                "No evaluator, the tests only trigger the folding code since Duration is not representable",
+                "lhs",
+                "rhs",
+                (lhs, rhs) -> ((Duration) lhs).plus((Duration) rhs),
+                EsqlDataTypes.TIME_DURATION,
+                TestCaseSupplier.timeDurationCases(),
+                TestCaseSupplier.timeDurationCases(),
+                List.of()
+            )
+        );

-        // AwaitsFix https://github.com/elastic/elasticsearch/issues/103085
-        // After fixing that issue, please move this line to below where the date cases are generated
+        // Datetime tests are split in two, depending on their permissiveness of null-injection, which cannot happen "automatically" for
+        // Datetime + Period/Duration, since the expression will take the non-null arg's type.
         suppliers = anyNullIsNull(true, suppliers);

-        // Datetime Cases
         suppliers.addAll(
             TestCaseSupplier.forBinaryNotCasting(
                 // TODO: There is an evaluator for Datetime + Period, so it should be tested. Similarly below.
@@ -115,26 +141,12 @@ public static Iterable<Object[]> parameters() {
                 DataTypes.DATETIME,
                 TestCaseSupplier.dateCases(),
                 TestCaseSupplier.datePeriodCases(),
-                List.of(),
-                true
+                List.of()
             )
         );
-        suppliers.addAll(
-            TestCaseSupplier.forBinaryNotCasting(
-                "No evaluator, the tests only trigger the folding code since Period is not representable",
-                "lhs",
-                "rhs",
-                (lhs, rhs) -> ((Period) lhs).plus((Period) rhs),
-                EsqlDataTypes.DATE_PERIOD,
-                TestCaseSupplier.datePeriodCases(),
-                TestCaseSupplier.datePeriodCases(),
-                List.of(),
-                false
-            )
-        );
         suppliers.addAll(
             TestCaseSupplier.forBinaryNotCasting(
-                // TODO: There is an evaluator for Datetime + Duration, so it should be tested. Similarly below.
+                // TODO: There is an evaluator for Datetime + Duration, so it should be tested. Similarly above.
                 "No evaluator, the tests only trigger the folding code since Duration is not representable",
                 "lhs",
                 "rhs",
@@ -154,23 +166,33 @@ public static Iterable<Object[]> parameters() {
                 DataTypes.DATETIME,
                 TestCaseSupplier.dateCases(),
                 TestCaseSupplier.timeDurationCases(),
-                List.of(),
-                true
-            )
-        );
-        suppliers.addAll(
-            TestCaseSupplier.forBinaryNotCasting(
-                "No evaluator, the tests only trigger the folding code since Duration is not representable",
-                "lhs",
-                "rhs",
-                (lhs, rhs) -> ((Duration) lhs).plus((Duration) rhs),
-                EsqlDataTypes.TIME_DURATION,
-                TestCaseSupplier.timeDurationCases(),
-                TestCaseSupplier.timeDurationCases(),
-                List.of(),
-                false
+                List.of()
             )
         );
+        suppliers.addAll(TestCaseSupplier.dateCases().stream().<TestCaseSupplier>mapMulti((tds, consumer) -> {
+            consumer.accept(
+                new TestCaseSupplier(
+                    List.of(DataTypes.DATETIME, DataTypes.NULL),
+                    () -> new TestCaseSupplier.TestCase(
+                        List.of(tds.get(), TestCaseSupplier.TypedData.NULL),
+                        "LiteralsEvaluator[lit=null]",
+                        DataTypes.DATETIME,
+                        nullValue()
+                    )
+                )
+            );
+            consumer.accept(
+                new TestCaseSupplier(
+                    List.of(DataTypes.NULL, DataTypes.DATETIME),
+                    () -> new TestCaseSupplier.TestCase(
+                        List.of(TestCaseSupplier.TypedData.NULL, tds.get()),
+                        "LiteralsEvaluator[lit=null]",
+                        DataTypes.DATETIME,
+                        nullValue()
+                    )
+                )
+            );
+        }).toList());

         // Cases that should generate warnings
         suppliers.addAll(List.of(new TestCaseSupplier("MV", () -> {
@@ -196,7 +218,11 @@ public static Iterable<Object[]> parameters() {
     @Override
     protected boolean supportsTypes(DataType lhsType, DataType rhsType) {
         if (isDateTimeOrTemporal(lhsType) || isDateTimeOrTemporal(rhsType)) {
-            return isDateTime(lhsType) && isTemporalAmount(rhsType) || isTemporalAmount(lhsType) && isDateTime(rhsType);
+            return isNull(lhsType)
+                || isNull(rhsType)
+                || isDateTime(lhsType) && isTemporalAmount(rhsType)
+                || isTemporalAmount(lhsType) && isDateTime(rhsType)
+                || isTemporalAmount(lhsType) && isTemporalAmount(rhsType) && lhsType == rhsType;
         }
         return super.supportsTypes(lhsType, rhsType);
     }
@@ -232,4 +258,14 @@ protected long expectedUnsignedLongValue(long lhs, long rhs) {
     protected long expectedValue(long datetime, TemporalAmount temporalAmount) {
         return asMillis(asDateTime(datetime).plus(temporalAmount));
     }
+
+    @Override
+    protected Period expectedValue(Period lhs, Period rhs) {
+        return lhs.plus(rhs);
+    }
+
+    @Override
+    protected Duration expectedValue(Duration lhs, Duration rhs) {
+        return lhs.plus(rhs);
+    }
 }
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/SubTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/SubTests.java
index db924d0d68c53..b2f54e4d2400c 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/SubTests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/SubTests.java
@@ -27,6 +27,7 @@
 import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.isDateTimeOrTemporal;
 import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.isTemporalAmount;
 import static org.elasticsearch.xpack.ql.type.DataTypes.isDateTime;
+import static org.elasticsearch.xpack.ql.type.DataTypes.isNull;
 import static org.elasticsearch.xpack.ql.type.DateUtils.asDateTime;
 import static org.elasticsearch.xpack.ql.type.DateUtils.asMillis;
 import static org.elasticsearch.xpack.ql.util.NumericUtils.asLongUnsigned;
@@ -162,9 +163,13 @@ public static Iterable<Object[]> parameters() {

     @Override
     protected boolean supportsTypes(DataType lhsType, DataType rhsType) {
-        return isDateTimeOrTemporal(lhsType) || isDateTimeOrTemporal(rhsType)
-            ? isDateTime(lhsType) && isTemporalAmount(rhsType)
-            : super.supportsTypes(lhsType, rhsType);
+        if (isDateTimeOrTemporal(lhsType) || isDateTimeOrTemporal(rhsType)) {
+            return isNull(lhsType)
+                || isNull(rhsType)
+                || isDateTime(lhsType) && isTemporalAmount(rhsType)
+                || isTemporalAmount(lhsType) && isTemporalAmount(rhsType) && lhsType == rhsType;
+        }
+        return super.supportsTypes(lhsType, rhsType);
     }

     @Override
@@ -198,4 +203,14 @@ protected long expectedUnsignedLongValue(long lhs, long rhs) {
     protected long expectedValue(long datetime, TemporalAmount temporalAmount) {
         return asMillis(asDateTime(datetime).minus(temporalAmount));
     }
+
+    @Override
+    protected Period expectedValue(Period lhs, Period rhs) {
+        return lhs.minus(rhs);
+    }
+
+    @Override
+    protected Duration expectedValue(Duration lhs, Duration rhs) {
+        return lhs.minus(rhs);
+    }
 }
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/formatter/TextFormatTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/formatter/TextFormatTests.java
index 9430e984039fe..545a2893270b7 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/formatter/TextFormatTests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/formatter/TextFormatTests.java
@@ -8,14 +8,13 @@
 package org.elasticsearch.xpack.esql.formatter;

 import org.apache.lucene.util.BytesRef;
-import org.elasticsearch.compute.data.BytesRefBlock;
-import org.elasticsearch.compute.data.IntArrayVector;
-import org.elasticsearch.compute.data.LongArrayVector;
+import org.elasticsearch.compute.data.BlockFactory;
 import org.elasticsearch.compute.data.Page;
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.test.rest.FakeRestRequest;
 import org.elasticsearch.xcontent.NamedXContentRegistry;
+import org.elasticsearch.xpack.esql.TestBlockFactory;
 import org.elasticsearch.xpack.esql.action.ColumnInfo;
 import org.elasticsearch.xpack.esql.action.EsqlQueryResponse;
 import org.elasticsearch.xpack.ql.util.StringUtils;
@@ -39,6 +38,8 @@
 public class TextFormatTests extends ESTestCase {

+    static final BlockFactory blockFactory = TestBlockFactory.getNonBreakingInstance();
+
     public void testCsvContentType() {
         assertEquals("text/csv; charset=utf-8; header=present", CSV.contentType(req()));
     }
@@ -231,15 +232,16 @@ public void testPlainTextEmptyCursorWithColumns() {
     public void testPlainTextEmptyCursorWithoutColumns() {
         assertEquals(
             StringUtils.EMPTY,
-            getTextBodyContent(PLAIN_TEXT.format(req(), new EsqlQueryResponse(emptyList(), emptyList(), null, false)))
+            getTextBodyContent(PLAIN_TEXT.format(req(), new EsqlQueryResponse(emptyList(), emptyList(), null, false, false)))
         );
     }

     private static EsqlQueryResponse emptyData() {
-        return new EsqlQueryResponse(singletonList(new ColumnInfo("name", "keyword")), emptyList(), null, false);
+        return new EsqlQueryResponse(singletonList(new ColumnInfo("name", "keyword")), emptyList(), null, false, false);
     }

     private static EsqlQueryResponse regularData() {
+        BlockFactory blockFactory = TestBlockFactory.getNonBreakingInstance();
         // headers
         List<ColumnInfo> headers = asList(
             new ColumnInfo("string", "keyword"),
@@ -250,16 +252,16 @@ private static EsqlQueryResponse regularData() {
         // values
         List<Page> values = List.of(
             new Page(
-                BytesRefBlock.newBlockBuilder(2)
+                blockFactory.newBytesRefBlockBuilder(2)
                     .appendBytesRef(new BytesRef("Along The River Bank"))
                     .appendBytesRef(new BytesRef("Mind Train"))
                     .build(),
-                new IntArrayVector(new int[] { 11 * 60 + 48, 4 * 60 + 40 }, 2).asBlock(),
-                new LongArrayVector(new long[] { GEO.pointAsLong(12, 56), GEO.pointAsLong(-97, 26) }, 2).asBlock()
+                blockFactory.newIntArrayVector(new int[] { 11 * 60 + 48, 4 * 60 + 40 }, 2).asBlock(),
+                blockFactory.newLongArrayVector(new long[] { GEO.pointAsLong(12, 56), GEO.pointAsLong(-97, 26) }, 2).asBlock()
             )
         );

-        return new EsqlQueryResponse(headers, values, null, false);
+        return new EsqlQueryResponse(headers, values, null, false, false);
     }

     private static EsqlQueryResponse escapedData() {
@@ -269,15 +271,18 @@
         // values
         List<Page> values = List.of(
             new Page(
-                BytesRefBlock.newBlockBuilder(2).appendBytesRef(new BytesRef("normal")).appendBytesRef(new BytesRef("commas")).build(),
-                BytesRefBlock.newBlockBuilder(2)
+                blockFactory.newBytesRefBlockBuilder(2)
+                    .appendBytesRef(new BytesRef("normal"))
+                    .appendBytesRef(new BytesRef("commas"))
+                    .build(),
+                blockFactory.newBytesRefBlockBuilder(2)
                     .appendBytesRef(new BytesRef("\"quo\"ted\",\n"))
                     .appendBytesRef(new BytesRef("a,b,c,\n,d,e,\t\n"))
                     .build()
             )
         );

-        return new EsqlQueryResponse(headers, values, null, false);
+        return new EsqlQueryResponse(headers, values, null, false, false);
     }

     private static RestRequest req() {
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/formatter/TextFormatterTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/formatter/TextFormatterTests.java
index 22e532341d30b..2ad9449f12199 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/formatter/TextFormatterTests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/formatter/TextFormatterTests.java
@@ -9,12 +9,10 @@
 import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.common.Strings;
-import org.elasticsearch.compute.data.Block;
-import org.elasticsearch.compute.data.BytesRefBlock;
-import org.elasticsearch.compute.data.DoubleArrayVector;
-import org.elasticsearch.compute.data.LongArrayVector;
+import org.elasticsearch.compute.data.BlockFactory;
 import org.elasticsearch.compute.data.Page;
 import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.xpack.esql.TestBlockFactory;
 import org.elasticsearch.xpack.esql.action.ColumnInfo;
 import org.elasticsearch.xpack.esql.action.EsqlQueryResponse;

@@ -27,6 +25,9 @@
 import static org.hamcrest.Matchers.arrayWithSize;

 public class TextFormatterTests extends ESTestCase {
+
+    static BlockFactory blockFactory = TestBlockFactory.getNonBreakingInstance();
+
     private final List<ColumnInfo> columns = Arrays.asList(
         new ColumnInfo("foo", "keyword"),
         new ColumnInfo("bar", "long"),
@@ -42,26 +43,27 @@
         columns,
         List.of(
             new Page(
-                BytesRefBlock.newBlockBuilder(2)
+                blockFactory.newBytesRefBlockBuilder(2)
                     .appendBytesRef(new BytesRef("15charwidedata!"))
                     .appendBytesRef(new BytesRef("dog"))
                     .build(),
-                new LongArrayVector(new long[] { 1, 2 }, 2).asBlock(),
-                new DoubleArrayVector(new double[] { 6.888, 123124.888 }, 2).asBlock(),
-                Block.constantNullBlock(2),
-                new DoubleArrayVector(new double[] { 12, 9912 }, 2).asBlock(),
-                BytesRefBlock.newBlockBuilder(2).appendBytesRef(new BytesRef("rabbit")).appendBytesRef(new BytesRef("goat")).build(),
-                new LongArrayVector(
+                blockFactory.newLongArrayVector(new long[] { 1, 2 }, 2).asBlock(),
+                blockFactory.newDoubleArrayVector(new double[] { 6.888, 123124.888 }, 2).asBlock(),
+                blockFactory.newConstantNullBlock(2),
+                blockFactory.newDoubleArrayVector(new double[] { 12, 9912 }, 2).asBlock(),
+                blockFactory.newBytesRefBlockBuilder(2).appendBytesRef(new BytesRef("rabbit")).appendBytesRef(new BytesRef("goat")).build(),
+                blockFactory.newLongArrayVector(
                     new long[] {
                         UTC_DATE_TIME_FORMATTER.parseMillis("1953-09-02T00:00:00.000Z"),
                         UTC_DATE_TIME_FORMATTER.parseMillis("2000-03-15T21:34:37.443Z") },
                     2
                 ).asBlock(),
-                new LongArrayVector(new long[] { GEO.pointAsLong(12, 56), GEO.pointAsLong(-97, 26) }, 2).asBlock(),
-                Block.constantNullBlock(2)
+                blockFactory.newLongArrayVector(new long[] { GEO.pointAsLong(12, 56), GEO.pointAsLong(-97, 26) }, 2).asBlock(),
+                blockFactory.newConstantNullBlock(2)
             )
         ),
         null,
+        randomBoolean(),
         randomBoolean()
     );

@@ -108,23 +110,30 @@ public void testFormatWithoutHeader() {
         columns,
         List.of(
             new Page(
-                BytesRefBlock.newBlockBuilder(2).appendBytesRef(new BytesRef("doggie")).appendBytesRef(new BytesRef("dog")).build(),
-                new LongArrayVector(new long[] { 4, 2 }, 2).asBlock(),
-                new DoubleArrayVector(new double[] { 1, 123124.888 }, 2).asBlock(),
-                Block.constantNullBlock(2),
-                new DoubleArrayVector(new double[] { 77.0, 9912.0 }, 2).asBlock(),
-                BytesRefBlock.newBlockBuilder(2).appendBytesRef(new BytesRef("wombat")).appendBytesRef(new BytesRef("goat")).build(),
-                new LongArrayVector(
+                blockFactory.newBytesRefBlockBuilder(2)
+                    .appendBytesRef(new BytesRef("doggie"))
+                    .appendBytesRef(new BytesRef("dog"))
+                    .build(),
+                blockFactory.newLongArrayVector(new long[] { 4, 2 }, 2).asBlock(),
+                blockFactory.newDoubleArrayVector(new double[] { 1, 123124.888 }, 2).asBlock(),
+                blockFactory.newConstantNullBlock(2),
+                blockFactory.newDoubleArrayVector(new double[] { 77.0, 9912.0 }, 2).asBlock(),
+                blockFactory.newBytesRefBlockBuilder(2)
+                    .appendBytesRef(new BytesRef("wombat"))
+                    .appendBytesRef(new BytesRef("goat"))
+                    .build(),
+                blockFactory.newLongArrayVector(
                     new long[] {
                         UTC_DATE_TIME_FORMATTER.parseMillis("1955-01-21T01:02:03.342Z"),
                         UTC_DATE_TIME_FORMATTER.parseMillis("2231-12-31T23:59:59.999Z") },
                     2
                 ).asBlock(),
-                new LongArrayVector(new long[] { GEO.pointAsLong(12, 56), GEO.pointAsLong(-97, 26) }, 2).asBlock(),
-                Block.constantNullBlock(2)
+                blockFactory.newLongArrayVector(new long[] { GEO.pointAsLong(12, 56), GEO.pointAsLong(-97, 26) }, 2).asBlock(),
+                blockFactory.newConstantNullBlock(2)
             )
         ),
         null,
+        randomBoolean(),
         randomBoolean()
     );

@@ -157,13 +166,14 @@ public void testVeryLongPadding() {
                 List.of(new ColumnInfo("foo", "keyword")),
                 List.of(
                     new Page(
-                        BytesRefBlock.newBlockBuilder(2)
+                        blockFactory.newBytesRefBlockBuilder(2)
                             .appendBytesRef(new BytesRef(smallFieldContent))
                             .appendBytesRef(new BytesRef(largeFieldContent))
                             .build()
                     )
                 ),
                 null,
+                randomBoolean(),
                 randomBoolean()
             )
         ).format(false)
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamOutputTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamOutputTests.java
new file mode 100644
index 0000000000000..7f683e8f8003b
--- /dev/null
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamOutputTests.java
@@ -0,0 +1,30 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.esql.io.stream;
+
+import org.elasticsearch.TransportVersion;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.test.TransportVersionUtils;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class PlanStreamOutputTests extends ESTestCase {
+
+    public void testTransportVersion() {
+        BytesStreamOutput out = new BytesStreamOutput();
+        TransportVersion v1 = TransportVersionUtils.randomCompatibleVersion(random());
+        out.setTransportVersion(v1);
+        PlanStreamOutput planOut = new PlanStreamOutput(out, PlanNameRegistry.INSTANCE);
+        assertThat(planOut.getTransportVersion(), equalTo(v1));
+        TransportVersion v2 = TransportVersionUtils.randomCompatibleVersion(random());
+        planOut.setTransportVersion(v2);
+        assertThat(planOut.getTransportVersion(), equalTo(v2));
+        assertThat(out.getTransportVersion(), equalTo(v2));
+    }
+}
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizerTests.java
index 12b8185cbec5d..5887d61c652bb 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizerTests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizerTests.java
@@ -258,9 +258,11 @@ public void testCountOneFieldWithFilter() {
         assertThat(esStatsQuery.limit(), is(nullValue()));
         assertThat(Expressions.names(esStatsQuery.output()), contains("count", "seen"));
         var stat = as(esStatsQuery.stats().get(0), Stat.class);
-        assertThat(stat.query(), is(QueryBuilders.existsQuery("salary")));
-        var source = ((SingleValueQuery.Builder) esStatsQuery.query()).source();
-        var expected = wrapWithSingleQuery(QueryBuilders.rangeQuery("salary").gt(1000), "salary", source);
+        Source source = new Source(2, 8, "salary > 1000");
+        var exists = QueryBuilders.existsQuery("salary");
+        assertThat(stat.query(), is(exists));
+        var range = wrapWithSingleQuery(QueryBuilders.rangeQuery("salary").gt(1000), "salary", source);
+        var expected = QueryBuilders.boolQuery().must(range).must(exists);
         assertThat(expected.toString(), is(esStatsQuery.query().toString()));
     }

@@ -381,6 +383,28 @@ public boolean exists(String field) {
         assertThat(Expressions.names(localSource.output()), contains("count", "seen"));
     }

+    public void testIsNotNullPushdownFilter() {
+        var plan = plan("from test | where emp_no is not null");
+
+        var limit = as(plan, LimitExec.class);
+        var exchange = as(limit.child(), ExchangeExec.class);
+        var query = as(exchange.child(), EsQueryExec.class);
+        assertThat(query.limit().fold(), is(500));
+        var expected = QueryBuilders.existsQuery("emp_no");
+        assertThat(query.query().toString(), is(expected.toString()));
+    }
+
+    public void testIsNullPushdownFilter() {
+        var plan = plan("from test | where emp_no is null");
+
+        var limit = as(plan, LimitExec.class);
+        var exchange = as(limit.child(), ExchangeExec.class);
+        var query = as(exchange.child(), EsQueryExec.class);
+        assertThat(query.limit().fold(), is(500));
+        var expected = QueryBuilders.boolQuery().mustNot(QueryBuilders.existsQuery("emp_no"));
+        assertThat(query.query().toString(), is(expected.toString()));
+    }
+
     private QueryBuilder wrapWithSingleQuery(QueryBuilder inner, String fieldName, Source source) {
         return FilterTests.singleValueQuery(inner, fieldName, source);
     }
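The two pushdown tests pin down the Lucene-level translation of null checks, built with the same `QueryBuilders` calls the assertions use:

```java
// `where emp_no is not null` pushes down as a bare exists query ...
QueryBuilder isNotNull = QueryBuilders.existsQuery("emp_no");
// ... while `where emp_no is null` becomes its boolean negation.
QueryBuilder isNull = QueryBuilders.boolQuery().mustNot(QueryBuilders.existsQuery("emp_no"));
```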
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java
index 352dccc046588..6320294d7ee54 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java
@@ -9,9 +9,9 @@
 import org.elasticsearch.common.lucene.BytesRefs;
 import org.elasticsearch.compute.aggregation.QuantileStates;
-import org.elasticsearch.compute.data.BlockFactory;
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.xpack.esql.EsqlTestUtils;
+import org.elasticsearch.xpack.esql.TestBlockFactory;
 import org.elasticsearch.xpack.esql.analysis.Analyzer;
 import org.elasticsearch.xpack.esql.analysis.AnalyzerContext;
 import org.elasticsearch.xpack.esql.analysis.AnalyzerTestUtils;
@@ -261,6 +261,62 @@ public void testCombineProjectionWithPruning() {
         var from = as(agg.child(), EsRelation.class);
     }

+    /**
+     * Expects
+     * Limit[500[INTEGER]]
+     * \_Aggregate[[f{r}#7],[SUM(emp_no{f}#15) AS s, COUNT(first_name{f}#16) AS c, first_name{f}#16 AS f]]
+     *   \_EsRelation[test][_meta_field{f}#21, emp_no{f}#15, first_name{f}#16, ..]
+     */
+    public void testCombineProjectionWithAggregationFirstAndAliasedGroupingUsedInAgg() {
+        var plan = plan("""
+            from test
+            | rename emp_no as e, first_name as f
+            | stats s = sum(e), c = count(f) by f
+            """);
+
+        var limit = as(plan, Limit.class);
+        var agg = as(limit.child(), Aggregate.class);
+        var aggs = agg.aggregates();
+        assertThat(Expressions.names(aggs), contains("s", "c", "f"));
+        Alias as = as(aggs.get(0), Alias.class);
+        var sum = as(as.child(), Sum.class);
+        assertThat(Expressions.name(sum.field()), is("emp_no"));
+        as = as(aggs.get(1), Alias.class);
+        var count = as(as.child(), Count.class);
+        assertThat(Expressions.name(count.field()), is("first_name"));
+
+        as = as(aggs.get(2), Alias.class);
+        assertThat(Expressions.name(as.child()), is("first_name"));
+
+        assertThat(Expressions.names(agg.groupings()), contains("f"));
+    }
+
+    /**
+     * Expects
+     * Limit[500[INTEGER]]
+     * \_Aggregate[[f{r}#7],[SUM(emp_no{f}#15) AS s, first_name{f}#16 AS f]]
+     *   \_EsRelation[test][_meta_field{f}#21, emp_no{f}#15, first_name{f}#16, ..]
+     */
+    public void testCombineProjectionWithAggregationFirstAndAliasedGroupingUnused() {
+        var plan = plan("""
+            from test
+            | rename emp_no as e, first_name as f, last_name as l
+            | stats s = sum(e) by f
+            """);
+
+        var limit = as(plan, Limit.class);
+        var agg = as(limit.child(), Aggregate.class);
+        var aggs = agg.aggregates();
+        assertThat(Expressions.names(aggs), contains("s", "f"));
+        Alias as = as(aggs.get(0), Alias.class);
+        var aggFunc = as(as.child(), AggregateFunction.class);
+        assertThat(Expressions.name(aggFunc.field()), is("emp_no"));
+        as = as(aggs.get(1), Alias.class);
+        assertThat(Expressions.name(as.child()), is("first_name"));
+
+        assertThat(Expressions.names(agg.groupings()), contains("f"));
+    }
+
     /**
      * Expects
      * EsqlProject[[x{r}#3, y{r}#6]]
@@ -301,7 +357,7 @@ public void testMultipleCombineLimits() {
         var limitWithMinimum = randomIntBetween(0, numberOfLimits - 1);

         var fa = getFieldAttribute("a", INTEGER);
-        var relation = localSource(BlockFactory.getNonBreakingInstance(), singletonList(fa), singletonList(1));
+        var relation = localSource(TestBlockFactory.getNonBreakingInstance(), singletonList(fa), singletonList(1));
         LogicalPlan plan = relation;

         for (int i = 0; i < numberOfLimits; i++) {
@@ -2534,6 +2590,184 @@ private void aggFieldName(Expression exp, Class
         assertThat(name, is(fieldName));
     }

+    /**
+     * Expects
+     * Limit[500[INTEGER]]
+     * \_Aggregate[[],[SUM(emp_no{f}#4) AS sum(emp_no)]]
+     *   \_EsRelation[test][_meta_field{f}#10, emp_no{f}#4, first_name{f}#5, ge..]
+     */
+    public void testIsNotNullConstraintForStatsWithoutGrouping() {
+        var plan = optimizedPlan("""
+            from test
+            | stats sum(emp_no)
+            """);
+
+        var limit = as(plan, Limit.class);
+        var agg = as(limit.child(), Aggregate.class);
+        assertThat(Expressions.names(agg.groupings()), is(empty()));
+        assertThat(Expressions.names(agg.aggregates()), contains("sum(emp_no)"));
+        var from = as(agg.child(), EsRelation.class);
+    }
+
+    public void testIsNotNullConstraintForStatsWithGrouping() {
+        var plan = optimizedPlan("""
+            from test
+            | stats sum(emp_no) by salary
+            """);
+
+        var limit = as(plan, Limit.class);
+        var agg = as(limit.child(), Aggregate.class);
+        assertThat(Expressions.names(agg.groupings()), contains("salary"));
+        assertThat(Expressions.names(agg.aggregates()), contains("sum(emp_no)", "salary"));
+        var from = as(agg.child(), EsRelation.class);
+    }
+
+    /**
+     * Expected
+     * Limit[500[INTEGER]]
+     * \_Aggregate[[salary{f}#1185],[SUM(salary{f}#1185) AS sum(salary), salary{f}#1185]]
+     *   \_EsRelation[test][_meta_field{f}#1186, emp_no{f}#1180, first_name{f}#..]
+     */
+    public void testIsNotNullConstraintForStatsWithAndOnGrouping() {
+        var plan = optimizedPlan("""
+            from test
+            | stats sum(salary) by salary
+            """);
+
+        var limit = as(plan, Limit.class);
+        var agg = as(limit.child(), Aggregate.class);
+        assertThat(Expressions.names(agg.groupings()), contains("salary"));
+        assertThat(Expressions.names(agg.aggregates()), contains("sum(salary)", "salary"));
+        var from = as(agg.child(), EsRelation.class);
+    }
+
+    /**
+     * Expects
+     * Limit[500[INTEGER]]
+     * \_Aggregate[[x{r}#4],[SUM(salary{f}#13) AS sum(salary), salary{f}#13 AS x]]
+     *   \_EsRelation[test][_meta_field{f}#14, emp_no{f}#8, first_name{f}#9, ge..]
+     */
+    public void testIsNotNullConstraintForStatsWithAndOnGroupingAlias() {
+        var plan = optimizedPlan("""
+            from test
+            | eval x = salary
+            | stats sum(salary) by x
+            """);
+
+        var limit = as(plan, Limit.class);
+        var agg = as(limit.child(), Aggregate.class);
+        assertThat(Expressions.names(agg.groupings()), contains("x"));
+        assertThat(Expressions.names(agg.aggregates()), contains("sum(salary)", "x"));
+        var from = as(agg.child(), EsRelation.class);
+    }
+
+    /**
+     * Expects
+     * Limit[500[INTEGER]]
+     * \_Aggregate[[salary{f}#13],[SUM(emp_no{f}#8) AS sum(x), salary{f}#13]]
+     *   \_EsRelation[test][_meta_field{f}#14, emp_no{f}#8, first_name{f}#9, ge..]
+     */
+    public void testIsNotNullConstraintSkippedForStatsWithAlias() {
+        var plan = optimizedPlan("""
+            from test
+            | eval x = emp_no
+            | stats sum(x) by salary
+            """);
+
+        var limit = as(plan, Limit.class);
+        var agg = as(limit.child(), Aggregate.class);
+        assertThat(Expressions.names(agg.groupings()), contains("salary"));
+        assertThat(Expressions.names(agg.aggregates()), contains("sum(x)", "salary"));
+
+        // non null filter for stats
+        var from = as(agg.child(), EsRelation.class);
+    }
+
+    /**
+     * Expects
+     * Limit[500[INTEGER]]
+     * \_Aggregate[[],[SUM(emp_no{f}#8) AS a, MIN(salary{f}#13) AS b]]
+     *   \_EsRelation[test][_meta_field{f}#14, emp_no{f}#8, first_name{f}#9, ge..]
+     */
+    public void testIsNotNullConstraintForStatsWithMultiAggWithoutGrouping() {
+        var plan = optimizedPlan("""
+            from test
+            | stats a = sum(emp_no), b = min(salary)
+            """);
+
+        var limit = as(plan, Limit.class);
+        var agg = as(limit.child(), Aggregate.class);
+        assertThat(Expressions.names(agg.aggregates()), contains("a", "b"));
+
+        var from = as(agg.child(), EsRelation.class);
+    }
+
+    /**
+     * Expects
+     * Limit[500[INTEGER]]
+     * \_Aggregate[[gender{f}#11],[SUM(emp_no{f}#9) AS a, MIN(salary{f}#14) AS b, gender{f}#11]]
+     *   \_EsRelation[test][_meta_field{f}#15, emp_no{f}#9, first_name{f}#10, g..]
+     */
+    public void testIsNotNullConstraintForStatsWithMultiAggWithGrouping() {
+        var plan = optimizedPlan("""
+            from test
+            | stats a = sum(emp_no), b = min(salary) by gender
+            """);
+
+        var limit = as(plan, Limit.class);
+        var agg = as(limit.child(), Aggregate.class);
+        assertThat(Expressions.names(agg.aggregates()), contains("a", "b", "gender"));
+
+        var from = as(agg.child(), EsRelation.class);
+    }
+
+    /**
+     * Expects
+     * Limit[500[INTEGER]]
+     * \_Aggregate[[emp_no{f}#9],[SUM(emp_no{f}#9) AS a, MIN(salary{f}#14) AS b, emp_no{f}#9]]
+     *   \_EsRelation[test][_meta_field{f}#15, emp_no{f}#9, first_name{f}#10, g..]
+     */
+    public void testIsNotNullConstraintForStatsWithMultiAggWithAndOnGrouping() {
+        var plan = optimizedPlan("""
+            from test
+            | stats a = sum(emp_no), b = min(salary) by emp_no
+            """);
+
+        var limit = as(plan, Limit.class);
+        var agg = as(limit.child(), Aggregate.class);
+        assertThat(Expressions.names(agg.aggregates()), contains("a", "b", "emp_no"));
+
+        var from = as(agg.child(), EsRelation.class);
+    }
+
+    /**
+     * Expects
+     * Limit[500[INTEGER]]
+     * \_Aggregate[[w{r}#14, g{r}#16],[COUNT(b{r}#24) AS c, w{r}#14, gender{f}#32 AS g]]
+     *   \_Eval[[emp_no{f}#30 / 10[INTEGER] AS x, x{r}#4 + salary{f}#35 AS y, y{r}#8 / 4[INTEGER] AS z, z{r}#11 * 2[INTEGER] +
+     *     3[INTEGER] AS w, salary{f}#35 + 4[INTEGER] / 2[INTEGER] AS a, a{r}#21 + 3[INTEGER] AS b]]
+     *     \_EsRelation[test][_meta_field{f}#36, emp_no{f}#30, first_name{f}#31, ..]
+     */
+    public void testIsNotNullConstraintForAliasedExpressions() {
+        var plan = optimizedPlan("""
+            from test
+            | eval x = emp_no / 10
+            | eval y = x + salary
+            | eval z = y / 4
+            | eval w = z * 2 + 3
+            | rename gender as g, salary as s
+            | eval a = (s + 4) / 2
+            | eval b = a + 3
+            | stats c = count(b) by w, g
+            """);
+
+        var limit = as(plan, Limit.class);
+        var agg = as(limit.child(), Aggregate.class);
+        assertThat(Expressions.names(agg.aggregates()), contains("c", "w", "g"));
+        var eval = as(agg.child(), Eval.class);
+        var from = as(eval.child(), EsRelation.class);
+    }
+
     private LogicalPlan optimizedPlan(String query) {
         return plan(query);
     }
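On the physical side, the same not-null constraint becomes a source-level filter; with several aggregated fields the exists clauses are OR-ed, since a document is worth reading if any aggregated field is present (see testQueryForStatWithMultiAgg below). Built with the `boolQuery`/`existsQuery` helpers the tests import:

```java
// Expected pushed-down query for `stats agg_1 = sum(emp_no), agg_2 = min(salary)`.
QueryBuilder expected = QueryBuilders.boolQuery()
    .should(QueryBuilders.existsQuery("emp_no"))
    .should(QueryBuilders.existsQuery("salary"));
```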
testQueryWithAggAfterEval() { var plan = physicalPlan(""" from test @@ -543,10 +569,35 @@ public void testQueryWithAggAfterEval() { assertThat(agg.estimatedRowSize(), equalTo(Long.BYTES * 2)); var exchange = asRemoteExchange(agg.child()); var aggregate = as(exchange.child(), AggregateExec.class); - // sum is long a long, x isn't calculated until the agg above + // sum is long, x isn't calculated until the agg above assertThat(aggregate.estimatedRowSize(), equalTo(Long.BYTES)); var extract = as(aggregate.child(), FieldExtractExec.class); assertThat(names(extract.attributesToExtract()), contains("emp_no")); + + var query = source(extract.child()); + assertThat(query.estimatedRowSize(), equalTo(Integer.BYTES * 2 /* for doc id, emp_no*/)); + assertThat(query.query(), is(existsQuery("emp_no"))); + } + + public void testQueryForStatWithMultiAgg() { + var plan = physicalPlan(""" + from test + | stats agg_1 = sum(emp_no), agg_2 = min(salary) + """); + + var stats = statsWithIndexedFields("emp_no", "salary"); + var optimized = optimizedPlan(plan, stats); + var topLimit = as(optimized, LimitExec.class); + var agg = as(topLimit.child(), AggregateExec.class); + var exchange = asRemoteExchange(agg.child()); + var aggregate = as(exchange.child(), AggregateExec.class); + // both agg source fields are extracted for the partial agg + var extract = as(aggregate.child(), FieldExtractExec.class); + assertThat(names(extract.attributesToExtract()), contains("emp_no", "salary")); + + var query = source(extract.child()); + assertThat(query.estimatedRowSize(), equalTo(Integer.BYTES * 3 /* for doc id, emp_no, salary*/)); + assertThat(query.query(), is(boolQuery().should(existsQuery("emp_no")).should(existsQuery("salary")))); } public void testQueryWithNull() { @@ -1337,8 +1388,9 @@ public void testPushDownLike() { QueryBuilder query = source.query(); assertNotNull(query); - assertEquals(WildcardQueryBuilder.class, query.getClass()); - WildcardQueryBuilder wildcard = ((WildcardQueryBuilder) query); + assertEquals(SingleValueQuery.Builder.class, query.getClass()); + assertThat(((SingleValueQuery.Builder) query).next(), instanceOf(WildcardQueryBuilder.class)); + WildcardQueryBuilder wildcard = ((WildcardQueryBuilder) ((SingleValueQuery.Builder) query).next()); assertEquals("first_name", wildcard.fieldName()); assertEquals("*foo*", wildcard.value()); } @@ -1402,8 +1454,9 @@ public void testPushDownRLike() { QueryBuilder query = source.query(); assertNotNull(query); - assertEquals(RegexpQueryBuilder.class, query.getClass()); - RegexpQueryBuilder wildcard = ((RegexpQueryBuilder) query); + assertEquals(SingleValueQuery.Builder.class, query.getClass()); + assertThat(((SingleValueQuery.Builder) query).next(), instanceOf(RegexpQueryBuilder.class)); + RegexpQueryBuilder wildcard = ((RegexpQueryBuilder) ((SingleValueQuery.Builder) query).next()); assertEquals("first_name", wildcard.fieldName()); assertEquals(".*foo.*", wildcard.value()); } @@ -1424,8 +1477,9 @@ public void testPushDownNotRLike() { QueryBuilder query = source.query(); assertNotNull(query); - assertThat(query, instanceOf(BoolQueryBuilder.class)); - var boolQuery = (BoolQueryBuilder) query; + assertThat(query, instanceOf(SingleValueQuery.Builder.class)); + assertThat(((SingleValueQuery.Builder) query).next(), instanceOf(BoolQueryBuilder.class)); + var boolQuery = (BoolQueryBuilder) ((SingleValueQuery.Builder) query).next(); List mustNot = boolQuery.mustNot(); assertThat(mustNot.size(), is(1)); assertThat(mustNot.get(0), instanceOf(RegexpQueryBuilder.class)); @@
-1892,6 +1946,110 @@ public boolean exists(String field) { assertThat(Expressions.names(localSourceExec.output()), contains("languages", "min", "seen")); } + /** + * Expects + * intermediate plan + * LimitExec[500[INTEGER]] + * \_AggregateExec[[],[COUNT(emp_no{f}#6) AS c],FINAL,null] + * \_ExchangeExec[[count{r}#16, seen{r}#17],true] + * \_FragmentExec[filter=null, estimatedRowSize=0, fragment=[ + * Aggregate[[],[COUNT(emp_no{f}#6) AS c]] + * \_Filter[emp_no{f}#6 > 10[INTEGER]] + * \_EsRelation[test][_meta_field{f}#12, emp_no{f}#6, first_name{f}#7, ge..]]] + * + * and final plan is + * LimitExec[500[INTEGER]] + * \_AggregateExec[[],[COUNT(emp_no{f}#6) AS c],FINAL,8] + * \_ExchangeExec[[count{r}#16, seen{r}#17],true] + * \_LocalSourceExec[[count{r}#16, seen{r}#17],[LongVectorBlock[vector=ConstantLongVector[positions=1, value=0]]]] + */ + public void testPartialAggFoldingOutput() { + var plan = physicalPlan(""" + from test + | where emp_no > 10 + | stats c = count(emp_no) + """); + + var stats = statsForMissingField("emp_no"); + var optimized = optimizedPlan(plan, stats); + + var limit = as(optimized, LimitExec.class); + var agg = as(limit.child(), AggregateExec.class); + var exchange = as(agg.child(), ExchangeExec.class); + assertThat(Expressions.names(exchange.output()), contains("count", "seen")); + var source = as(exchange.child(), LocalSourceExec.class); + assertThat(Expressions.names(source.output()), contains("count", "seen")); + } + + /** + * Checks that when the folding happens on the coordinator, the intermediate agg state + * is no longer used. + * + * Expects + * LimitExec[10000[INTEGER]] + * \_AggregateExec[[],[COUNT(emp_no{f}#5) AS c],FINAL,8] + * \_AggregateExec[[],[COUNT(emp_no{f}#5) AS c],PARTIAL,8] + * \_LimitExec[10[INTEGER]] + * \_ExchangeExec[[],false] + * \_ProjectExec[[emp_no{r}#5]] + * \_EvalExec[[null[INTEGER] AS emp_no]] + * \_EsQueryExec[test], query[][_doc{f}#26], limit[10], sort[] estimatedRowSize[8] + */ + public void testGlobalAggFoldingOutput() { + var plan = physicalPlan(""" + from test + | limit 10 + | stats c = count(emp_no) + """); + + var stats = statsForMissingField("emp_no"); + var optimized = optimizedPlan(plan, stats); + + var limit = as(optimized, LimitExec.class); + var aggFinal = as(limit.child(), AggregateExec.class); + var aggPartial = as(aggFinal.child(), AggregateExec.class); + assertThat(Expressions.names(aggPartial.output()), contains("c")); + limit = as(aggPartial.child(), LimitExec.class); + var exchange = as(limit.child(), ExchangeExec.class); + var project = as(exchange.child(), ProjectExec.class); + } + + /** + * Checks the folded aggregation preserves the intermediate output.
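+ * (for the avg surrogate below this means the synthetic __a_SUM and __a_COUNT attributes, together with their seen flags, stay + * visible in the exchange output)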
+ * + * Expects + * ProjectExec[[a{r}#5]] + * \_EvalExec[[__a_SUM@734e2841{r}#16 / __a_COUNT@12536eab{r}#17 AS a]] + * \_LimitExec[500[INTEGER]] + * \_AggregateExec[[],[SUM(emp_no{f}#6) AS __a_SUM@734e2841, COUNT(emp_no{f}#6) AS __a_COUNT@12536eab],FINAL,24] + * \_ExchangeExec[[sum{r}#18, seen{r}#19, count{r}#20, seen{r}#21],true] + * \_LocalSourceExec[[sum{r}#18, seen{r}#19, count{r}#20, seen{r}#21],[LongArrayBlock[positions=1, mvOrdering=UNORDERED, + * values=[0, + * 0]], BooleanVectorBlock[vector=ConstantBooleanVector[positions=1, value=true]], + * LongVectorBlock[vector=ConstantLongVector[positions=1, value=0]], + * BooleanVectorBlock[vector=ConstantBooleanVector[positions=1, value=true]]]] + */ + public void testPartialAggFoldingOutputForSyntheticAgg() { + var plan = physicalPlan(""" + from test + | where emp_no > 10 + | stats a = avg(emp_no) + """); + + var stats = statsForMissingField("emp_no"); + var optimized = optimizedPlan(plan, stats); + + var project = as(optimized, ProjectExec.class); + var eval = as(project.child(), EvalExec.class); + var limit = as(eval.child(), LimitExec.class); + var aggFinal = as(limit.child(), AggregateExec.class); + assertThat(aggFinal.output(), hasSize(2)); + var exchange = as(aggFinal.child(), ExchangeExec.class); + assertThat(Expressions.names(exchange.output()), contains("sum", "seen", "count", "seen")); + var source = as(exchange.child(), LocalSourceExec.class); + assertThat(Expressions.names(source.output()), contains("sum", "seen", "count", "seen")); + } + private static EsQueryExec source(PhysicalPlan plan) { if (plan instanceof ExchangeExec exchange) { plan = exchange.child(); @@ -1922,6 +2080,17 @@ private PhysicalPlan optimizedPlan(PhysicalPlan plan, SearchStats searchStats) { return l; } + static SearchStats statsWithIndexedFields(String... 
names) { + return new EsqlTestUtils.TestSearchStats() { + private final Set indexedFields = Set.of(names); + + @Override + public boolean isIndexed(String field) { + return indexedFields.contains(field); + } + }; + } + static PhysicalPlan localRelationshipAlignment(PhysicalPlan l) { // handle local reduction alignment return l.transformUp(ExchangeExec.class, exg -> { @@ -1941,6 +2110,7 @@ private PhysicalPlan physicalPlan(String query) { var logical = logicalOptimizer.optimize(analyzer.analyze(parser.createStatement(query))); // System.out.println("Logical\n" + logical); var physical = mapper.map(logical); + // System.out.println(physical); assertSerialization(physical); return physical; } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/EvalMapperTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/EvalMapperTests.java index b4c9d7a9baeca..1d2b11d3deb89 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/EvalMapperTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/EvalMapperTests.java @@ -12,12 +12,12 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.util.MockBigArrays; import org.elasticsearch.common.util.PageCacheRecycler; -import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.operator.DriverContext; import org.elasticsearch.compute.operator.EvalOperator; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.esql.SerializationTestUtils; +import org.elasticsearch.xpack.esql.TestBlockFactory; import org.elasticsearch.xpack.esql.evaluator.EvalMapper; import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.Equals; import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThan; @@ -162,7 +162,7 @@ private static FieldAttribute field(String name, DataType type) { static DriverContext driverContext() { return new DriverContext( new MockBigArrays(PageCacheRecycler.NON_RECYCLING_INSTANCE, new NoneCircuitBreakerService()).withCircuitBreaking(), - BlockFactory.getNonBreakingInstance() + TestBlockFactory.getNonBreakingInstance() ); } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/GrokEvaluatorExtracterTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/GrokEvaluatorExtracterTests.java index 6a5c8fd3f92c2..af7a66fea9bb2 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/GrokEvaluatorExtracterTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/GrokEvaluatorExtracterTests.java @@ -9,6 +9,7 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.BooleanBlock; import org.elasticsearch.compute.data.BytesRefBlock; import org.elasticsearch.compute.data.DoubleBlock; @@ -18,6 +19,7 @@ import org.elasticsearch.grok.Grok; import org.elasticsearch.grok.GrokBuiltinPatterns; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.esql.TestBlockFactory; import org.elasticsearch.xpack.esql.evaluator.command.GrokEvaluatorExtracter; import java.util.Map; @@ -26,6 +28,8 @@ import static org.hamcrest.Matchers.is; public class GrokEvaluatorExtracterTests extends ESTestCase { + final BlockFactory blockFactory = 
TestBlockFactory.getNonBreakingInstance(); + final Map KEY_TO_BLOCK = Map.of("a", 0, "b", 1, "c", 2, "d", 3, "e", 4, "f", 5); final Map TYPES = Map.of( "a", @@ -196,7 +200,7 @@ private void checkBooleanBlock(Block.Builder builder, int[] itemsPerRow, boolean private BytesRefBlock buildInputBlock(int[] mvSize, String... input) { int nextString = 0; - BytesRefBlock.Builder inputBuilder = BytesRefBlock.newBlockBuilder(input.length); + BytesRefBlock.Builder inputBuilder = blockFactory.newBytesRefBlockBuilder(input.length); for (int i = 0; i < mvSize.length; i++) { if (mvSize[i] == 0) { inputBuilder.appendNull(); @@ -222,12 +226,12 @@ private BytesRefBlock buildInputBlock(int[] mvSize, String... input) { private Block.Builder[] buidDefaultTargetBlocks(int estimatedSize) { return new Block.Builder[] { - BytesRefBlock.newBlockBuilder(estimatedSize), - IntBlock.newBlockBuilder(estimatedSize), - LongBlock.newBlockBuilder(estimatedSize), - DoubleBlock.newBlockBuilder(estimatedSize), - DoubleBlock.newBlockBuilder(estimatedSize), - BooleanBlock.newBlockBuilder(estimatedSize) }; + blockFactory.newBytesRefBlockBuilder(estimatedSize), + blockFactory.newIntBlockBuilder(estimatedSize), + blockFactory.newLongBlockBuilder(estimatedSize), + blockFactory.newDoubleBlockBuilder(estimatedSize), + blockFactory.newDoubleBlockBuilder(estimatedSize), + blockFactory.newBooleanBlockBuilder(estimatedSize) }; } private GrokEvaluatorExtracter buildExtracter(String pattern, Map keyToBlock, Map types) { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlannerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlannerTests.java index 24fcae0f6bbb0..27a45e71a69c1 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlannerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlannerTests.java @@ -19,7 +19,6 @@ import org.apache.lucene.tests.index.RandomIndexWriter; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.BigArrays; -import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.lucene.LuceneSourceOperator; import org.elasticsearch.compute.lucene.LuceneTopNSourceOperator; import org.elasticsearch.compute.operator.SourceOperator; @@ -31,6 +30,7 @@ import org.elasticsearch.search.internal.ContextIndexSearcher; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.test.TestSearchContext; +import org.elasticsearch.xpack.esql.TestBlockFactory; import org.elasticsearch.xpack.esql.plan.physical.EsQueryExec; import org.elasticsearch.xpack.esql.plugin.EsqlPlugin; import org.elasticsearch.xpack.esql.plugin.QueryPragmas; @@ -124,7 +124,7 @@ private LocalExecutionPlanner planner() throws IOException { "test", null, BigArrays.NON_RECYCLING_INSTANCE, - BlockFactory.getNonBreakingInstance(), + TestBlockFactory.getNonBreakingInstance(), Settings.EMPTY, config(), null, diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/TestPhysicalOperationProviders.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/TestPhysicalOperationProviders.java index 601184252814e..8377530b9fbc2 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/TestPhysicalOperationProviders.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/TestPhysicalOperationProviders.java @@ -13,11 +13,10 
@@ import org.elasticsearch.compute.aggregation.GroupingAggregator; import org.elasticsearch.compute.aggregation.blockhash.BlockHash; import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BlockFactory; import org.elasticsearch.compute.data.DocBlock; import org.elasticsearch.compute.data.DocVector; import org.elasticsearch.compute.data.ElementType; -import org.elasticsearch.compute.data.IntArrayVector; -import org.elasticsearch.compute.data.IntBlock; import org.elasticsearch.compute.data.IntVector; import org.elasticsearch.compute.data.Page; import org.elasticsearch.compute.operator.DriverContext; @@ -27,6 +26,7 @@ import org.elasticsearch.compute.operator.SourceOperator; import org.elasticsearch.compute.operator.SourceOperator.SourceOperatorFactory; import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; +import org.elasticsearch.xpack.esql.TestBlockFactory; import org.elasticsearch.xpack.esql.plan.physical.AggregateExec; import org.elasticsearch.xpack.esql.plan.physical.EsQueryExec; import org.elasticsearch.xpack.esql.plan.physical.FieldExtractExec; @@ -92,6 +92,11 @@ public Operator.OperatorFactory ordinalGroupingOperatorFactory( private class TestSourceOperator extends SourceOperator { boolean finished = false; + private final DriverContext driverContext; + + TestSourceOperator(DriverContext driverContext) { + this.driverContext = driverContext; + } @Override public Page getOutput() { @@ -99,15 +104,14 @@ public Page getOutput() { finish(); } - return new Page( - new Block[] { - new DocVector( - IntBlock.newConstantBlockWith(0, testData.getPositionCount()).asVector(), - IntBlock.newConstantBlockWith(0, testData.getPositionCount()).asVector(), - new IntArrayVector(IntStream.range(0, testData.getPositionCount()).toArray(), testData.getPositionCount()), - true - ).asBlock() } + BlockFactory blockFactory = driverContext.blockFactory(); + DocVector docVector = new DocVector( + blockFactory.newConstantIntVector(0, testData.getPositionCount()), + blockFactory.newConstantIntVector(0, testData.getPositionCount()), + blockFactory.newIntArrayVector(IntStream.range(0, testData.getPositionCount()).toArray(), testData.getPositionCount()), + true ); + return new Page(docVector.asBlock()); } @Override @@ -128,11 +132,9 @@ public void close() { private class TestSourceOperatorFactory implements SourceOperatorFactory { - SourceOperator op = new TestSourceOperator(); - @Override public SourceOperator get(DriverContext driverContext) { - return op; + return new TestSourceOperator(driverContext); } @Override @@ -292,7 +294,8 @@ private Block extractBlockForColumn(Page page, String columnName) { DocBlock docBlock = page.getBlock(0); IntVector docIndices = docBlock.asVector().docs(); Block originalData = testData.getBlock(columnIndex); - Block.Builder builder = originalData.elementType().newBlockBuilder(docIndices.getPositionCount()); + Block.Builder builder = originalData.elementType() + .newBlockBuilder(docIndices.getPositionCount(), TestBlockFactory.getNonBreakingInstance()); for (int c = 0; c < docIndices.getPositionCount(); c++) { int doc = docIndices.getInt(c); builder.copyFrom(originalData, doc, doc + 1); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plugin/DataNodeRequestTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plugin/DataNodeRequestTests.java index 8970617548016..f1701ed696d2c 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plugin/DataNodeRequestTests.java +++ 
b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/plugin/DataNodeRequestTests.java @@ -82,6 +82,7 @@ protected DataNodeRequest createTestInstance() { DataNodeRequest request = new DataNodeRequest( sessionId, EsqlConfigurationSerializationTests.randomConfiguration(query), + randomAlphaOfLength(10), shardIds, aliasFilters, physicalPlan @@ -92,9 +93,16 @@ protected DataNodeRequest createTestInstance() { @Override protected DataNodeRequest mutateInstance(DataNodeRequest in) throws IOException { - return switch (between(0, 5)) { + return switch (between(0, 6)) { case 0 -> { - var request = new DataNodeRequest(randomAlphaOfLength(20), in.configuration(), in.shardIds(), in.aliasFilters(), in.plan()); + var request = new DataNodeRequest( + randomAlphaOfLength(20), + in.configuration(), + in.clusterAlias(), + in.shardIds(), + in.aliasFilters(), + in.plan() + ); request.setParentTask(in.getParentTask()); yield request; } @@ -102,6 +110,7 @@ protected DataNodeRequest mutateInstance(DataNodeRequest in) throws IOException var request = new DataNodeRequest( in.sessionId(), EsqlConfigurationSerializationTests.randomConfiguration(), + in.clusterAlias(), in.shardIds(), in.aliasFilters(), in.plan() @@ -111,7 +120,14 @@ protected DataNodeRequest mutateInstance(DataNodeRequest in) throws IOException } case 2 -> { List shardIds = randomList(1, 10, () -> new ShardId("new-index-" + between(1, 10), "n/a", between(1, 10))); - var request = new DataNodeRequest(in.sessionId(), in.configuration(), shardIds, in.aliasFilters(), in.plan()); + var request = new DataNodeRequest( + in.sessionId(), + in.configuration(), + in.clusterAlias(), + shardIds, + in.aliasFilters(), + in.plan() + ); request.setParentTask(in.getParentTask()); yield request; } @@ -132,6 +148,7 @@ protected DataNodeRequest mutateInstance(DataNodeRequest in) throws IOException var request = new DataNodeRequest( in.sessionId(), in.configuration(), + in.clusterAlias(), in.shardIds(), in.aliasFilters(), mapAndMaybeOptimize(parse(newQuery)) @@ -146,18 +163,45 @@ protected DataNodeRequest mutateInstance(DataNodeRequest in) throws IOException } else { aliasFilters = Map.of(new Index("concrete-index", "n/a"), AliasFilter.of(new TermQueryBuilder("id", "2"), "alias-2")); } - var request = new DataNodeRequest(in.sessionId(), in.configuration(), in.shardIds(), aliasFilters, in.plan()); + var request = new DataNodeRequest( + in.sessionId(), + in.configuration(), + in.clusterAlias(), + in.shardIds(), + aliasFilters, + in.plan() + ); request.setParentTask(request.getParentTask()); yield request; } case 5 -> { - var request = new DataNodeRequest(in.sessionId(), in.configuration(), in.shardIds(), in.aliasFilters(), in.plan()); + var request = new DataNodeRequest( + in.sessionId(), + in.configuration(), + in.clusterAlias(), + in.shardIds(), + in.aliasFilters(), + in.plan() + ); request.setParentTask( randomValueOtherThan(request.getParentTask().getNodeId(), () -> randomAlphaOfLength(10)), randomNonNegativeLong() ); yield request; } + case 6 -> { + var clusterAlias = randomValueOtherThan(in.clusterAlias(), () -> randomAlphaOfLength(10)); + var request = new DataNodeRequest( + in.sessionId(), + in.configuration(), + clusterAlias, + in.shardIds(), + in.aliasFilters(), + in.plan() + ); + request.setParentTask(request.getParentTask()); + yield request; + } default -> throw new AssertionError("invalid value"); }; } diff --git a/x-pack/plugin/fleet/src/internalClusterTest/java/org/elasticsearch/xpack/fleet/action/GetGlobalCheckpointsActionIT.java 
b/x-pack/plugin/fleet/src/internalClusterTest/java/org/elasticsearch/xpack/fleet/action/GetGlobalCheckpointsActionIT.java index 5c87fe8dd6c19..88f3b126b228c 100644 --- a/x-pack/plugin/fleet/src/internalClusterTest/java/org/elasticsearch/xpack/fleet/action/GetGlobalCheckpointsActionIT.java +++ b/x-pack/plugin/fleet/src/internalClusterTest/java/org/elasticsearch/xpack/fleet/action/GetGlobalCheckpointsActionIT.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.fleet.action; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.UnavailableShardsException; @@ -181,7 +180,7 @@ public void testMustProvideCorrectNumberOfShards() { ); ElasticsearchStatusException exception = expectThrows( ElasticsearchStatusException.class, - () -> client().execute(GetGlobalCheckpointsAction.INSTANCE, request).actionGet() + client().execute(GetGlobalCheckpointsAction.INSTANCE, request) ); assertThat(exception.status(), equalTo(RestStatus.BAD_REQUEST)); assertThat( @@ -205,7 +204,7 @@ public void testWaitForAdvanceOnlySupportsOneShard() { ); ElasticsearchStatusException exception = expectThrows( ElasticsearchStatusException.class, - () -> client().execute(GetGlobalCheckpointsAction.INSTANCE, request).actionGet() + client().execute(GetGlobalCheckpointsAction.INSTANCE, request) ); assertThat(exception.status(), equalTo(RestStatus.BAD_REQUEST)); assertThat(exception.getMessage(), equalTo("wait_for_advance only supports indices with one shard. [shard count: 3]")); @@ -221,10 +220,7 @@ public void testIndexDoesNotExistNoWait() { ); long start = System.nanoTime(); - ElasticsearchException exception = expectThrows( - IndexNotFoundException.class, - () -> client().execute(GetGlobalCheckpointsAction.INSTANCE, request).actionGet() - ); + expectThrows(IndexNotFoundException.class, client().execute(GetGlobalCheckpointsAction.INSTANCE, request)); long elapsed = TimeValue.timeValueNanos(System.nanoTime() - start).seconds(); assertThat(elapsed, lessThanOrEqualTo(TEN_SECONDS.seconds())); } @@ -237,10 +233,7 @@ public void testWaitOnIndexTimeout() { EMPTY_ARRAY, TimeValue.timeValueMillis(between(1, 100)) ); - ElasticsearchException exception = expectThrows( - IndexNotFoundException.class, - () -> client().execute(GetGlobalCheckpointsAction.INSTANCE, request).actionGet() - ); + expectThrows(IndexNotFoundException.class, client().execute(GetGlobalCheckpointsAction.INSTANCE, request)); } public void testWaitOnIndexCreated() throws Exception { @@ -285,7 +278,7 @@ public void testPrimaryShardsNotReadyNoWait() { UnavailableShardsException exception = expectThrows( UnavailableShardsException.class, - () -> client().execute(GetGlobalCheckpointsAction.INSTANCE, request).actionGet() + client().execute(GetGlobalCheckpointsAction.INSTANCE, request) ); assertEquals("Primary shards were not active [shards=1, active=0]", exception.getMessage()); } @@ -309,7 +302,7 @@ public void testWaitOnPrimaryShardsReadyTimeout() { UnavailableShardsException exception = expectThrows( UnavailableShardsException.class, - () -> client().execute(GetGlobalCheckpointsAction.INSTANCE, request).actionGet() + client().execute(GetGlobalCheckpointsAction.INSTANCE, request) ); assertEquals("Primary shards were not active within timeout [timeout=" + timeout + ", shards=1, active=0]", exception.getMessage()); } diff --git a/x-pack/plugin/fleet/src/main/java/org/elasticsearch/xpack/fleet/rest/RestFleetSearchAction.java 
b/x-pack/plugin/fleet/src/main/java/org/elasticsearch/xpack/fleet/rest/RestFleetSearchAction.java index 3a09fe1d18382..73af65b2f31a6 100644 --- a/x-pack/plugin/fleet/src/main/java/org/elasticsearch/xpack/fleet/rest/RestFleetSearchAction.java +++ b/x-pack/plugin/fleet/src/main/java/org/elasticsearch/xpack/fleet/rest/RestFleetSearchAction.java @@ -18,7 +18,7 @@ import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestCancellableNodeClient; -import org.elasticsearch.rest.action.RestChunkedToXContentListener; +import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener; import org.elasticsearch.rest.action.search.RestSearchAction; import org.elasticsearch.usage.SearchUsageHolder; @@ -96,7 +96,7 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli return channel -> { RestCancellableNodeClient cancelClient = new RestCancellableNodeClient(client, request.getHttpChannel()); - cancelClient.execute(TransportSearchAction.TYPE, searchRequest, new RestChunkedToXContentListener<>(channel)); + cancelClient.execute(TransportSearchAction.TYPE, searchRequest, new RestRefCountedChunkedToXContentListener<>(channel)); }; } diff --git a/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexIT.java b/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexIT.java index 5f219bd8ce592..ba4e1d98f63a6 100644 --- a/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexIT.java +++ b/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexIT.java @@ -232,21 +232,16 @@ public void testRetryPointInTime() throws Exception { ).keepAlive(TimeValue.timeValueMinutes(2)); final String pitId = client().execute(TransportOpenPointInTimeAction.TYPE, openPointInTimeRequest).actionGet().getPointInTimeId(); try { - assertNoFailuresAndResponse( - prepareSearch().setIndices(indexName).setPreference(null).setPointInTime(new PointInTimeBuilder(pitId)), - searchResponse -> { - assertThat(searchResponse.pointInTimeId(), equalTo(pitId)); - assertHitCount(searchResponse, numDocs); - } - ); + assertNoFailuresAndResponse(prepareSearch().setPointInTime(new PointInTimeBuilder(pitId)), searchResponse -> { + assertThat(searchResponse.pointInTimeId(), equalTo(pitId)); + assertHitCount(searchResponse, numDocs); + }); internalCluster().restartNode(assignedNode); ensureGreen(indexName); assertNoFailuresAndResponse( - prepareSearch().setIndices(indexName) - .setQuery(new RangeQueryBuilder("created_date").gte("2011-01-01").lte("2011-12-12")) + prepareSearch().setQuery(new RangeQueryBuilder("created_date").gte("2011-01-01").lte("2011-12-12")) .setSearchType(SearchType.QUERY_THEN_FETCH) - .setPreference(null) .setPreFilterShardSize(between(1, 10)) .setAllowPartialSearchResults(true) .setPointInTime(new PointInTimeBuilder(pitId)), @@ -287,7 +282,7 @@ public void testPointInTimeWithDeletedIndices() { indicesAdmin().prepareDelete("index-1").get(); // Return partial results if allow partial search result is allowed assertResponse( - prepareSearch().setPreference(null).setAllowPartialSearchResults(true).setPointInTime(new PointInTimeBuilder(pitId)), + prepareSearch().setAllowPartialSearchResults(true).setPointInTime(new PointInTimeBuilder(pitId)), searchResponse -> { assertFailures(searchResponse); assertHitCount(searchResponse, index2); @@ 
-296,7 +291,7 @@ public void testPointInTimeWithDeletedIndices() { // Fails if allow partial search result is not allowed expectThrows( ElasticsearchException.class, - prepareSearch().setPreference(null).setAllowPartialSearchResults(false).setPointInTime(new PointInTimeBuilder(pitId))::get + prepareSearch().setAllowPartialSearchResults(false).setPointInTime(new PointInTimeBuilder(pitId)) ); } finally { client().execute(TransportClosePointInTimeAction.TYPE, new ClosePointInTimeRequest(pitId)).actionGet(); @@ -322,7 +317,7 @@ public void testOpenPointInTimeWithNoIndexMatched() { .getPointInTimeId(); try { assertNoFailuresAndResponse( - prepareSearch().setPreference(null).setPointInTime(new PointInTimeBuilder(pitId)), + prepareSearch().setPointInTime(new PointInTimeBuilder(pitId)), searchResponse -> assertHitCount(searchResponse, numDocs) ); } finally { @@ -338,7 +333,7 @@ public void testOpenPointInTimeWithNoIndexMatched() { .actionGet() .getPointInTimeId(); try { - assertHitCountAndNoFailures(prepareSearch().setPreference(null).setPointInTime(new PointInTimeBuilder(pitId)), 0); + assertHitCountAndNoFailures(prepareSearch().setPointInTime(new PointInTimeBuilder(pitId)), 0); } finally { client().execute(TransportClosePointInTimeAction.TYPE, new ClosePointInTimeRequest(pitId)).actionGet(); } diff --git a/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexTests.java b/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexTests.java index ca848c8bb8c44..f1d23739b0938 100644 --- a/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexTests.java +++ b/x-pack/plugin/frozen-indices/src/internalClusterTest/java/org/elasticsearch/index/engine/frozen/FrozenIndexTests.java @@ -100,7 +100,7 @@ public void testCloseFreezeAndOpen() throws Exception { assertAcked(client().execute(FreezeIndexAction.INSTANCE, new FreezeRequest(indexName)).actionGet()); expectThrows( ClusterBlockException.class, - () -> prepareIndex(indexName).setId("4").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get() + prepareIndex(indexName).setId("4").setSource("field", "value").setRefreshPolicy(IMMEDIATE) ); IndicesService indexServices = getInstanceFromNode(IndicesService.class); Index index = resolveIndex(indexName); @@ -150,11 +150,7 @@ public void testCloseFreezeAndOpen() throws Exception { try { for (int from = 0; from < 3; from++) { assertResponse( - client().prepareSearch() - .setIndicesOptions(IndicesOptions.STRICT_EXPAND_OPEN_FORBID_CLOSED) - .setPointInTime(new PointInTimeBuilder(pitId)) - .setSize(1) - .setFrom(from), + client().prepareSearch().setPointInTime(new PointInTimeBuilder(pitId)).setSize(1).setFrom(from), response -> { assertHitCount(response, 3); assertEquals(1, response.getHits().getHits().length); @@ -276,12 +272,12 @@ public void testDoubleFreeze() { assertAcked(client().execute(FreezeIndexAction.INSTANCE, new FreezeRequest("test-idx")).actionGet()); ResourceNotFoundException exception = expectThrows( ResourceNotFoundException.class, - () -> client().execute( + client().execute( FreezeIndexAction.INSTANCE, new FreezeRequest("test-idx").indicesOptions( new IndicesOptions(EnumSet.noneOf(IndicesOptions.Option.class), EnumSet.of(IndicesOptions.WildcardStates.OPEN)) ) - ).actionGet() + ) ); assertEquals("no index found to freeze", exception.getMessage()); } @@ -473,10 +469,7 @@ public void testWriteToFrozenIndex() { 
prepareIndex("idx").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); assertAcked(client().execute(FreezeIndexAction.INSTANCE, new FreezeRequest("idx")).actionGet()); assertIndexFrozen("idx"); - expectThrows( - ClusterBlockException.class, - () -> prepareIndex("idx").setId("2").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get() - ); + expectThrows(ClusterBlockException.class, prepareIndex("idx").setId("2").setSource("field", "value").setRefreshPolicy(IMMEDIATE)); } public void testIgnoreUnavailable() { @@ -502,13 +495,13 @@ public void testUnfreezeClosedIndex() { assertEquals(IndexMetadata.State.CLOSE, clusterAdmin().prepareState().get().getState().metadata().index("idx").getState()); expectThrows( IndexNotFoundException.class, - () -> client().execute( + client().execute( FreezeIndexAction.INSTANCE, new FreezeRequest("id*").setFreeze(false) .indicesOptions( new IndicesOptions(EnumSet.noneOf(IndicesOptions.Option.class), EnumSet.of(IndicesOptions.WildcardStates.OPEN)) ) - ).actionGet() + ) ); // we don't resolve to closed indices assertAcked(client().execute(FreezeIndexAction.INSTANCE, new FreezeRequest("idx").setFreeze(false)).actionGet()); diff --git a/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/ExplainLifecycleIT.java b/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/ExplainLifecycleIT.java index c81a44d9e3507..8d51c380e96cf 100644 --- a/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/ExplainLifecycleIT.java +++ b/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/ExplainLifecycleIT.java @@ -21,6 +21,7 @@ import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xpack.core.ilm.DeleteAction; +import org.elasticsearch.xpack.core.ilm.ErrorStep; import org.elasticsearch.xpack.core.ilm.LifecycleAction; import org.elasticsearch.xpack.core.ilm.LifecyclePolicy; import org.elasticsearch.xpack.core.ilm.LifecycleSettings; @@ -201,7 +202,7 @@ public void testExplainIndicesWildcard() throws Exception { assertThat(explainIndexWithMissingPolicy.get("policy"), is(missingPolicyName)); assertThat(explainIndexWithMissingPolicy.get("phase"), is(nullValue())); assertThat(explainIndexWithMissingPolicy.get("action"), is(nullValue())); - assertThat(explainIndexWithMissingPolicy.get("step"), is(nullValue())); + assertThat(explainIndexWithMissingPolicy.get("step"), is(ErrorStep.NAME)); assertThat(explainIndexWithMissingPolicy.get("age"), is(nullValue())); assertThat(explainIndexWithMissingPolicy.get("failed_step"), is(nullValue())); Map stepInfo = (Map) explainIndexWithMissingPolicy.get("step_info"); diff --git a/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/TimeSeriesLifecycleActionsIT.java b/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/TimeSeriesLifecycleActionsIT.java index 34b80520b4bab..2b722a6555a08 100644 --- a/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/TimeSeriesLifecycleActionsIT.java +++ b/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/TimeSeriesLifecycleActionsIT.java @@ -32,6 +32,7 @@ import org.elasticsearch.xcontent.json.JsonXContent; import org.elasticsearch.xpack.core.ilm.AllocateAction; import org.elasticsearch.xpack.core.ilm.DeleteAction; +import org.elasticsearch.xpack.core.ilm.ErrorStep; import 
org.elasticsearch.xpack.core.ilm.ForceMergeAction; import org.elasticsearch.xpack.core.ilm.FreezeAction; import org.elasticsearch.xpack.core.ilm.LifecycleAction; @@ -599,7 +600,7 @@ public void testNonexistentPolicy() throws Exception { Map indexStatus = (Map) ((Map) responseMap.get("indices")).get(index); assertNull(indexStatus.get("phase")); assertNull(indexStatus.get("action")); - assertNull(indexStatus.get("step")); + assertEquals(ErrorStep.NAME, indexStatus.get("step")); Map stepInfo = (Map) indexStatus.get("step_info"); assertNotNull(stepInfo); assertEquals("policy [does_not_exist] does not exist", stepInfo.get("reason")); @@ -1221,7 +1222,7 @@ private void assertHistoryIsPresent( } // Finally, check that the history index is in a good state - String historyIndexName = DataStream.getDefaultBackingIndexName("ilm-history-6", 1); + String historyIndexName = DataStream.getDefaultBackingIndexName("ilm-history-7", 1); Response explainHistoryIndex = client().performRequest(new Request("GET", historyIndexName + "/_lifecycle/explain")); Map responseMap; try (InputStream is = explainHistoryIndex.getEntity().getContent()) { diff --git a/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/DownsampleActionIT.java b/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/DownsampleActionIT.java index 6d3811fd66d9c..23ec1c0262dd4 100644 --- a/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/DownsampleActionIT.java +++ b/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/DownsampleActionIT.java @@ -494,6 +494,7 @@ public void testDownsampleTwice() throws Exception { } } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/101428") public void testDownsampleTwiceSameInterval() throws Exception { // Create the ILM policy Request request = new Request("PUT", "_ilm/policy/" + policy); diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestGetLifecycleAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestGetLifecycleAction.java index b92f603da49b5..f5221ba980440 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestGetLifecycleAction.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestGetLifecycleAction.java @@ -12,7 +12,7 @@ import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestCancellableNodeClient; -import org.elasticsearch.rest.action.RestChunkedToXContentListener; +import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener; import org.elasticsearch.xpack.core.ilm.action.GetLifecycleAction; import java.util.List; @@ -41,7 +41,7 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient return channel -> new RestCancellableNodeClient(client, restRequest.getHttpChannel()).execute( GetLifecycleAction.INSTANCE, getLifecycleRequest, - new RestChunkedToXContentListener<>(channel) + new RestRefCountedChunkedToXContentListener<>(channel) ); } } diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestMigrateToDataTiersAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestMigrateToDataTiersAction.java index 6a37ae708f872..095cb212be558 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestMigrateToDataTiersAction.java 
+++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestMigrateToDataTiersAction.java @@ -33,9 +33,14 @@ public List routes() { @Override protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { - MigrateToDataTiersRequest migrateRequest = request.hasContent() - ? MigrateToDataTiersRequest.parse(request.contentParser()) - : new MigrateToDataTiersRequest(); + MigrateToDataTiersRequest migrateRequest; + if (request.hasContent()) { + try (var parser = request.contentParser()) { + migrateRequest = MigrateToDataTiersRequest.parse(parser); + } + } else { + migrateRequest = new MigrateToDataTiersRequest(); + } migrateRequest.setDryRun(request.paramAsBoolean("dry_run", false)); return channel -> client.execute(MigrateToDataTiersAction.INSTANCE, migrateRequest, new RestToXContentListener<>(channel)); } diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestMoveToStepAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestMoveToStepAction.java index e963cade94c81..f6b28a6ed3b8a 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestMoveToStepAction.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/RestMoveToStepAction.java @@ -35,8 +35,10 @@ public String getName() { @Override protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { String index = restRequest.param("name"); - XContentParser parser = restRequest.contentParser(); - MoveToStepAction.Request request = MoveToStepAction.Request.parseRequest(index, parser); + MoveToStepAction.Request request; + try (XContentParser parser = restRequest.contentParser()) { + request = MoveToStepAction.Request.parseRequest(index, parser); + } request.timeout(restRequest.paramAsTime("timeout", request.timeout())); request.masterNodeTimeout(restRequest.paramAsTime("master_timeout", request.masterNodeTimeout())); return channel -> client.execute(MoveToStepAction.INSTANCE, request, new RestToXContentListener<>(channel)); diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportExplainLifecycleAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportExplainLifecycleAction.java index 90438f5a753ba..e032b300a824e 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportExplainLifecycleAction.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportExplainLifecycleAction.java @@ -152,7 +152,8 @@ static IndexLifecycleExplainResponse getIndexLifecycleExplainResponse( originationDate != -1L ? originationDate : lifecycleState.lifecycleDate(), lifecycleState.phase(), lifecycleState.action(), - lifecycleState.step(), + // treat a missing policy as if the index is in the error step + indexLifecycleService.policyExists(policyName) == false ? 
ErrorStep.NAME : lifecycleState.step(), lifecycleState.failedStep(), lifecycleState.isAutoRetryableError(), lifecycleState.failedStepRetryCount(), diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/history/ILMHistoryTemplateRegistry.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/history/ILMHistoryTemplateRegistry.java index c85ead4aada53..5633033e6faa1 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/history/ILMHistoryTemplateRegistry.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/history/ILMHistoryTemplateRegistry.java @@ -35,10 +35,11 @@ public class ILMHistoryTemplateRegistry extends IndexTemplateRegistry { // version 4: add `allow_auto_create` setting // version 5: convert to data stream // version 6: manage by data stream lifecycle - public static final int INDEX_TEMPLATE_VERSION = 6; + // version 7: version the index template name so we can upgrade existing deployments + public static final int INDEX_TEMPLATE_VERSION = 7; public static final String ILM_TEMPLATE_VERSION_VARIABLE = "xpack.ilm_history.template.version"; - public static final String ILM_TEMPLATE_NAME = "ilm-history"; + public static final String ILM_TEMPLATE_NAME = "ilm-history-" + INDEX_TEMPLATE_VERSION; public static final String ILM_POLICY_NAME = "ilm-history-ilm-policy"; diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/action/TransportExplainLifecycleActionTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/action/TransportExplainLifecycleActionTests.java index 87580a8165d61..246d2bcf21205 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/action/TransportExplainLifecycleActionTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/action/TransportExplainLifecycleActionTests.java @@ -158,6 +158,7 @@ public void testGetIndexLifecycleExplainResponse() throws IOException { ); assertThat(onlyErrorsResponse, notNullValue()); assertThat(onlyErrorsResponse.getPolicyName(), is("random-policy")); + assertThat(onlyErrorsResponse.getStep(), is(ErrorStep.NAME)); } { diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/history/ILMHistoryStoreTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/history/ILMHistoryStoreTests.java index 6ac3a4522fb3d..0eece33e2e581 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/history/ILMHistoryStoreTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/history/ILMHistoryStoreTests.java @@ -53,6 +53,9 @@ import static org.elasticsearch.xpack.core.ilm.LifecycleSettings.LIFECYCLE_HISTORY_INDEX_ENABLED_SETTING; import static org.elasticsearch.xpack.ilm.history.ILMHistoryStore.ILM_HISTORY_DATA_STREAM; +import static org.elasticsearch.xpack.ilm.history.ILMHistoryTemplateRegistry.ILM_TEMPLATE_NAME; +import static org.elasticsearch.xpack.ilm.history.ILMHistoryTemplateRegistry.INDEX_TEMPLATE_VERSION; +import static org.hamcrest.Matchers.endsWith; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; @@ -284,6 +287,10 @@ public void onFailure(Exception e) { } } + public void testTemplateNameIsVersioned() { + assertThat(ILM_TEMPLATE_NAME, endsWith("-" + INDEX_TEMPLATE_VERSION)); + } + /** * A client that delegates to a verifying function for action/request/listener */ diff --git a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rest/RestInferenceAction.java 
b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rest/RestInferenceAction.java index beecf75da38ab..0286390a8a3ec 100644 --- a/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rest/RestInferenceAction.java +++ b/x-pack/plugin/inference/src/main/java/org/elasticsearch/xpack/inference/rest/RestInferenceAction.java @@ -33,8 +33,9 @@ public List routes() { protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { String taskType = restRequest.param("task_type"); String modelId = restRequest.param("model_id"); - var request = InferenceAction.Request.parseRequest(modelId, taskType, restRequest.contentParser()); - - return channel -> client.execute(InferenceAction.INSTANCE, request, new RestToXContentListener<>(channel)); + try (var parser = restRequest.contentParser()) { + var request = InferenceAction.Request.parseRequest(modelId, taskType, parser); + return channel -> client.execute(InferenceAction.INSTANCE, request, new RestToXContentListener<>(channel)); + } } } diff --git a/x-pack/plugin/logstash/src/test/java/org/elasticsearch/xpack/logstash/action/TransportGetPipelineActionTests.java b/x-pack/plugin/logstash/src/test/java/org/elasticsearch/xpack/logstash/action/TransportGetPipelineActionTests.java index d93c24356422f..f75dd2926059a 100644 --- a/x-pack/plugin/logstash/src/test/java/org/elasticsearch/xpack/logstash/action/TransportGetPipelineActionTests.java +++ b/x-pack/plugin/logstash/src/test/java/org/elasticsearch/xpack/logstash/action/TransportGetPipelineActionTests.java @@ -30,7 +30,6 @@ import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.SearchShardTarget; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.MockLogAppender; import org.elasticsearch.test.MockUtils; @@ -116,9 +115,14 @@ public void onFailure(Exception e) { * Test that the explicit and wildcard IDs are requested. 
*/ public void testGetPipelinesByExplicitAndWildcardIds() { - InternalSearchResponse internalSearchResponse = new InternalSearchResponse(prepareSearchHits(), null, null, null, false, null, 1); SearchResponse searchResponse = new SearchResponse( - internalSearchResponse, + prepareSearchHits(), + null, + null, + false, + null, + null, + 1, null, 1, 1, diff --git a/x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword/CountedKeywordFieldMapper.java b/x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword/CountedKeywordFieldMapper.java index d04bb88325cc7..ad5e224efd5db 100644 --- a/x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword/CountedKeywordFieldMapper.java +++ b/x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword/CountedKeywordFieldMapper.java @@ -78,16 +78,24 @@ public class CountedKeywordFieldMapper extends FieldMapper { public static final String CONTENT_TYPE = "counted_keyword"; public static final String COUNT_FIELD_NAME_SUFFIX = "_count"; - public static final FieldType FIELD_TYPE; + private static final FieldType FIELD_TYPE_INDEXED; + private static final FieldType FIELD_TYPE_NOT_INDEXED; static { - FieldType ft = new FieldType(); - ft.setDocValuesType(DocValuesType.SORTED_SET); - ft.setTokenized(false); - ft.setOmitNorms(true); - ft.setIndexOptions(IndexOptions.DOCS); - ft.freeze(); - FIELD_TYPE = freezeAndDeduplicateFieldType(ft); + FieldType indexed = new FieldType(); + indexed.setDocValuesType(DocValuesType.SORTED_SET); + indexed.setTokenized(false); + indexed.setOmitNorms(true); + indexed.setIndexOptions(IndexOptions.DOCS); + FIELD_TYPE_INDEXED = freezeAndDeduplicateFieldType(indexed); + + FieldType notIndexed = new FieldType(); + notIndexed.setDocValuesType(DocValuesType.SORTED_SET); + notIndexed.setTokenized(false); + notIndexed.setOmitNorms(true); + notIndexed.setIndexOptions(IndexOptions.NONE); + FIELD_TYPE_NOT_INDEXED = freezeAndDeduplicateFieldType(notIndexed); + } private static class CountedKeywordFieldType extends StringFieldType { @@ -261,7 +269,12 @@ public TermsEnum termsEnum() throws IOException { } } + private static CountedKeywordFieldMapper toType(FieldMapper in) { + return (CountedKeywordFieldMapper) in; + } + public static class Builder extends FieldMapper.Builder { + private final Parameter indexed = Parameter.indexParam(m -> toType(m).mappedFieldType.isIndexed(), true); private final Parameter> meta = Parameter.metaParam(); protected Builder(String name) { @@ -270,22 +283,24 @@ protected Builder(String name) { @Override protected Parameter[] getParameters() { - return new Parameter[] { meta }; + return new Parameter[] { meta, indexed }; } @Override public FieldMapper build(MapperBuilderContext context) { BinaryFieldMapper countFieldMapper = new BinaryFieldMapper.Builder(name + COUNT_FIELD_NAME_SUFFIX, true).build(context); + boolean isIndexed = indexed.getValue(); + FieldType ft = isIndexed ? 
FIELD_TYPE_INDEXED : FIELD_TYPE_NOT_INDEXED; return new CountedKeywordFieldMapper( name, - FIELD_TYPE, + ft, new CountedKeywordFieldType( context.buildFullName(name), - true, + isIndexed, false, true, - new TextSearchInfo(FIELD_TYPE, null, KEYWORD_ANALYZER, KEYWORD_ANALYZER), + new TextSearchInfo(ft, null, KEYWORD_ANALYZER, KEYWORD_ANALYZER), meta.getValue(), countFieldMapper.fieldType() ), diff --git a/x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword/CountedTermsAggregationBuilder.java b/x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword/CountedTermsAggregationBuilder.java index 72e3eb4efacf9..31be7f149831d 100644 --- a/x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword/CountedTermsAggregationBuilder.java +++ b/x-pack/plugin/mapper-counted-keyword/src/main/java/org/elasticsearch/xpack/countedkeyword/CountedTermsAggregationBuilder.java @@ -73,6 +73,11 @@ public static void registerAggregators(ValuesSourceRegistry.Builder builder) { CountedTermsAggregatorFactory.registerAggregators(builder); } + @Override + public boolean supportsSampling() { + return true; + } + public CountedTermsAggregationBuilder size(int size) { if (size <= 0) { throw new IllegalArgumentException("[size] must be greater than 0. Found [" + size + "] in [" + name + "]"); diff --git a/x-pack/plugin/mapper-counted-keyword/src/test/java/org/elasticsearch/xpack/countedkeyword/CountedKeywordFieldMapperTests.java b/x-pack/plugin/mapper-counted-keyword/src/test/java/org/elasticsearch/xpack/countedkeyword/CountedKeywordFieldMapperTests.java index 1468ed456b132..2ffd4468c814a 100644 --- a/x-pack/plugin/mapper-counted-keyword/src/test/java/org/elasticsearch/xpack/countedkeyword/CountedKeywordFieldMapperTests.java +++ b/x-pack/plugin/mapper-counted-keyword/src/test/java/org/elasticsearch/xpack/countedkeyword/CountedKeywordFieldMapperTests.java @@ -7,6 +7,8 @@ package org.elasticsearch.xpack.countedkeyword; +import org.apache.lucene.index.DocValuesType; +import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.IndexableField; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.MappedFieldType; @@ -82,4 +84,15 @@ public void testDottedFieldNames() throws IOException { List fields = doc.rootDoc().getFields("dotted.field"); assertEquals(1, fields.size()); } + + public void testDisableIndex() throws IOException { + DocumentMapper mapper = createDocumentMapper( + fieldMapping(b -> b.field("type", CountedKeywordFieldMapper.CONTENT_TYPE).field("index", false)) + ); + ParsedDocument doc = mapper.parse(source(b -> b.field("field", "1234"))); + List fields = doc.rootDoc().getFields("field"); + assertEquals(1, fields.size()); + assertEquals(IndexOptions.NONE, fields.get(0).fieldType().indexOptions()); + assertEquals(DocValuesType.SORTED_SET, fields.get(0).fieldType().docValuesType()); + } } diff --git a/x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/action/ModelLoaderUtils.java b/x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/action/ModelLoaderUtils.java index 43ab090e94381..2f3f9cbf3f32c 100644 --- a/x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/action/ModelLoaderUtils.java +++ b/x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/action/ModelLoaderUtils.java @@ -130,9 +130,13 @@ static VocabularyParts 
loadVocabulary(URI uri) { // visible for testing static VocabularyParts parseVocabParts(InputStream vocabInputStream) throws IOException { - XContentParser sourceParser = XContentType.JSON.xContent() - .createParser(XContentParserConfiguration.EMPTY, Streams.limitStream(vocabInputStream, VOCABULARY_SIZE_LIMIT.getBytes())); - Map> vocabParts = sourceParser.map(HashMap::new, XContentParser::list); + Map> vocabParts; + try ( + XContentParser sourceParser = XContentType.JSON.xContent() + .createParser(XContentParserConfiguration.EMPTY, Streams.limitStream(vocabInputStream, VOCABULARY_SIZE_LIMIT.getBytes())) + ) { + vocabParts = sourceParser.map(HashMap::new, XContentParser::list); + } List vocabulary = vocabParts.containsKey(VOCABULARY) ? vocabParts.get(VOCABULARY).stream().map(Object::toString).collect(Collectors.toList()) diff --git a/x-pack/plugin/ml/qa/multi-cluster-tests-with-security/build.gradle b/x-pack/plugin/ml/qa/multi-cluster-tests-with-security/build.gradle index 1cc37f5c4ffc0..9d931974d25d5 100644 --- a/x-pack/plugin/ml/qa/multi-cluster-tests-with-security/build.gradle +++ b/x-pack/plugin/ml/qa/multi-cluster-tests-with-security/build.gradle @@ -2,6 +2,7 @@ import org.elasticsearch.gradle.internal.test.RestIntegTestTask import org.elasticsearch.gradle.Version import org.elasticsearch.gradle.VersionProperties import static org.elasticsearch.gradle.PropertyNormalization.IGNORE_VALUE +import org.elasticsearch.gradle.internal.info.BuildParams apply plugin: 'elasticsearch.internal-testclusters' apply plugin: 'elasticsearch.standalone-rest-test' @@ -49,16 +50,29 @@ testClusters.register('mixed-cluster') { tasks.register('remote-cluster', RestIntegTestTask) { mustRunAfter("precommit") systemProperty 'tests.rest.suite', 'remote_cluster' + maybeDisableForFips(it) } tasks.register('mixed-cluster', RestIntegTestTask) { dependsOn 'remote-cluster' useCluster remoteCluster systemProperty 'tests.rest.suite', 'multi_cluster' + maybeDisableForFips(it) } tasks.register("integTest") { dependsOn 'mixed-cluster' + maybeDisableForFips(it) } tasks.named("check").configure { dependsOn("integTest") } + +//TODO: remove with version 8.14. A new FIPS setting was added in 8.13. Since FIPS configures all test clusters and this specific integTest uses +// the previous minor version, that setting is not available when running in FIPS until 8.14. 
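+// maybeDisableForFips disables the given task when the build runs on a FIPS JVM and the project version is before 8.14.0.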
+def maybeDisableForFips(task) { + if (BuildParams.inFipsJvm) { + if (Version.fromString(project.version).before(Version.fromString('8.14.0'))) { + task.enabled = false + } + } +} diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/ExplainDataFrameAnalyticsIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/ExplainDataFrameAnalyticsIT.java index ff0490e46bb7a..1a5a11fe595ff 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/ExplainDataFrameAnalyticsIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/ExplainDataFrameAnalyticsIT.java @@ -7,11 +7,12 @@ package org.elasticsearch.xpack.ml.integration; import org.elasticsearch.ResourceNotFoundException; -import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.bulk.BulkRequestBuilder; import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.support.RefCountingListener; +import org.elasticsearch.action.support.SubscribableListener; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.index.query.QueryBuilders; @@ -27,11 +28,11 @@ import org.elasticsearch.xpack.core.ml.utils.QueryProvider; import java.io.IOException; -import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; import static org.hamcrest.Matchers.contains; @@ -204,26 +205,26 @@ public void testSimultaneousExplainSameConfig() throws IOException { ) .buildForExplain(); - List<ActionFuture<ExplainDataFrameAnalyticsAction.Response>> futures = new ArrayList<>(); - - for (int i = 0; i < simultaneousInvocationCount; ++i) { - futures.add(client().execute(ExplainDataFrameAnalyticsAction.INSTANCE, new ExplainDataFrameAnalyticsAction.Request(config))); - } - - ExplainDataFrameAnalyticsAction.Response previous = null; - for (ActionFuture<ExplainDataFrameAnalyticsAction.Response> future : futures) { - // The main purpose of this test is that actionGet() here will throw an exception - // if any of the simultaneous calls returns an error due to interaction between - // the many estimation processes that get run - ExplainDataFrameAnalyticsAction.Response current = future.actionGet(10000); - if (previous != null) { - // A secondary check the test can perform is that the multiple invocations - // return the same result (but it was failures due to unwanted interactions - // that caused this test to be written) - assertEquals(previous, current); + safeAwait(SubscribableListener.newForked(testListener -> { + try (var listeners = new RefCountingListener(testListener)) { + final var firstResponseRef = new AtomicReference<ExplainDataFrameAnalyticsAction.Response>(); + for (int i = 0; i < simultaneousInvocationCount; ++i) { + client().execute( + ExplainDataFrameAnalyticsAction.INSTANCE, + new ExplainDataFrameAnalyticsAction.Request(config), + // The main purpose of this test is that the action will complete its listener exceptionally if any of the + // simultaneous calls returns an error due to interaction between the many estimation processes that get run.
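+ // (RefCountingListener completes testListener only once every listener acquired below has completed,
+ // failing it if any of them failed, so the enclosing safeAwait surfaces any error from the concurrent requests.)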
+ listeners.acquire(response -> { + // A secondary check the test can perform is that the multiple invocations return the same result + // (but it was failures due to unwanted interactions that caused this test to be written) + assertNotNull(response); + firstResponseRef.compareAndSet(null, response); + assertEquals(firstResponseRef.get(), response); + }) + ); + } } - previous = current; - } + })); } public void testRuntimeFields() { diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/MlNativeIntegTestCase.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/MlNativeIntegTestCase.java index 858c5ba946f78..ecfb2f81bf452 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/MlNativeIntegTestCase.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/MlNativeIntegTestCase.java @@ -80,6 +80,8 @@ import org.elasticsearch.xpack.core.ml.action.StartDatafeedAction; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedState; import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsTaskState; +import org.elasticsearch.xpack.core.ml.inference.ModelAliasMetadata; +import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.core.ml.inference.persistence.InferenceIndexConstants; import org.elasticsearch.xpack.core.ml.job.config.JobTaskState; import org.elasticsearch.xpack.core.ml.job.config.MlFilter; @@ -91,8 +93,6 @@ import org.elasticsearch.xpack.ilm.IndexLifecycle; import org.elasticsearch.xpack.ml.LocalStateMachineLearning; import org.elasticsearch.xpack.ml.autoscaling.MlScalingReason; -import org.elasticsearch.xpack.ml.inference.ModelAliasMetadata; -import org.elasticsearch.xpack.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.slm.SnapshotLifecycle; import org.elasticsearch.xpack.slm.history.SnapshotLifecycleTemplateRegistry; import org.elasticsearch.xpack.transform.Transform; diff --git a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/ChunkedTrainedModelPersisterIT.java b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/ChunkedTrainedModelPersisterIT.java index 5cf87cff66a25..9b3326a4ba348 100644 --- a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/ChunkedTrainedModelPersisterIT.java +++ b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/ChunkedTrainedModelPersisterIT.java @@ -19,6 +19,7 @@ import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsDest; import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsSource; import org.elasticsearch.xpack.core.ml.dataframe.analyses.Regression; +import org.elasticsearch.xpack.core.ml.inference.ModelAliasMetadata; import org.elasticsearch.xpack.core.ml.inference.TrainedModelConfig; import org.elasticsearch.xpack.core.ml.inference.TrainedModelDefinition; import org.elasticsearch.xpack.core.ml.inference.TrainedModelDefinitionTests; @@ -36,7 +37,6 @@ import org.elasticsearch.xpack.ml.extractor.DocValueField; import org.elasticsearch.xpack.ml.extractor.ExtractedField; import org.elasticsearch.xpack.ml.extractor.ExtractedFields; -import org.elasticsearch.xpack.ml.inference.ModelAliasMetadata; import 
org.elasticsearch.xpack.ml.inference.modelsize.ModelSizeInfo; import org.elasticsearch.xpack.ml.inference.modelsize.ModelSizeInfoTests; import org.elasticsearch.xpack.ml.inference.persistence.TrainedModelProvider; diff --git a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/JobResultsProviderIT.java b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/JobResultsProviderIT.java index c3f10353c37f7..55ea5deffe1af 100644 --- a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/JobResultsProviderIT.java +++ b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/JobResultsProviderIT.java @@ -63,6 +63,7 @@ import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSnapshot; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.Quantiles; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.TimingStats; +import org.elasticsearch.xpack.core.ml.job.results.Result; import org.elasticsearch.xpack.core.ml.utils.MlIndexAndAlias; import org.elasticsearch.xpack.core.ml.utils.ToXContentParams; import org.elasticsearch.xpack.ml.MlSingleNodeTestCase; @@ -846,6 +847,16 @@ public void testGetSnapshots() { assertNull(snapshots.get(3).getQuantiles()); assertNull(snapshots.get(4).getQuantiles()); + // test get single snapshot + PlainActionFuture> singleFuture = new PlainActionFuture<>(); + jobProvider.getModelSnapshot(jobId, "snap_1", true, singleFuture::onResponse, singleFuture::onFailure); + ModelSnapshot withQuantiles = singleFuture.actionGet().result; + assertThat(withQuantiles.getQuantiles().getTimestamp().getTime(), equalTo(11L)); + + singleFuture = new PlainActionFuture<>(); + jobProvider.getModelSnapshot(jobId, "snap_2", false, singleFuture::onResponse, singleFuture::onFailure); + ModelSnapshot withoutQuantiles = singleFuture.actionGet().result; + assertNull(withoutQuantiles.getQuantiles()); } public void testGetAutodetectParams() throws Exception { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java index f9c483496445e..f3254245168b8 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java @@ -74,6 +74,7 @@ import org.elasticsearch.plugins.SystemIndexPlugin; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestHandler; +import org.elasticsearch.telemetry.TelemetryProvider; import org.elasticsearch.threadpool.ExecutorBuilder; import org.elasticsearch.threadpool.ScalingExecutorBuilder; import org.elasticsearch.threadpool.ThreadPool; @@ -188,6 +189,8 @@ import org.elasticsearch.xpack.core.ml.dataframe.evaluation.MlEvaluationNamedXContentProvider; import org.elasticsearch.xpack.core.ml.dataframe.stats.AnalysisStatsNamedWriteablesProvider; import org.elasticsearch.xpack.core.ml.inference.MlInferenceNamedXContentProvider; +import org.elasticsearch.xpack.core.ml.inference.ModelAliasMetadata; +import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.core.ml.inference.persistence.InferenceIndexConstants; import org.elasticsearch.xpack.core.ml.job.config.JobTaskState; import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; @@ -318,10 +321,8 @@ import 
org.elasticsearch.xpack.ml.dataframe.process.NativeMemoryUsageEstimationProcessFactory; import org.elasticsearch.xpack.ml.dataframe.process.results.AnalyticsResult; import org.elasticsearch.xpack.ml.dataframe.process.results.MemoryUsageEstimationResult; -import org.elasticsearch.xpack.ml.inference.ModelAliasMetadata; import org.elasticsearch.xpack.ml.inference.TrainedModelStatsService; import org.elasticsearch.xpack.ml.inference.assignment.TrainedModelAssignmentClusterService; -import org.elasticsearch.xpack.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.ml.inference.assignment.TrainedModelAssignmentService; import org.elasticsearch.xpack.ml.inference.deployment.DeploymentManager; import org.elasticsearch.xpack.ml.inference.ingest.InferenceProcessor; @@ -910,6 +911,7 @@ public Collection createComponents(PluginServices services) { Environment environment = services.environment(); NamedXContentRegistry xContentRegistry = services.xContentRegistry(); IndexNameExpressionResolver indexNameExpressionResolver = services.indexNameExpressionResolver(); + TelemetryProvider telemetryProvider = services.telemetryProvider(); if (enabled == false) { // Holders for @link(MachineLearningFeatureSetUsage) which needs access to job manager and ML extension, @@ -1051,7 +1053,7 @@ public Collection createComponents(PluginServices services) { normalizerProcessFactory = (jobId, quantilesState, bucketSpan, executorService) -> new MultiplyingNormalizerProcess(1.0); analyticsProcessFactory = (jobId, analyticsProcessConfig, hasState, executorService, onProcessCrash) -> null; memoryEstimationProcessFactory = (jobId, analyticsProcessConfig, hasState, executorService, onProcessCrash) -> null; - pyTorchProcessFactory = (task, executorService, onProcessCrash) -> new BlackHolePyTorchProcess(); + pyTorchProcessFactory = (task, executorService, afterInputStreamClose, onProcessCrash) -> new BlackHolePyTorchProcess(); } NormalizerFactory normalizerFactory = new NormalizerFactory( normalizerProcessFactory, @@ -1251,6 +1253,14 @@ public Collection createComponents(PluginServices services) { machineLearningExtension.get().isNlpEnabled() ); + MlMetrics mlMetrics = new MlMetrics( + telemetryProvider.getMeterRegistry(), + clusterService, + settings, + autodetectProcessManager, + dataFrameAnalyticsManager + ); + return List.of( mlLifeCycleService, new MlControllerHolder(mlController), @@ -1282,7 +1292,8 @@ public Collection createComponents(PluginServices services) { trainedModelAllocationClusterServiceSetOnce.get(), deploymentManager.get(), nodeAvailabilityZoneMapper, - new MachineLearningExtensionHolder(machineLearningExtension.get()) + new MachineLearningExtensionHolder(machineLearningExtension.get()), + mlMetrics ); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlLifeCycleService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlLifeCycleService.java index c6a360a018e2a..976e5ec255b85 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlLifeCycleService.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlLifeCycleService.java @@ -15,9 +15,9 @@ import org.elasticsearch.xpack.core.ml.MlTasks; import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingInfo; import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingState; +import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.ml.datafeed.DatafeedRunner; import 
org.elasticsearch.xpack.ml.dataframe.DataFrameAnalyticsManager; -import org.elasticsearch.xpack.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.ml.job.process.autodetect.AutodetectProcessManager; import org.elasticsearch.xpack.ml.process.MlController; import org.elasticsearch.xpack.ml.process.MlMemoryTracker; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlMetrics.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlMetrics.java new file mode 100644 index 0000000000000..f2cedd4bf0f6b --- /dev/null +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlMetrics.java @@ -0,0 +1,563 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.ml; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.cluster.ClusterChangedEvent; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateListener; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodeRole; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.component.AbstractLifecycleComponent; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.gateway.GatewayService; +import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.telemetry.metric.LongWithAttributes; +import org.elasticsearch.telemetry.metric.MeterRegistry; +import org.elasticsearch.xpack.core.ml.MlTasks; +import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingInfo; +import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingState; +import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignment; +import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignmentMetadata; +import org.elasticsearch.xpack.ml.dataframe.DataFrameAnalyticsManager; +import org.elasticsearch.xpack.ml.job.process.autodetect.AutodetectProcessManager; +import org.elasticsearch.xpack.ml.utils.NativeMemoryCalculator; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Optional; + +import static org.elasticsearch.xpack.core.ml.MachineLearningField.USE_AUTO_MACHINE_MEMORY_PERCENT; +import static org.elasticsearch.xpack.core.ml.MlTasks.DATAFEED_TASK_NAME; +import static org.elasticsearch.xpack.core.ml.MlTasks.DATA_FRAME_ANALYTICS_TASK_NAME; +import static org.elasticsearch.xpack.core.ml.MlTasks.JOB_SNAPSHOT_UPGRADE_TASK_NAME; +import static org.elasticsearch.xpack.core.ml.MlTasks.JOB_TASK_NAME; +import static org.elasticsearch.xpack.ml.MachineLearning.NATIVE_EXECUTABLE_CODE_OVERHEAD; + +/** + * This class adds two types of ML metrics to the meter registry, such that they can be collected by Elastic APM. + *
<p>
+ * 1. Per-node ML native memory statistics for ML nodes + * 2. Cluster-wide job/model statuses for master-eligible nodes + *
<p>
+ * The memory metrics relate solely to the ML node they are collected from. + *
<p>
+ * The job/model metrics are cluster-wide because a key problem we want to be able to detect is when there are + * jobs or models that are not assigned to any node. The consumer of the data needs to account for the fact that + * multiple master-eligible nodes are reporting the same information. The es.ml.is_master attribute in the records + * indicates which one was actually master, so can be used to deduplicate. + */ +public final class MlMetrics extends AbstractLifecycleComponent implements ClusterStateListener { + + private static final Logger logger = LogManager.getLogger(MlMetrics.class); + + private final MeterRegistry meterRegistry; + private final ClusterService clusterService; + private final AutodetectProcessManager autodetectProcessManager; + private final DataFrameAnalyticsManager dataFrameAnalyticsManager; + private final boolean hasMasterRole; + private final boolean hasMlRole; + private final List metrics = new ArrayList<>(); + + private static final Map MASTER_TRUE_MAP = Map.of("es.ml.is_master", Boolean.TRUE); + private static final Map MASTER_FALSE_MAP = Map.of("es.ml.is_master", Boolean.FALSE); + private volatile Map isMasterMap = MASTER_FALSE_MAP; + private volatile boolean firstTime = true; + + private volatile MlTaskStatusCounts mlTaskStatusCounts = MlTaskStatusCounts.EMPTY; + private volatile TrainedModelAllocationCounts trainedModelAllocationCounts = TrainedModelAllocationCounts.EMPTY; + + private volatile long nativeMemLimit; + private volatile long nativeMemAdUsage; + private volatile long nativeMemDfaUsage; + private volatile long nativeMemTrainedModelUsage; + private volatile long nativeMemFree; + + public MlMetrics( + MeterRegistry meterRegistry, + ClusterService clusterService, + Settings settings, + AutodetectProcessManager autodetectProcessManager, + DataFrameAnalyticsManager dataFrameAnalyticsManager + ) { + this.meterRegistry = meterRegistry; + this.clusterService = clusterService; + this.autodetectProcessManager = autodetectProcessManager; + this.dataFrameAnalyticsManager = dataFrameAnalyticsManager; + hasMasterRole = DiscoveryNode.hasRole(settings, DiscoveryNodeRole.MASTER_ROLE); + hasMlRole = DiscoveryNode.hasRole(settings, DiscoveryNodeRole.ML_ROLE); + if (hasMasterRole || hasMlRole) { + clusterService.addListener(this); + } + } + + private void registerMlNodeMetrics(MeterRegistry meterRegistry) { + metrics.add( + meterRegistry.registerLongGauge( + "es.ml.native_memory.limit", + "ML native memory limit on this node.", + "bytes", + () -> new LongWithAttributes(nativeMemLimit, Map.of()) + ) + ); + metrics.add( + meterRegistry.registerLongGauge( + "es.ml.native_memory.usage.anomaly_detectors", + "ML native memory used by anomaly detection jobs on this node.", + "bytes", + () -> new LongWithAttributes(nativeMemAdUsage, Map.of()) + ) + ); + metrics.add( + meterRegistry.registerLongGauge( + "es.ml.native_memory.usage.data_frame_analytics", + "ML native memory used by data frame analytics jobs on this node.", + "bytes", + () -> new LongWithAttributes(nativeMemDfaUsage, Map.of()) + ) + ); + metrics.add( + meterRegistry.registerLongGauge( + "es.ml.native_memory.usage.trained_models", + "ML native memory used by trained models on this node.", + "bytes", + () -> new LongWithAttributes(nativeMemTrainedModelUsage, Map.of()) + ) + ); + metrics.add( + meterRegistry.registerLongGauge( + "es.ml.native_memory.free", + "Free ML native memory on this node.", + "bytes", + () -> new LongWithAttributes(nativeMemFree, Map.of()) + ) + ); + } + + private void 
registerMasterNodeMetrics(MeterRegistry meterRegistry) { + metrics.add( + meterRegistry.registerLongGauge( + "es.ml.anomaly_detectors.opening.count", + "Count of anomaly detection jobs in the opening state cluster-wide.", + "jobs", + () -> new LongWithAttributes(mlTaskStatusCounts.adOpeningCount, isMasterMap) + ) + ); + metrics.add( + meterRegistry.registerLongGauge( + "es.ml.anomaly_detectors.opened.count", + "Count of anomaly detection jobs in the opened state cluster-wide.", + "jobs", + () -> new LongWithAttributes(mlTaskStatusCounts.adOpenedCount, isMasterMap) + ) + ); + metrics.add( + meterRegistry.registerLongGauge( + "es.ml.anomaly_detectors.closing.count", + "Count of anomaly detection jobs in the closing state cluster-wide.", + "jobs", + () -> new LongWithAttributes(mlTaskStatusCounts.adClosingCount, isMasterMap) + ) + ); + metrics.add( + meterRegistry.registerLongGauge( + "es.ml.anomaly_detectors.failed.count", + "Count of anomaly detection jobs in the failed state cluster-wide.", + "jobs", + () -> new LongWithAttributes(mlTaskStatusCounts.adFailedCount, isMasterMap) + ) + ); + metrics.add( + meterRegistry.registerLongGauge( + "es.ml.datafeeds.starting.count", + "Count of datafeeds in the starting state cluster-wide.", + "datafeeds", + () -> new LongWithAttributes(mlTaskStatusCounts.datafeedStartingCount, isMasterMap) + ) + ); + metrics.add( + meterRegistry.registerLongGauge( + "es.ml.datafeeds.started.count", + "Count of datafeeds in the started state cluster-wide.", + "datafeeds", + () -> new LongWithAttributes(mlTaskStatusCounts.datafeedStartedCount, isMasterMap) + ) + ); + metrics.add( + meterRegistry.registerLongGauge( + "es.ml.datafeeds.stopping.count", + "Count of datafeeds in the stopping state cluster-wide.", + "datafeeds", + () -> new LongWithAttributes(mlTaskStatusCounts.datafeedStoppingCount, isMasterMap) + ) + ); + metrics.add( + meterRegistry.registerLongGauge( + "es.ml.data_frame_analytics.starting.count", + "Count of data frame analytics jobs in the starting state cluster-wide.", + "jobs", + () -> new LongWithAttributes(mlTaskStatusCounts.dfaStartingCount, isMasterMap) + ) + ); + metrics.add( + meterRegistry.registerLongGauge( + "es.ml.data_frame_analytics.started.count", + "Count of data frame analytics jobs in the started state cluster-wide.", + "jobs", + () -> new LongWithAttributes(mlTaskStatusCounts.dfaStartedCount, isMasterMap) + ) + ); + metrics.add( + meterRegistry.registerLongGauge( + "es.ml.data_frame_analytics.reindexing.count", + "Count of data frame analytics jobs in the reindexing state cluster-wide.", + "jobs", + () -> new LongWithAttributes(mlTaskStatusCounts.dfaReindexingCount, isMasterMap) + ) + ); + metrics.add( + meterRegistry.registerLongGauge( + "es.ml.data_frame_analytics.analyzing.count", + "Count of data frame analytics jobs in the analyzing state cluster-wide.", + "jobs", + () -> new LongWithAttributes(mlTaskStatusCounts.dfaAnalyzingCount, isMasterMap) + ) + ); + metrics.add( + meterRegistry.registerLongGauge( + "es.ml.data_frame_analytics.stopping.count", + "Count of data frame analytics jobs in the stopping state cluster-wide.", + "jobs", + () -> new LongWithAttributes(mlTaskStatusCounts.dfaStoppingCount, isMasterMap) + ) + ); + metrics.add( + meterRegistry.registerLongGauge( + "es.ml.data_frame_analytics.failed.count", + "Count of data frame analytics jobs in the failed state cluster-wide.", + "jobs", + () -> new LongWithAttributes(mlTaskStatusCounts.dfaFailedCount, isMasterMap) + ) + ); + metrics.add( + 
meterRegistry.registerLongGauge( + "es.ml.trained_models.deployment.target_allocations.count", + "Sum of target trained model allocations across all deployments cluster-wide.", + "allocations", + () -> new LongWithAttributes(trainedModelAllocationCounts.trainedModelsTargetAllocations, isMasterMap) + ) + ); + metrics.add( + meterRegistry.registerLongGauge( + "es.ml.trained_models.deployment.current_allocations.count", + "Sum of current trained model allocations across all deployments cluster-wide.", + "allocations", + () -> new LongWithAttributes(trainedModelAllocationCounts.trainedModelsCurrentAllocations, isMasterMap) + ) + ); + metrics.add( + meterRegistry.registerLongGauge( + "es.ml.trained_models.deployment.failed_allocations.count", + "Sum of failed trained model allocations across all deployments cluster-wide.", + "allocations", + () -> new LongWithAttributes(trainedModelAllocationCounts.trainedModelsFailedAllocations, isMasterMap) + ) + ); + } + + @Override + protected void doStart() { + metrics.clear(); + if (hasMasterRole) { + registerMasterNodeMetrics(meterRegistry); + } + if (hasMlRole) { + registerMlNodeMetrics(meterRegistry); + } + } + + @Override + protected void doStop() {} + + @Override + protected void doClose() { + metrics.forEach(metric -> { + try { + metric.close(); + } catch (Exception e) { + logger.warn("metrics close() method should not throw Exception", e); + } + }); + } + + /** + * Metric values are recalculated in response to cluster state changes and then cached. + * This means that the telemetry provider can poll the metrics registry as often as it + * likes without causing extra work in recalculating the metric values. + */ + @Override + public void clusterChanged(ClusterChangedEvent event) { + isMasterMap = event.localNodeMaster() ? MASTER_TRUE_MAP : MASTER_FALSE_MAP; + + if (event.state().blocks().hasGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK)) { + // Wait until the gateway has recovered from disk. + return; + } + + boolean mustRecalculateFreeMem = false; + + final ClusterState currentState = event.state(); + final ClusterState previousState = event.previousState(); + + if (firstTime || event.metadataChanged()) { + final PersistentTasksCustomMetadata tasks = currentState.getMetadata().custom(PersistentTasksCustomMetadata.TYPE); + final PersistentTasksCustomMetadata oldTasks = firstTime + ? null + : previousState.getMetadata().custom(PersistentTasksCustomMetadata.TYPE); + if (tasks != null && tasks.equals(oldTasks) == false) { + if (hasMasterRole) { + mlTaskStatusCounts = findTaskStatuses(tasks); + } + if (hasMlRole) { + nativeMemAdUsage = findAdMemoryUsage(autodetectProcessManager); + nativeMemDfaUsage = findDfaMemoryUsage(dataFrameAnalyticsManager, tasks); + mustRecalculateFreeMem = true; + } + } + } + + final TrainedModelAssignmentMetadata currentMetadata = TrainedModelAssignmentMetadata.fromState(currentState); + final TrainedModelAssignmentMetadata previousMetadata = firstTime ? 
null : TrainedModelAssignmentMetadata.fromState(previousState); + if (currentMetadata != null && currentMetadata.equals(previousMetadata) == false) { + if (hasMasterRole) { + trainedModelAllocationCounts = findTrainedModelAllocationCounts(currentMetadata); + } + if (hasMlRole) { + nativeMemTrainedModelUsage = findTrainedModelMemoryUsage(currentMetadata, currentState.nodes().getLocalNode().getId()); + mustRecalculateFreeMem = true; + } + } + + if (firstTime) { + firstTime = false; + nativeMemLimit = findNativeMemoryLimit(currentState.nodes().getLocalNode(), clusterService.getClusterSettings()); + mustRecalculateFreeMem = true; + // Install a listener to recalculate limit and free in response to settings changes. + // This isn't done in the constructor, but instead only after the three usage variables + // have been populated. Doing this means that immediately after startup, when the stats + // are inaccurate, they'll _all_ be zero. Installing the settings listeners immediately + // could mean that free would be misleadingly set based on zero usage when actual usage + // is _not_ zero. + clusterService.getClusterSettings() + .addSettingsUpdateConsumer(USE_AUTO_MACHINE_MEMORY_PERCENT, s -> memoryLimitClusterSettingUpdated()); + clusterService.getClusterSettings() + .addSettingsUpdateConsumer(MachineLearning.MAX_MACHINE_MEMORY_PERCENT, s -> memoryLimitClusterSettingUpdated()); + } + + if (mustRecalculateFreeMem) { + nativeMemFree = findNativeMemoryFree(nativeMemLimit, nativeMemAdUsage, nativeMemDfaUsage, nativeMemTrainedModelUsage); + } + } + + /** + * This method is registered to be called whenever a cluster setting is changed that affects + * any of the calculations this class performs. + */ + private void memoryLimitClusterSettingUpdated() { + nativeMemLimit = findNativeMemoryLimit(clusterService.localNode(), clusterService.getClusterSettings()); + nativeMemFree = findNativeMemoryFree(nativeMemLimit, nativeMemAdUsage, nativeMemDfaUsage, nativeMemTrainedModelUsage); + } + + /** + * Returns up-to-date stats about the states of the ML entities that are persistent tasks. + * Currently this includes: + * - Anomaly detection jobs + * - Datafeeds + * - Data frame analytics jobs + *
<p>
+ * In the future it could possibly also include model snapshot upgrade tasks. + *
<p>
+ * These stats relate to the whole cluster and not just the current node. + *
<p>
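+ * Note that anomaly detection job states are read via MlTasks.getJobStateModifiedForReassignments, so a task
+ * that is being reassigned is counted under its effective state.
+ * <p>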
+ * The caller is expected to cache the returned stats to avoid unnecessary recalculation. + */ + static MlTaskStatusCounts findTaskStatuses(PersistentTasksCustomMetadata tasks) { + + int adOpeningCount = 0; + int adOpenedCount = 0; + int adClosingCount = 0; + int adFailedCount = 0; + int datafeedStartingCount = 0; + int datafeedStartedCount = 0; + int datafeedStoppingCount = 0; + int dfaStartingCount = 0; + int dfaStartedCount = 0; + int dfaReindexingCount = 0; + int dfaAnalyzingCount = 0; + int dfaStoppingCount = 0; + int dfaFailedCount = 0; + + for (PersistentTasksCustomMetadata.PersistentTask task : tasks.tasks()) { + switch (task.getTaskName()) { + case JOB_TASK_NAME: + switch (MlTasks.getJobStateModifiedForReassignments(task)) { + case OPENING -> ++adOpeningCount; + case OPENED -> ++adOpenedCount; + case CLOSING -> ++adClosingCount; + case FAILED -> ++adFailedCount; + } + break; + case DATAFEED_TASK_NAME: + switch (MlTasks.getDatafeedState(task)) { + case STARTING -> ++datafeedStartingCount; + case STARTED -> ++datafeedStartedCount; + case STOPPING -> ++datafeedStoppingCount; + } + break; + case DATA_FRAME_ANALYTICS_TASK_NAME: + switch (MlTasks.getDataFrameAnalyticsState(task)) { + case STARTING -> ++dfaStartingCount; + case STARTED -> ++dfaStartedCount; + case REINDEXING -> ++dfaReindexingCount; + case ANALYZING -> ++dfaAnalyzingCount; + case STOPPING -> ++dfaStoppingCount; + case FAILED -> ++dfaFailedCount; + } + break; + case JOB_SNAPSHOT_UPGRADE_TASK_NAME: + // Not currently tracked + // TODO: consider in the future, especially when we're at the stage of needing to upgrade serverless model snapshots + break; + } + } + + return new MlTaskStatusCounts( + adOpeningCount, + adOpenedCount, + adClosingCount, + adFailedCount, + datafeedStartingCount, + datafeedStartedCount, + datafeedStoppingCount, + dfaStartingCount, + dfaStartedCount, + dfaReindexingCount, + dfaAnalyzingCount, + dfaStoppingCount, + dfaFailedCount + ); + } + + /** + * Return the memory usage, in bytes, of the anomaly detection jobs that are running on the + * current node. + */ + static long findAdMemoryUsage(AutodetectProcessManager autodetectProcessManager) { + return autodetectProcessManager.getOpenProcessMemoryUsage().getBytes(); + } + + /** + * Return the memory usage, in bytes, of the data frame analytics jobs that are running on the + * current node. + */ + static long findDfaMemoryUsage(DataFrameAnalyticsManager dataFrameAnalyticsManager, PersistentTasksCustomMetadata tasks) { + return dataFrameAnalyticsManager.getActiveTaskMemoryUsage(tasks).getBytes(); + } + + /** + * Returns up-to-date stats about the numbers of allocations of ML trained models. + *
<p>
+ * These stats relate to the whole cluster and not just the current node. + *
<p>
+ * The caller is expected to cache the returned stats to avoid unnecessary recalculation. + */ + static TrainedModelAllocationCounts findTrainedModelAllocationCounts(TrainedModelAssignmentMetadata metadata) { + int trainedModelsTargetAllocations = 0; + int trainedModelsCurrentAllocations = 0; + int trainedModelsFailedAllocations = 0; + + for (TrainedModelAssignment trainedModelAssignment : metadata.allAssignments().values()) { + trainedModelsTargetAllocations += trainedModelAssignment.totalTargetAllocations(); + trainedModelsCurrentAllocations += trainedModelAssignment.totalCurrentAllocations(); + trainedModelsFailedAllocations += trainedModelAssignment.totalFailedAllocations(); + } + + return new TrainedModelAllocationCounts( + trainedModelsTargetAllocations, + trainedModelsCurrentAllocations, + trainedModelsFailedAllocations + ); + } + + /** + * Return the memory usage, in bytes, of the trained models that are running on the + * current node. + */ + static long findTrainedModelMemoryUsage(TrainedModelAssignmentMetadata metadata, String localNodeId) { + long trainedModelMemoryUsageBytes = 0; + for (TrainedModelAssignment assignment : metadata.allAssignments().values()) { + if (Optional.ofNullable(assignment.getNodeRoutingTable().get(localNodeId)) + .map(RoutingInfo::getState) + .orElse(RoutingState.STOPPED) + .consumesMemory()) { + trainedModelMemoryUsageBytes += assignment.getTaskParams().estimateMemoryUsageBytes(); + } + } + return trainedModelMemoryUsageBytes; + } + + /** + * Return the maximum amount of memory, in bytes, permitted for ML processes running on the + * current node. + */ + static long findNativeMemoryLimit(DiscoveryNode localNode, ClusterSettings settings) { + return NativeMemoryCalculator.allowedBytesForMl(localNode, settings).orElse(0L); + } + + /** + * Return the amount of free memory, in bytes, that remains available for ML processes running on the + * current node. 
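+ * Note that the shared native executable code overhead is included in the usage figure only when at least
+ * one ML process is running.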
+ */ + static long findNativeMemoryFree(long nativeMemLimit, long nativeMemAdUsage, long nativeMemDfaUsage, long nativeMemTrainedModelUsage) { + long totalUsage = nativeMemAdUsage + nativeMemDfaUsage + nativeMemTrainedModelUsage; + if (totalUsage > 0) { + totalUsage += NATIVE_EXECUTABLE_CODE_OVERHEAD.getBytes(); + } + return nativeMemLimit - totalUsage; + } + + record MlTaskStatusCounts( + int adOpeningCount, + int adOpenedCount, + int adClosingCount, + int adFailedCount, + int datafeedStartingCount, + int datafeedStartedCount, + int datafeedStoppingCount, + int dfaStartingCount, + int dfaStartedCount, + int dfaReindexingCount, + int dfaAnalyzingCount, + int dfaStoppingCount, + int dfaFailedCount + ) { + static final MlTaskStatusCounts EMPTY = new MlTaskStatusCounts(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); + } + + record TrainedModelAllocationCounts( + int trainedModelsTargetAllocations, + int trainedModelsCurrentAllocations, + int trainedModelsFailedAllocations + ) { + static final TrainedModelAllocationCounts EMPTY = new TrainedModelAllocationCounts(0, 0, 0); + } +} diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportClearDeploymentCacheAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportClearDeploymentCacheAction.java index 44235882a6582..5ecd0322674e1 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportClearDeploymentCacheAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportClearDeploymentCacheAction.java @@ -23,8 +23,8 @@ import org.elasticsearch.xpack.core.ml.action.ClearDeploymentCacheAction.Request; import org.elasticsearch.xpack.core.ml.action.ClearDeploymentCacheAction.Response; import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignment; +import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; -import org.elasticsearch.xpack.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.ml.inference.deployment.TrainedModelDeploymentTask; import java.util.List; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCoordinatedInferenceAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCoordinatedInferenceAction.java index 7442f1db0a662..9c368c1a162a8 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCoordinatedInferenceAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCoordinatedInferenceAction.java @@ -28,9 +28,9 @@ import org.elasticsearch.xpack.core.inference.action.InferenceAction; import org.elasticsearch.xpack.core.ml.action.CoordinatedInferenceAction; import org.elasticsearch.xpack.core.ml.action.InferModelAction; +import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignmentUtils; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.EmptyConfigUpdate; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.InferenceConfigUpdate; -import org.elasticsearch.xpack.ml.inference.assignment.TrainedModelAssignmentUtils; import java.util.ArrayList; import java.util.function.Supplier; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteTrainedModelAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteTrainedModelAction.java index 
093e4213a5db1..49f73056cd8bd 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteTrainedModelAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteTrainedModelAction.java @@ -41,9 +41,9 @@ import org.elasticsearch.xpack.core.ClientHelper; import org.elasticsearch.xpack.core.ml.action.DeleteTrainedModelAction; import org.elasticsearch.xpack.core.ml.action.StopTrainedModelDeploymentAction; +import org.elasticsearch.xpack.core.ml.inference.ModelAliasMetadata; +import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; -import org.elasticsearch.xpack.ml.inference.ModelAliasMetadata; -import org.elasticsearch.xpack.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.ml.inference.ingest.InferenceProcessor; import org.elasticsearch.xpack.ml.inference.persistence.TrainedModelProvider; import org.elasticsearch.xpack.ml.notifications.InferenceAuditor; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteTrainedModelAliasAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteTrainedModelAliasAction.java index 73601ef86ff13..fe8a4ff029d69 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteTrainedModelAliasAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteTrainedModelAliasAction.java @@ -32,7 +32,7 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.ml.action.DeleteTrainedModelAliasAction; -import org.elasticsearch.xpack.ml.inference.ModelAliasMetadata; +import org.elasticsearch.xpack.core.ml.inference.ModelAliasMetadata; import org.elasticsearch.xpack.ml.notifications.InferenceAuditor; import java.util.HashMap; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDeploymentStatsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDeploymentStatsAction.java index 36d225a943348..14afd6999b0c0 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDeploymentStatsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDeploymentStatsAction.java @@ -28,7 +28,7 @@ import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingInfo; import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingState; import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignment; -import org.elasticsearch.xpack.ml.inference.assignment.TrainedModelAssignmentMetadata; +import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.ml.inference.deployment.ModelStats; import org.elasticsearch.xpack.ml.inference.deployment.TrainedModelDeploymentTask; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetTrainedModelsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetTrainedModelsAction.java index e6d1fe30d7646..78d030d454f0b 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetTrainedModelsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetTrainedModelsAction.java @@ -21,11 
+21,11 @@ import org.elasticsearch.xpack.core.ml.action.GetTrainedModelsAction; import org.elasticsearch.xpack.core.ml.action.GetTrainedModelsAction.Request; import org.elasticsearch.xpack.core.ml.action.GetTrainedModelsAction.Response; +import org.elasticsearch.xpack.core.ml.inference.ModelAliasMetadata; import org.elasticsearch.xpack.core.ml.inference.TrainedModelConfig; import org.elasticsearch.xpack.core.ml.inference.TrainedModelType; import org.elasticsearch.xpack.core.ml.job.messages.Messages; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; -import org.elasticsearch.xpack.ml.inference.ModelAliasMetadata; import org.elasticsearch.xpack.ml.inference.persistence.TrainedModelProvider; import java.util.Collections; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetTrainedModelsStatsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetTrainedModelsStatsAction.java index 3c9ba3700dc8e..76321608ba4fb 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetTrainedModelsStatsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetTrainedModelsStatsAction.java @@ -43,16 +43,16 @@ import org.elasticsearch.xpack.core.ml.action.GetTrainedModelsAction; import org.elasticsearch.xpack.core.ml.action.GetTrainedModelsStatsAction; import org.elasticsearch.xpack.core.ml.action.StartTrainedModelDeploymentAction; +import org.elasticsearch.xpack.core.ml.inference.ModelAliasMetadata; import org.elasticsearch.xpack.core.ml.inference.TrainedModelConfig; import org.elasticsearch.xpack.core.ml.inference.TrainedModelType; import org.elasticsearch.xpack.core.ml.inference.assignment.AssignmentStats; import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignment; +import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.core.ml.inference.persistence.InferenceIndexConstants; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.InferenceStats; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.TrainedModelSizeStats; import org.elasticsearch.xpack.core.ml.utils.TransportVersionUtils; -import org.elasticsearch.xpack.ml.inference.ModelAliasMetadata; -import org.elasticsearch.xpack.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.ml.inference.persistence.TrainedModelDefinitionDoc; import org.elasticsearch.xpack.ml.inference.persistence.TrainedModelProvider; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportInternalInferModelAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportInternalInferModelAction.java index 3cf0189c28df2..6a8dca8e2776b 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportInternalInferModelAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportInternalInferModelAction.java @@ -32,15 +32,15 @@ import org.elasticsearch.xpack.core.ml.action.InferModelAction.Request; import org.elasticsearch.xpack.core.ml.action.InferModelAction.Response; import org.elasticsearch.xpack.core.ml.action.InferTrainedModelDeploymentAction; +import org.elasticsearch.xpack.core.ml.inference.ModelAliasMetadata; import org.elasticsearch.xpack.core.ml.inference.TrainedModelType; import org.elasticsearch.xpack.core.ml.inference.assignment.AssignmentState; import 
org.elasticsearch.xpack.core.ml.inference.assignment.RoutingState; import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignment; +import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.core.ml.inference.results.ErrorInferenceResults; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import org.elasticsearch.xpack.ml.MachineLearning; -import org.elasticsearch.xpack.ml.inference.ModelAliasMetadata; -import org.elasticsearch.xpack.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.ml.inference.loadingservice.LocalModel; import org.elasticsearch.xpack.ml.inference.loadingservice.ModelLoadingService; import org.elasticsearch.xpack.ml.inference.persistence.TrainedModelProvider; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutTrainedModelAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutTrainedModelAction.java index 7462b6cd918aa..5206799735c52 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutTrainedModelAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutTrainedModelAction.java @@ -60,8 +60,10 @@ import org.elasticsearch.xpack.core.ml.action.PutTrainedModelAction; import org.elasticsearch.xpack.core.ml.action.PutTrainedModelAction.Request; import org.elasticsearch.xpack.core.ml.action.PutTrainedModelAction.Response; +import org.elasticsearch.xpack.core.ml.inference.ModelAliasMetadata; import org.elasticsearch.xpack.core.ml.inference.TrainedModelConfig; import org.elasticsearch.xpack.core.ml.inference.TrainedModelType; +import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.core.ml.inference.persistence.InferenceIndexConstants; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.InferenceConfig; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.LenientlyParsedInferenceConfig; @@ -72,8 +74,6 @@ import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import org.elasticsearch.xpack.core.ml.utils.MlPlatformArchitecturesUtil; import org.elasticsearch.xpack.ml.MachineLearning; -import org.elasticsearch.xpack.ml.inference.ModelAliasMetadata; -import org.elasticsearch.xpack.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.ml.inference.persistence.TrainedModelProvider; import org.elasticsearch.xpack.ml.utils.TaskRetriever; @@ -289,7 +289,7 @@ protected void masterOperation( .execute(ActionListener.wrap(stats -> { IndexStats indexStats = stats.getIndices().get(InferenceIndexConstants.nativeDefinitionStore()); if (indexStats != null - && indexStats.getTotal().getStore().getSizeInBytes() > MAX_NATIVE_DEFINITION_INDEX_SIZE.getBytes()) { + && indexStats.getTotal().getStore().sizeInBytes() > MAX_NATIVE_DEFINITION_INDEX_SIZE.getBytes()) { finalResponseListener.onFailure( new ElasticsearchStatusException( "Native model store has exceeded the maximum acceptable size of {}, " @@ -583,24 +583,27 @@ static InferenceConfig parseInferenceConfigFromModelPackage( NamedXContentRegistry namedXContentRegistry, DeprecationHandler deprecationHandler ) throws IOException { - XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().map(source); - XContentParser sourceParser = XContentType.JSON.xContent() - .createParser( - 
XContentParserConfiguration.EMPTY.withRegistry(namedXContentRegistry).withDeprecationHandler(deprecationHandler), - BytesReference.bytes(xContentBuilder).streamInput() - ); - - XContentParser.Token token = sourceParser.nextToken(); - assert token == XContentParser.Token.START_OBJECT; - token = sourceParser.nextToken(); - assert token == XContentParser.Token.FIELD_NAME; - String currentName = sourceParser.currentName(); - - InferenceConfig inferenceConfig = sourceParser.namedObject(LenientlyParsedInferenceConfig.class, currentName, null); - // consume the end object token - token = sourceParser.nextToken(); - assert token == XContentParser.Token.END_OBJECT; - return inferenceConfig; + try ( + XContentBuilder xContentBuilder = XContentFactory.jsonBuilder().map(source); + XContentParser sourceParser = XContentType.JSON.xContent() + .createParser( + XContentParserConfiguration.EMPTY.withRegistry(namedXContentRegistry).withDeprecationHandler(deprecationHandler), + BytesReference.bytes(xContentBuilder).streamInput() + ) + ) { + + XContentParser.Token token = sourceParser.nextToken(); + assert token == XContentParser.Token.START_OBJECT; + token = sourceParser.nextToken(); + assert token == XContentParser.Token.FIELD_NAME; + String currentName = sourceParser.currentName(); + + InferenceConfig inferenceConfig = sourceParser.namedObject(LenientlyParsedInferenceConfig.class, currentName, null); + // consume the end object token + token = sourceParser.nextToken(); + assert token == XContentParser.Token.END_OBJECT; + return inferenceConfig; + } } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutTrainedModelAliasAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutTrainedModelAliasAction.java index de760d8fa17ed..79560b8b8e94e 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutTrainedModelAliasAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutTrainedModelAliasAction.java @@ -36,14 +36,14 @@ import org.elasticsearch.xpack.core.ml.MachineLearningField; import org.elasticsearch.xpack.core.ml.action.GetTrainedModelsAction; import org.elasticsearch.xpack.core.ml.action.PutTrainedModelAliasAction; +import org.elasticsearch.xpack.core.ml.inference.ModelAliasMetadata; import org.elasticsearch.xpack.core.ml.inference.TrainedModelConfig; import org.elasticsearch.xpack.core.ml.inference.TrainedModelType; import org.elasticsearch.xpack.core.ml.inference.assignment.AllocationStatus; import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignment; +import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.core.ml.job.messages.Messages; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; -import org.elasticsearch.xpack.ml.inference.ModelAliasMetadata; -import org.elasticsearch.xpack.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.ml.inference.persistence.TrainedModelProvider; import org.elasticsearch.xpack.ml.notifications.InferenceAuditor; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportRevertModelSnapshotAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportRevertModelSnapshotAction.java index 5450b2752ab97..c01c1f46b3d13 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportRevertModelSnapshotAction.java +++ 
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportRevertModelSnapshotAction.java @@ -272,7 +272,7 @@ private static void getModelSnapshot( return; } - provider.getModelSnapshot(request.getJobId(), request.getSnapshotId(), modelSnapshot -> { + provider.getModelSnapshot(request.getJobId(), request.getSnapshotId(), true, modelSnapshot -> { if (modelSnapshot == null) { throw missingSnapshotException(request); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportSetResetModeAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportSetResetModeAction.java index 2cb8fc847bb62..7c52e086ec43c 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportSetResetModeAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportSetResetModeAction.java @@ -18,7 +18,7 @@ import org.elasticsearch.xpack.core.action.SetResetModeActionRequest; import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.action.SetResetModeAction; -import org.elasticsearch.xpack.ml.inference.ModelAliasMetadata; +import org.elasticsearch.xpack.core.ml.inference.ModelAliasMetadata; public class TransportSetResetModeAction extends AbstractTransportSetResetModeAction { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartTrainedModelDeploymentAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartTrainedModelDeploymentAction.java index 4a569b374582a..ecfe4c8aac6c6 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartTrainedModelDeploymentAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartTrainedModelDeploymentAction.java @@ -56,13 +56,13 @@ import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingInfo; import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingState; import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignment; +import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.core.ml.inference.persistence.InferenceIndexConstants; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.IndexLocation; import org.elasticsearch.xpack.core.ml.job.messages.Messages; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import org.elasticsearch.xpack.core.ml.utils.TransportVersionUtils; import org.elasticsearch.xpack.ml.MachineLearning; -import org.elasticsearch.xpack.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.ml.inference.assignment.TrainedModelAssignmentService; import org.elasticsearch.xpack.ml.inference.persistence.TrainedModelDefinitionDoc; import org.elasticsearch.xpack.ml.notifications.InferenceAuditor; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopTrainedModelDeploymentAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopTrainedModelDeploymentAction.java index 6e90d097d1e9f..5b2c3fdeddf43 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopTrainedModelDeploymentAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopTrainedModelDeploymentAction.java @@ -35,10 +35,10 @@ import org.elasticsearch.transport.TransportService; import 
org.elasticsearch.xpack.core.ml.action.StopTrainedModelDeploymentAction; import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignment; +import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.core.ml.job.messages.Messages; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import org.elasticsearch.xpack.ml.inference.assignment.TrainedModelAssignmentClusterService; -import org.elasticsearch.xpack.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.ml.inference.deployment.TrainedModelDeploymentTask; import org.elasticsearch.xpack.ml.notifications.InferenceAuditor; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateModelSnapshotAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateModelSnapshotAction.java index e83a0ee6880a4..3a5a07768a094 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateModelSnapshotAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpdateModelSnapshotAction.java @@ -71,7 +71,8 @@ protected void doExecute( ActionListener listener ) { logger.debug("Received request to update model snapshot [{}] for job [{}]", request.getSnapshotId(), request.getJobId()); - jobResultsProvider.getModelSnapshot(request.getJobId(), request.getSnapshotId(), modelSnapshot -> { + // Even though the quantiles can be large we have to fetch them initially so that the updated document is complete + jobResultsProvider.getModelSnapshot(request.getJobId(), request.getSnapshotId(), true, modelSnapshot -> { if (modelSnapshot == null) { listener.onFailure( new ResourceNotFoundException( @@ -81,8 +82,7 @@ protected void doExecute( } else { Result updatedSnapshot = applyUpdate(request, modelSnapshot); indexModelSnapshot(updatedSnapshot, b -> { - // The quantiles can be large, and totally dominate the output - - // it's clearer to remove them + // The quantiles can be large, and totally dominate the output - it's clearer to remove them at this stage listener.onResponse( new UpdateModelSnapshotAction.Response(new ModelSnapshot.Builder(updatedSnapshot.result).setQuantiles(null).build()) ); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpgradeJobModelSnapshotAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpgradeJobModelSnapshotAction.java index 3f6193c124a9a..15c1d53f7bdf8 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpgradeJobModelSnapshotAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpgradeJobModelSnapshotAction.java @@ -223,6 +223,7 @@ protected void masterOperation(Task task, Request request, ClusterState state, A jobResultsProvider.getModelSnapshot( request.getJobId(), request.getSnapshotId(), + false, getSnapshotHandler::onResponse, getSnapshotHandler::onFailure ); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/changepoint/ChangePointAggregator.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/changepoint/ChangePointAggregator.java index 613b36882f919..456ace5c1e08c 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/changepoint/ChangePointAggregator.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/changepoint/ChangePointAggregator.java @@ -7,13 
+7,15 @@ package org.elasticsearch.xpack.ml.aggs.changepoint; +import org.apache.commons.math3.distribution.UniformRealDistribution; import org.apache.commons.math3.exception.NotStrictlyPositiveException; +import org.apache.commons.math3.random.RandomGeneratorFactory; import org.apache.commons.math3.special.Beta; import org.apache.commons.math3.stat.inference.KolmogorovSmirnovTest; import org.apache.commons.math3.stat.regression.SimpleRegression; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.elasticsearch.core.Tuple; +import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.search.aggregations.AggregationReduceContext; import org.elasticsearch.search.aggregations.Aggregations; import org.elasticsearch.search.aggregations.InternalAggregation; @@ -23,9 +25,9 @@ import org.elasticsearch.xpack.ml.aggs.MlAggsHelper; import java.util.Arrays; -import java.util.HashSet; import java.util.Map; import java.util.Optional; +import java.util.Random; import java.util.Set; import java.util.function.IntToDoubleFunction; import java.util.stream.IntStream; @@ -40,15 +42,42 @@ public class ChangePointAggregator extends SiblingPipelineAggregator { static final double P_VALUE_THRESHOLD = 0.025; private static final int MINIMUM_BUCKETS = 10; private static final int MAXIMUM_CANDIDATE_CHANGE_POINTS = 1000; + private static final int MAXIMUM_SAMPLE_SIZE_FOR_KS_TEST = 500; private static final KolmogorovSmirnovTest KOLMOGOROV_SMIRNOV_TEST = new KolmogorovSmirnovTest(); - static Tuple candidateChangePoints(double[] values) { + private static int lowerBound(int[] x, int start, int end, int xs) { + int retVal = Arrays.binarySearch(x, start, end, xs); + if (retVal < 0) { + retVal = -1 - retVal; + } + return retVal; + } + + private record SampleData(double[] values, double[] weights, Integer[] changePoints) {} + + private record DataStats(double nValues, double mean, double var, int nCandidateChangePoints) { + boolean varianceZeroToWorkingPrecision() { + // Our variance calculation is only accurate to ulp(length * mean)^(1/2), + // i.e. we compute it using the difference of squares method and don't use + // the Kahan correction. We treat anything that is zero to working precision + // as zero. We should at some point switch to a more numerically stable approach + // for computing data statistics.
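The working-precision guard in the next line is easiest to see in isolation. Below is a minimal standalone sketch, illustrative only and not part of the patch: it accumulates a constant series with the same difference-of-squares formula used by RunningStats.variance() and shows that any rounding residue it leaves falls below the sqrt(ulp(2 * n * mean)) threshold, so the series is treated as constant.

    import java.util.Arrays;

    public class VariancePrecisionDemo {
        public static void main(String[] args) {
            // A constant series, so the true variance is exactly zero.
            double[] values = new double[1000];
            Arrays.fill(values, 0.1);
            double sum = 0.0;
            double sumOfSqrs = 0.0;
            for (double v : values) {
                sum += v;
                sumOfSqrs += v * v;
            }
            double n = values.length;
            double mean = sum / n;
            // Difference-of-squares variance: rounding may leave a tiny nonzero residue.
            double var = Math.max((sumOfSqrs - (sum * sum) / n) / n, 0.0);
            // The working-precision guard from varianceZeroToWorkingPrecision().
            System.out.printf("var = %g, zero to working precision: %b%n", var, var < Math.sqrt(Math.ulp(2.0 * n * mean)));
        }
    }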
+ return var < Math.sqrt(Math.ulp(2.0 * nValues * mean)); + } + + @Override + public String toString() { + return "DataStats{nValues=" + nValues + ", mean=" + mean + ", var=" + var + ", nCandidates=" + nCandidateChangePoints + "}"; + } + } + + static int[] candidateChangePoints(double[] values) { int minValues = Math.max((int) (0.1 * values.length + 0.5), MINIMUM_BUCKETS); if (values.length - 2 * minValues <= MAXIMUM_CANDIDATE_CHANGE_POINTS) { - return Tuple.tuple(IntStream.range(minValues, values.length - minValues).toArray(), 1); + return IntStream.range(minValues, values.length - minValues).toArray(); } else { int step = (int) Math.ceil((double) (values.length - 2 * minValues) / MAXIMUM_CANDIDATE_CHANGE_POINTS); - return Tuple.tuple(IntStream.range(minValues, values.length - minValues).filter(i -> i % step == 0).toArray(), step); + return IntStream.range(minValues, values.length - minValues).filter(i -> i % step == 0).toArray(); } } @@ -87,8 +116,8 @@ public InternalAggregation doReduce(Aggregations aggregations, AggregationReduce ) ); } - Tuple candidatePoints = candidateChangePoints(bucketValues.getValues()); - ChangeType changeType = changePValue(bucketValues, candidatePoints, P_VALUE_THRESHOLD); + int[] candidatePoints = candidateChangePoints(bucketValues.getValues()); + ChangeType changeType = testForChange(bucketValues, candidatePoints, P_VALUE_THRESHOLD); if (changeType.pValue() > P_VALUE_THRESHOLD) { try { SpikeAndDipDetector detect = new SpikeAndDipDetector(bucketValues.getValues()); @@ -107,170 +136,115 @@ public InternalAggregation doReduce(Aggregations aggregations, AggregationReduce return new InternalChangePointAggregation(name(), metadata(), changePointBucket, changeType); } - static ChangeType changePValue( - MlAggsHelper.DoubleBucketValues bucketValues, - Tuple candidateChangePointsAndStep, - double pValueThreshold - ) { + static ChangeType testForChange(MlAggsHelper.DoubleBucketValues bucketValues, int[] candidateChangePoints, double pValueThreshold) { double[] timeWindow = bucketValues.getValues(); - double totalUnweightedVariance = RunningStats.from(timeWindow, i -> 1.0).variance(); - ChangeType changeType = new ChangeType.Stationary(); - if (totalUnweightedVariance == 0.0) { - return changeType; - } + return testForChange(timeWindow, candidateChangePoints, pValueThreshold).changeType(bucketValues, slope(timeWindow)); + } + + static TestStats testForChange(double[] timeWindow, int[] candidateChangePoints, double pValueThreshold) { + + logger.trace("timeWindow: [{}]", Arrays.toString(timeWindow)); + double[] timeWindowWeights = outlierWeights(timeWindow); - int[] candidateChangePoints = candidateChangePointsAndStep.v1(); - int step = candidateChangePointsAndStep.v2(); - double totalVariance = RunningStats.from(timeWindow, i -> timeWindowWeights[i]).variance(); - double vNull = totalVariance; - if (totalVariance == 0.0) { - return changeType; - } - double n = timeWindow.length; - double dfNull = n - 1; - LeastSquaresOnlineRegression allLeastSquares = new LeastSquaresOnlineRegression(2); - for (int i = 0; i < timeWindow.length; i++) { - allLeastSquares.add(i, timeWindow[i], timeWindowWeights[i]); - } - double rValue = allLeastSquares.rSquared(); - - double vAlt = totalVariance * (1 - Math.abs(rValue)); - double dfAlt = n - 3; - double pValueVsNull = fTestPValue(vNull, dfNull, vAlt, dfAlt); - if (pValueVsNull < pValueThreshold && Math.abs(rValue) >= 0.5) { - double pValueVsStationary = fTestPValue(totalVariance, n - 1, vAlt, dfAlt); - SimpleRegression regression = new 
SimpleRegression(); - for (int i = 0; i < timeWindow.length; i++) { - regression.addData(i, timeWindow[i]); - } - double slope = regression.getSlope(); - changeType = new ChangeType.NonStationary(pValueVsStationary, rValue, slope < 0 ? "decreasing" : "increasing"); - vNull = vAlt; - dfNull = dfAlt; - } - RunningStats lowerRange = new RunningStats(); - RunningStats upperRange = new RunningStats(); - // Initialize running stats so that they are only missing the individual changepoint values - upperRange.addValues(timeWindow, i -> timeWindowWeights[i], candidateChangePoints[0], timeWindow.length); - lowerRange.addValues(timeWindow, i -> timeWindowWeights[i], 0, candidateChangePoints[0]); - vAlt = Double.MAX_VALUE; - Set discoveredChangePoints = new HashSet<>(3, 1.0f); - int changePoint = candidateChangePoints[candidateChangePoints.length - 1] + 1; - for (int cp : candidateChangePoints) { - double maybeVAlt = (cp * lowerRange.variance() + (n - cp) * upperRange.variance()) / n; - if (maybeVAlt < vAlt) { - vAlt = maybeVAlt; - changePoint = cp; - } - lowerRange.addValues(timeWindow, i -> timeWindowWeights[i], cp, cp + step); - upperRange.removeValues(timeWindow, i -> timeWindowWeights[i], cp, cp + step); - } - discoveredChangePoints.add(changePoint); - dfAlt = n - 2; + logger.trace("timeWindowWeights: [{}]", Arrays.toString(timeWindowWeights)); + RunningStats dataRunningStats = RunningStats.from(timeWindow, i -> timeWindowWeights[i]); + DataStats dataStats = new DataStats( + dataRunningStats.count(), + dataRunningStats.mean(), + dataRunningStats.variance(), + candidateChangePoints.length + ); + logger.trace("dataStats: [{}]", dataStats); + TestStats stationary = new TestStats(Type.STATIONARY, 1.0, dataStats.var(), 1.0, dataStats); - pValueVsNull = independentTrialsPValue(fTestPValue(vNull, dfNull, vAlt, dfAlt), candidateChangePoints.length); - if (pValueVsNull < pValueThreshold) { - changeType = new ChangeType.StepChange(pValueVsNull, bucketValues.getBucketIndex(changePoint)); - vNull = vAlt; - dfNull = dfAlt; + if (dataStats.varianceZeroToWorkingPrecision()) { + return stationary; } - VarianceAndRValue vAndR = new VarianceAndRValue(Double.MAX_VALUE, Double.MAX_VALUE); - changePoint = candidateChangePoints[candidateChangePoints.length - 1] + 1; - lowerRange = new RunningStats(); - upperRange = new RunningStats(); - // Initialize running stats so that they are only missing the individual changepoint values - upperRange.addValues(timeWindow, i -> timeWindowWeights[i], candidateChangePoints[0], timeWindow.length); - lowerRange.addValues(timeWindow, i -> timeWindowWeights[i], 0, candidateChangePoints[0]); - LeastSquaresOnlineRegression lowerLeastSquares = new LeastSquaresOnlineRegression(2); - LeastSquaresOnlineRegression upperLeastSquares = new LeastSquaresOnlineRegression(2); - for (int i = 0; i < candidateChangePoints[0]; i++) { - lowerLeastSquares.add(i, timeWindow[i], timeWindowWeights[i]); - } - for (int i = candidateChangePoints[0], x = 0; i < timeWindow.length; i++, x++) { - upperLeastSquares.add(x, timeWindow[i], timeWindowWeights[i]); - } - int upperMovingWindow = 0; - for (int cp : candidateChangePoints) { - double lowerRangeVar = lowerRange.variance(); - double upperRangeVar = upperRange.variance(); - double rv1 = lowerLeastSquares.rSquared(); - double rv2 = upperLeastSquares.rSquared(); - double v1 = lowerRangeVar * (1 - Math.abs(rv1)); - double v2 = upperRangeVar * (1 - Math.abs(rv2)); - VarianceAndRValue varianceAndRValue = new VarianceAndRValue((cp * v1 + (n - cp) * v2) / n, (cp * rv1 
+ (n - cp) * rv2) / n); - if (varianceAndRValue.compareTo(vAndR) < 0) { - vAndR = varianceAndRValue; - changePoint = cp; - } - for (int i = 0; i < step; i++) { - lowerRange.addValue(timeWindow[i + cp], timeWindowWeights[i + cp]); - upperRange.removeValue(timeWindow[i + cp], timeWindowWeights[i + cp]); - lowerLeastSquares.add(i + cp, timeWindow[i + cp], timeWindowWeights[i + cp]); - upperLeastSquares.remove(i + upperMovingWindow, timeWindow[i + cp], timeWindowWeights[i + cp]); - upperMovingWindow++; + TestStats trendVsStationary = testTrendVs(stationary, timeWindow, timeWindowWeights); + logger.trace("trend vs stationary: [{}]", trendVsStationary); + + TestStats best = stationary; + Set discoveredChangePoints = Sets.newHashSetWithExpectedSize(4); + if (trendVsStationary.accept(pValueThreshold)) { + // Check if there is a change in the trend. + TestStats trendChangeVsTrend = testTrendChangeVs(trendVsStationary, timeWindow, timeWindowWeights, candidateChangePoints); + discoveredChangePoints.add(trendChangeVsTrend.changePoint()); + logger.trace("trend change vs trend: [{}]", trendChangeVsTrend); + + if (trendChangeVsTrend.accept(pValueThreshold)) { + // Check if modeling a trend change adds much over modeling a step change. + best = testVsStepChange(trendChangeVsTrend, timeWindow, timeWindowWeights, candidateChangePoints, pValueThreshold); + } else { + best = trendVsStationary; } - } - discoveredChangePoints.add(changePoint); - dfAlt = n - 6; - pValueVsNull = independentTrialsPValue(fTestPValue(vNull, dfNull, vAndR.variance, dfAlt), candidateChangePoints.length); - if (pValueVsNull < pValueThreshold && Math.abs(vAndR.rValue) >= 0.5) { - double pValueVsStationary = independentTrialsPValue( - fTestPValue(totalVariance, n - 1, vAndR.variance, dfAlt), - candidateChangePoints.length - ); - changeType = new ChangeType.TrendChange(pValueVsStationary, vAndR.rValue, bucketValues.getBucketIndex(changePoint)); - } - - if (changeType.pValue() > 1e-5) { - double diff = 0.0; - changePoint = -1; - lowerRange = new RunningStats(); - upperRange = new RunningStats(); - // Initialize running stats so that they are only missing the individual changepoint values - upperRange.addValues(timeWindow, i -> timeWindowWeights[i], candidateChangePoints[0], timeWindow.length); - lowerRange.addValues(timeWindow, i -> timeWindowWeights[i], 0, candidateChangePoints[0]); - for (int cp : candidateChangePoints) { - double otherDiff = Math.min(cp, timeWindow.length - cp) * (0.9 * Math.abs(lowerRange.mean() - upperRange.mean())) + 0.1 - * Math.abs(lowerRange.std() - upperRange.std()); - if (otherDiff >= diff) { - changePoint = cp; - diff = otherDiff; + } else { + // Check if there is a step change. + TestStats stepChangeVsStationary = testStepChangeVs(stationary, timeWindow, timeWindowWeights, candidateChangePoints); + discoveredChangePoints.add(stepChangeVsStationary.changePoint()); + logger.trace("step change vs stationary: [{}]", stepChangeVsStationary); + + if (stepChangeVsStationary.accept(pValueThreshold)) { + // Check if modeling a trend change adds much over modeling a step change. 
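Each comparison in this cascade rests on the nested-model F-test implemented by fTestNestedPValue further down in this file. The standalone sketch below replays that statistic with made-up numbers (all inputs are illustrative): the simpler model is charged pNull parameters, the richer one pAlt, and the variance explained per extra parameter is scaled against the richer model's residual variance.

    public class NestedFTestSketch {
        public static void main(String[] args) {
            double n = 100.0;   // buckets in the window
            double vNull = 4.0; // residual variance of the simpler model (e.g. stationary)
            double pNull = 1.0; // its parameter count
            double vAlt = 2.0;  // residual variance of the richer model (e.g. step change)
            double pAlt = 2.0;  // its parameter count
            // Same statistic as fTestNestedPValue in this file.
            double F = (vNull - vAlt) / (pAlt - pNull) * (n - pAlt) / vAlt; // 98.0
            // fDistribSf then turns F into a p-value using the F-distribution
            // survival function on (pAlt - pNull, n - pAlt) degrees of freedom.
            System.out.printf("F = %.1f on (%.0f, %.0f) degrees of freedom%n", F, pAlt - pNull, n - pAlt);
        }
    }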
+ TestStats trendChangeVsStepChange = testTrendChangeVs( + stepChangeVsStationary, + timeWindow, + timeWindowWeights, + candidateChangePoints + ); + discoveredChangePoints.add(trendChangeVsStepChange.changePoint()); + logger.trace("trend change vs step change: [{}]", trendChangeVsStepChange); + if (trendChangeVsStepChange.accept(pValueThreshold)) { + best = trendChangeVsStepChange; + } else { + best = stepChangeVsStationary; } - lowerRange.addValues(timeWindow, i -> timeWindowWeights[i], cp, cp + step); - upperRange.removeValues(timeWindow, i -> timeWindowWeights[i], cp, cp + step); - } - discoveredChangePoints.add(changePoint); - double pValue = 1; - for (int i : discoveredChangePoints) { - double[] x = Arrays.copyOfRange(timeWindow, 0, i); - double[] y = Arrays.copyOfRange(timeWindow, i, timeWindow.length); - double statistic = KOLMOGOROV_SMIRNOV_TEST.kolmogorovSmirnovStatistic(x, y); - double ksTestPValue = x.length > 10_000 - ? KOLMOGOROV_SMIRNOV_TEST.approximateP(statistic, x.length, y.length) - : KOLMOGOROV_SMIRNOV_TEST.exactP(statistic, x.length, y.length, false); - if (ksTestPValue < pValue) { - changePoint = i; - pValue = ksTestPValue; + + } else { + // Check if there is a trend change. + TestStats trendChangeVsStationary = testTrendChangeVs(stationary, timeWindow, timeWindowWeights, candidateChangePoints); + discoveredChangePoints.add(trendChangeVsStationary.changePoint()); + logger.trace("trend change vs stationary: [{}]", trendChangeVsStationary); + if (trendChangeVsStationary.accept(pValueThreshold)) { + best = trendChangeVsStationary; + } } - pValue = independentTrialsPValue(pValue, candidateChangePoints.length); - if (pValue < Math.min(pValueThreshold, 0.1 * changeType.pValue())) { - changeType = new ChangeType.DistributionChange(pValue, bucketValues.getBucketIndex(changePoint)); + } + + logger.trace("best: [{}]", best.pValueVsStationary()); + + // We're not very confident in the change point, so check if a distribution change + // fits the data better. + if (best.pValueVsStationary() > 1e-5) { + TestStats distChange = testDistributionChange( + dataStats, + timeWindow, + timeWindowWeights, + candidateChangePoints, + discoveredChangePoints + ); + logger.trace("distribution change: [{}]", distChange); + if (distChange.pValue() < Math.min(pValueThreshold, 0.1 * best.pValueVsStationary())) { + best = distChange; } } - return changeType; + + return best; } static double[] outlierWeights(double[] values) { int i = (int) Math.ceil(0.025 * values.length); double[] weights = Arrays.copyOf(values, values.length); Arrays.sort(weights); + // We have to be careful here if we have a lot of duplicate values. To avoid marking + // runs of duplicates as outliers we only mark as outliers values strictly less + // (greater) than the sorted value at position i (values.length - i - 1). This + // means if i lands in a run of duplicates the entire run will be marked as inliers.
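A quick numeric check of the duplicate handling described in the comment above, on hypothetical data; the 2.5% trim index and the down-weighting mirror the code that follows.

    import java.util.Arrays;

    public class OutlierWeightsSketch {
        public static void main(String[] args) {
            double[] values = new double[80];
            Arrays.fill(values, 5.0); // a long run of duplicates
            values[0] = -100.0;       // two genuine outliers
            values[1] = 100.0;
            int i = (int) Math.ceil(0.025 * values.length); // i = 2
            double[] sorted = values.clone();
            Arrays.sort(sorted);
            double a = sorted[i];                     // 5.0
            double b = sorted[values.length - i - 1]; // 5.0
            // Inclusive comparisons keep the whole duplicate run as inliers.
            long outliers = Arrays.stream(values).filter(v -> v < a || v > b).count();
            System.out.println(outliers + " of " + values.length + " points down-weighted to 0.01"); // 2
        }
    }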
double a = weights[i]; - double b = weights[values.length - i]; + double b = weights[values.length - i - 1]; for (int j = 0; j < values.length; j++) { - if (values[j] < b && values[j] >= a) { + if (values[j] <= b && values[j] >= a) { weights[j] = 1.0; } else { weights[j] = 0.01; @@ -279,22 +253,303 @@ static double[] outlierWeights(double[] values) { return weights; } + static double slope(double[] values) { + SimpleRegression regression = new SimpleRegression(); + for (int i = 0; i < values.length; i++) { + regression.addData(i, values[i]); + } + return regression.getSlope(); + } + static double independentTrialsPValue(double pValue, int nTrials) { return pValue > 1e-10 ? 1.0 - Math.pow(1.0 - pValue, nTrials) : nTrials * pValue; } - static double fTestPValue(double vNull, double dfNull, double varianceAlt, double dfAlt) { - if (varianceAlt == vNull) { + static TestStats testTrendVs(TestStats H0, double[] values, double[] weights) { + LeastSquaresOnlineRegression allLeastSquares = new LeastSquaresOnlineRegression(2); + for (int i = 0; i < values.length; i++) { + allLeastSquares.add(i, values[i], weights[i]); + } + double vTrend = H0.dataStats().var() * (1.0 - allLeastSquares.rSquared()); + double pValue = fTestNestedPValue(H0.dataStats().nValues(), H0.var(), H0.nParams(), vTrend, 3.0); + return new TestStats(Type.NON_STATIONARY, pValue, vTrend, 3.0, H0.dataStats()); + } + + static TestStats testStepChangeVs(TestStats H0, double[] values, double[] weights, int[] candidateChangePoints) { + + double vStep = Double.MAX_VALUE; + int changePoint = -1; + + // Initialize running stats so that they are only missing the individual changepoint values + RunningStats lowerRange = new RunningStats(); + RunningStats upperRange = new RunningStats(); + upperRange.addValues(values, i -> weights[i], candidateChangePoints[0], values.length); + lowerRange.addValues(values, i -> weights[i], 0, candidateChangePoints[0]); + double mean = H0.dataStats().mean(); + int last = candidateChangePoints[0]; + for (int cp : candidateChangePoints) { + lowerRange.addValues(values, i -> weights[i], last, cp); + upperRange.removeValues(values, i -> weights[i], last, cp); + last = cp; + double nl = lowerRange.count(); + double nu = upperRange.count(); + double ml = lowerRange.mean(); + double mu = upperRange.mean(); + double vl = lowerRange.variance(); + double vu = upperRange.variance(); + double v = (nl * vl + nu * vu) / (nl + nu); + if (v < vStep) { + vStep = v; + changePoint = cp; + } + } + + double pValue = independentTrialsPValue( + fTestNestedPValue(H0.dataStats().nValues(), H0.var(), H0.nParams(), vStep, 2.0), + candidateChangePoints.length + ); + + return new TestStats(Type.STEP_CHANGE, pValue, vStep, 2.0, changePoint, H0.dataStats()); + } + + static TestStats testTrendChangeVs(TestStats H0, double[] values, double[] weights, int[] candidateChangePoints) { + + double vChange = Double.MAX_VALUE; + int changePoint = -1; + + // Initialize running stats so that they are only missing the individual changepoint values + RunningStats lowerRange = new RunningStats(); + RunningStats upperRange = new RunningStats(); + lowerRange.addValues(values, i -> weights[i], 0, candidateChangePoints[0]); + upperRange.addValues(values, i -> weights[i], candidateChangePoints[0], values.length); + LeastSquaresOnlineRegression lowerLeastSquares = new LeastSquaresOnlineRegression(2); + LeastSquaresOnlineRegression upperLeastSquares = new LeastSquaresOnlineRegression(2); + int first = candidateChangePoints[0]; + int last = 
candidateChangePoints[0]; + for (int i = 0; i < candidateChangePoints[0]; i++) { + lowerLeastSquares.add(i, values[i], weights[i]); + } + for (int i = candidateChangePoints[0]; i < values.length; i++) { + upperLeastSquares.add(i - first, values[i], weights[i]); + } + for (int cp : candidateChangePoints) { + for (int i = last; i < cp; i++) { + lowerRange.addValue(values[i], weights[i]); + upperRange.removeValue(values[i], weights[i]); + lowerLeastSquares.add(i, values[i], weights[i]); + upperLeastSquares.remove(i - first, values[i], weights[i]); + } + last = cp; + double nl = lowerRange.count(); + double nu = upperRange.count(); + double rl = lowerLeastSquares.rSquared(); + double ru = upperLeastSquares.rSquared(); + double vl = lowerRange.variance() * (1.0 - rl); + double vu = upperRange.variance() * (1.0 - ru); + double v = (nl * vl + nu * vu) / (nl + nu); + if (v < vChange) { + vChange = v; + changePoint = cp; + } + } + + double pValue = independentTrialsPValue( + fTestNestedPValue(H0.dataStats().nValues(), H0.var(), H0.nParams(), vChange, 6.0), + candidateChangePoints.length + ); + + return new TestStats(Type.TREND_CHANGE, pValue, vChange, 6.0, changePoint, H0.dataStats()); + } + + static TestStats testVsStepChange( + TestStats trendChange, + double[] values, + double[] weights, + int[] candidateChangePoints, + double pValueThreshold + ) { + DataStats dataStats = trendChange.dataStats(); + TestStats stationary = new TestStats(Type.STATIONARY, 1.0, dataStats.var(), 1.0, dataStats); + TestStats stepChange = testStepChangeVs(stationary, values, weights, candidateChangePoints); + double n = dataStats.nValues(); + double pValue = fTestNestedPValue(n, stepChange.var(), 2.0, trendChange.var(), 6.0); + return pValue < pValueThreshold ? trendChange : stepChange; + } + + static double fTestNestedPValue(double n, double vNull, double pNull, double vAlt, double pAlt) { + if (vAlt == vNull) { return 1.0; } - if (varianceAlt == 0.0) { + if (vAlt == 0.0) { return 0.0; } - double F = dfAlt / dfNull * vNull / varianceAlt; - double sf = fDistribSf(dfNull, dfAlt, F); + double F = (vNull - vAlt) / (pAlt - pNull) * (n - pAlt) / vAlt; + double sf = fDistribSf(pAlt - pNull, n - pAlt, F); return Math.min(2 * sf, 1.0); } + static SampleData sample(double[] values, double[] weights, Set changePoints) { + Integer[] adjChangePoints = changePoints.toArray(new Integer[changePoints.size()]); + if (values.length <= MAXIMUM_SAMPLE_SIZE_FOR_KS_TEST) { + return new SampleData(values, weights, adjChangePoints); + } + + // Just want repeatable random numbers. + Random rng = new Random(126832678); + UniformRealDistribution uniform = new UniformRealDistribution(RandomGeneratorFactory.createRandomGenerator(rng), 0.0, 0.99999); + + // Fisher–Yates shuffle (why isn't this in Arrays?). 
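A note on the loop below: it runs only the first MAXIMUM_SAMPLE_SIZE_FOR_KS_TEST steps of the shuffle, which is all that is needed, since a k-step Fisher-Yates pass over n elements yields a uniformly random k-subset in O(k) swaps. Here is the same idea as a standalone helper; the method name is ours and it uses plain java.util.Random rather than the commons-math generator used in the patch:

    import java.util.Arrays;
    import java.util.Random;

    public class PartialShuffle {
        // Returns k indices drawn uniformly without replacement from [0, n).
        static int[] sampleWithoutReplacement(int n, int k, Random rng) {
            int[] choice = new int[n];
            for (int i = 0; i < n; i++) {
                choice[i] = i;
            }
            for (int i = 0; i < k; i++) {
                int index = i + rng.nextInt(n - i); // uniform over [i, n)
                int tmp = choice[i];
                choice[i] = choice[index];
                choice[index] = tmp;
            }
            return Arrays.copyOf(choice, k);
        }

        public static void main(String[] args) {
            System.out.println(Arrays.toString(sampleWithoutReplacement(10, 4, new Random(42))));
        }
    }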
+ int[] choice = IntStream.range(0, values.length).toArray(); + for (int i = 0; i < MAXIMUM_SAMPLE_SIZE_FOR_KS_TEST; ++i) { + int index = i + (int) Math.floor(uniform.sample() * (values.length - i)); + int tmp = choice[i]; + choice[i] = choice[index]; + choice[index] = tmp; + } + + double[] sample = new double[MAXIMUM_SAMPLE_SIZE_FOR_KS_TEST]; + double[] sampleWeights = new double[MAXIMUM_SAMPLE_SIZE_FOR_KS_TEST]; + Arrays.sort(choice, 0, MAXIMUM_SAMPLE_SIZE_FOR_KS_TEST); + for (int i = 0; i < MAXIMUM_SAMPLE_SIZE_FOR_KS_TEST; ++i) { + sample[i] = values[choice[i]]; + sampleWeights[i] = weights[choice[i]]; + } + for (int i = 0; i < adjChangePoints.length; ++i) { + adjChangePoints[i] = lowerBound(choice, 0, MAXIMUM_SAMPLE_SIZE_FOR_KS_TEST, adjChangePoints[i].intValue()); + } + + return new SampleData(sample, sampleWeights, adjChangePoints); + } + + static TestStats testDistributionChange( + DataStats stats, + double[] values, + double[] weights, + int[] candidateChangePoints, + Set discoveredChangePoints + ) { + + double maxDiff = 0.0; + int changePoint = -1; + + // Initialize running stats so that they are only missing the individual changepoint values + RunningStats lowerRange = new RunningStats(); + RunningStats upperRange = new RunningStats(); + upperRange.addValues(values, i -> weights[i], candidateChangePoints[0], values.length); + lowerRange.addValues(values, i -> weights[i], 0, candidateChangePoints[0]); + int last = candidateChangePoints[0]; + for (int cp : candidateChangePoints) { + lowerRange.addValues(values, i -> weights[i], last, cp); + upperRange.removeValues(values, i -> weights[i], last, cp); + last = cp; + double scale = Math.min(cp, values.length - cp); + double meanDiff = Math.abs(lowerRange.mean() - upperRange.mean()); + double stdDiff = Math.abs(lowerRange.std() - upperRange.std()); + double diff = scale * (meanDiff + stdDiff); + if (diff >= maxDiff) { + maxDiff = diff; + changePoint = cp; + } + } + discoveredChangePoints.add(changePoint); + + // Note that statistical tests become increasingly powerful as the number of samples + // increases. We are not interested in detecting visually small distribution changes + // in splits of long windows so we randomly downsample the data if it is too large + // before we run the tests. + SampleData sampleData = sample(values, weights, discoveredChangePoints); + final double[] sampleValues = sampleData.values(); + final double[] sampleWeights = sampleData.weights(); + + double pValue = 1; + for (int cp : sampleData.changePoints()) { + double[] x = Arrays.copyOfRange(sampleValues, 0, cp); + double[] y = Arrays.copyOfRange(sampleValues, cp, sampleValues.length); + double statistic = KOLMOGOROV_SMIRNOV_TEST.kolmogorovSmirnovStatistic(x, y); + double ksTestPValue = KOLMOGOROV_SMIRNOV_TEST.exactP(statistic, x.length, y.length, false); + if (ksTestPValue < pValue) { + changePoint = cp; + pValue = ksTestPValue; + } + } + + // We start to get false positives if we have too many candidate change points. This + // is the classic p-value hacking problem. However, the Sidak style correction we use + // elsewhere is too conservative because test statistics for different split positions + // are strongly correlated. We assume that we have some effective number of independent + // trials equal to f * n for f < 1. Simulation shows that f = 1/50 yields low Type I + // error rates.
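Concretely, with the 500-point sample cap the code below uses (500 + 49) / 50 = 10 effective trials, so a raw per-split p-value of 0.01 is adjusted to 1 - 0.99^10, roughly 0.096. This snippet replays the arithmetic with the adjustment function copied from this file; the inputs are illustrative.

    public class IndependentTrialsSketch {
        // Copied from independentTrialsPValue in this file.
        static double independentTrialsPValue(double pValue, int nTrials) {
            return pValue > 1e-10 ? 1.0 - Math.pow(1.0 - pValue, nTrials) : nTrials * pValue;
        }

        public static void main(String[] args) {
            int sampleLength = 500;                         // MAXIMUM_SAMPLE_SIZE_FOR_KS_TEST
            int effectiveTrials = (sampleLength + 49) / 50; // ceiling division: 10
            System.out.println(independentTrialsPValue(0.01, effectiveTrials)); // ~0.0956
        }
    }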
+ pValue = independentTrialsPValue(pValue, (sampleValues.length + 49) / 50); + logger.trace("distribution change p-value: [{}]", pValue); + + return new TestStats(Type.DISTRIBUTION_CHANGE, pValue, changePoint, stats); + } + + enum Type { + STATIONARY, + NON_STATIONARY, + STEP_CHANGE, + TREND_CHANGE, + DISTRIBUTION_CHANGE + } + + record TestStats(Type type, double pValue, double var, double nParams, int changePoint, DataStats dataStats) { + TestStats(Type type, double pValue, int changePoint, DataStats dataStats) { + this(type, pValue, 0.0, 0.0, changePoint, dataStats); + } + + TestStats(Type type, double pValue, double var, double nParams, DataStats dataStats) { + this(type, pValue, var, nParams, -1, dataStats); + } + + boolean accept(double pValueThreshold) { + // Check the change is: + // 1. Statistically significant. + // 2. That we explain enough of the data variance overall. + return pValue < pValueThreshold && rSquared() >= 0.5; + } + + double rSquared() { + return 1.0 - var / dataStats.var(); + } + + double pValueVsStationary() { + return independentTrialsPValue( + fTestNestedPValue(dataStats.nValues(), dataStats.var(), 1.0, var, nParams), + dataStats.nCandidateChangePoints() + ); + } + + ChangeType changeType(MlAggsHelper.DoubleBucketValues bucketValues, double slope) { + switch (type) { + case STATIONARY: + return new ChangeType.Stationary(); + case NON_STATIONARY: + return new ChangeType.NonStationary(pValueVsStationary(), rSquared(), slope < 0.0 ? "decreasing" : "increasing"); + case STEP_CHANGE: + return new ChangeType.StepChange(pValueVsStationary(), bucketValues.getBucketIndex(changePoint)); + case TREND_CHANGE: + return new ChangeType.TrendChange(pValueVsStationary(), rSquared(), bucketValues.getBucketIndex(changePoint)); + case DISTRIBUTION_CHANGE: + return new ChangeType.DistributionChange(pValue, changePoint); + } + throw new RuntimeException("Unknown change type [" + type + "]."); + } + + @Override + public String toString() { + return "TestStats{" + + ("type=" + type) + + (", dataStats=" + dataStats) + + (", var=" + var) + + (", rSquared=" + rSquared()) + + (", pValue=" + pValue) + + (", nParams=" + nParams) + + (", changePoint=" + changePoint) + + '}'; + } + } + static class RunningStats { double sumOfSqrs; double sum; @@ -306,14 +561,18 @@ static RunningStats from(double[] values, IntToDoubleFunction weightFunction) { RunningStats() {} - double variance() { - return Math.max((sumOfSqrs - ((sum * sum) / count)) / count, 0.0); + double count() { + return count; } double mean() { return sum / count; } + double variance() { + return Math.max((sumOfSqrs - ((sum * sum) / count)) / count, 0.0); + } + double std() { return Math.sqrt(variance()); } @@ -347,28 +606,11 @@ RunningStats removeValues(double[] value, IntToDoubleFunction weightFunction, in } } - record VarianceAndRValue(double variance, double rValue) implements Comparable { - @Override - public int compareTo(VarianceAndRValue o) { - int v = Double.compare(variance, o.variance); - if (v == 0) { - return Double.compare(rValue, o.rValue); - } - return v; - } - - public VarianceAndRValue min(VarianceAndRValue other) { - if (this.compareTo(other) <= 0) { - return this; - } - return other; - } - } - static double fDistribSf(double numeratorDegreesOfFreedom, double denominatorDegreesOfFreedom, double x) { if (x <= 0) { return 1; - } else if (Double.isInfinite(x) || Double.isNaN(x)) { + } + if (Double.isInfinite(x) || Double.isNaN(x)) { return 0; } @@ -378,5 +620,4 @@ static double fDistribSf(double 
numeratorDegreesOfFreedom, double denominatorDeg 0.5 * numeratorDegreesOfFreedom ); } - } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/changepoint/LeastSquaresOnlineRegression.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/changepoint/LeastSquaresOnlineRegression.java index 077ffa3ba58b5..0cd74b6395b8e 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/changepoint/LeastSquaresOnlineRegression.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/changepoint/LeastSquaresOnlineRegression.java @@ -16,7 +16,7 @@ class LeastSquaresOnlineRegression { - private static final double SINGLE_VALUE_DECOMPOSITION_EPS = 1e+15; + private static final double SINGLE_VALUE_DECOMPOSITION_MAX_COND = 1e+15; private final RunningStatistics statistics; private final Array2DRowRealMatrix Nx; @@ -33,9 +33,8 @@ class LeastSquaresOnlineRegression { } double rSquared() { - double result = 0; if (statistics.count <= 0.0) { - return result; + return 0.0; } double var = statistics.stats[3 * N - 1] - statistics.stats[2 * N - 1] * statistics.stats[2 * N - 1]; double residualVariance = var; @@ -43,7 +42,7 @@ class LeastSquaresOnlineRegression { boolean done = false; while (--n > 0 && done == false) { if (n == 1) { - return result; + return 0.0; } else if (n == this.N) { OptionalDouble maybeResidualVar = residualVariance(N, Nx, Ny, Nz); if (maybeResidualVar.isPresent()) { @@ -54,7 +53,7 @@ class LeastSquaresOnlineRegression { Array2DRowRealMatrix x = new Array2DRowRealMatrix(n, n); Array2DRowRealMatrix y = new Array2DRowRealMatrix(n, 1); Array2DRowRealMatrix z = new Array2DRowRealMatrix(n, 1); - OptionalDouble maybeResidualVar = residualVariance(N, Nx, Ny, Nz); + OptionalDouble maybeResidualVar = residualVariance(n, x, y, z); if (maybeResidualVar.isPresent()) { residualVariance = maybeResidualVar.getAsDouble(); done = true; @@ -71,7 +70,7 @@ private double[] statisticAdj(double x, double y) { d[i] = xi; d[i + 2 * N - 1] = xi * y; } - for (int i = 3; i < 2 * N - 1; ++i, xi *= x) { + for (int i = N; i < 2 * N - 1; ++i, xi *= x) { d[i] = xi; } d[3 * N - 1] = y * y; @@ -90,6 +89,7 @@ private OptionalDouble residualVariance(int n, Array2DRowRealMatrix x, Array2DRo if (n == 1) { return OptionalDouble.of(statistics.stats[3 * N - 1] - statistics.stats[2 * N - 1] * statistics.stats[2 * N - 1]); } + for (int i = 0; i < n; ++i) { x.setEntry(i, i, statistics.stats[i + i]); y.setEntry(i, 0, statistics.stats[i + 2 * N - 1]); @@ -102,7 +102,7 @@ private OptionalDouble residualVariance(int n, Array2DRowRealMatrix x, Array2DRo SingularValueDecomposition svd = new SingularValueDecomposition(x); double[] singularValues = svd.getSingularValues(); - if (singularValues[0] > SINGLE_VALUE_DECOMPOSITION_EPS * singularValues[n - 1]) { + if (singularValues[0] > SINGLE_VALUE_DECOMPOSITION_MAX_COND * singularValues[n - 1]) { return OptionalDouble.empty(); } RealMatrix r = svd.getSolver().solve(y); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingContext.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingContext.java index 57d0084065fa5..cca59f27d5c76 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingContext.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingContext.java @@ -17,11 +17,11 @@ import org.elasticsearch.xpack.core.ml.inference.assignment.AllocationStatus; import 
org.elasticsearch.xpack.core.ml.inference.assignment.AssignmentState; import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignment; +import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.core.ml.job.config.JobState; import org.elasticsearch.xpack.core.ml.job.snapshot.upgrade.SnapshotUpgradeState; import org.elasticsearch.xpack.core.ml.job.snapshot.upgrade.SnapshotUpgradeTaskParams; import org.elasticsearch.xpack.ml.MachineLearning; -import org.elasticsearch.xpack.ml.inference.assignment.TrainedModelAssignmentMetadata; import java.util.Collection; import java.util.List; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlProcessorAutoscalingDecider.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlProcessorAutoscalingDecider.java index 5605a80a7454c..44cf1188b09a2 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlProcessorAutoscalingDecider.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlProcessorAutoscalingDecider.java @@ -17,7 +17,7 @@ import org.elasticsearch.xpack.autoscaling.capacity.AutoscalingDeciderContext; import org.elasticsearch.xpack.core.ml.inference.assignment.Priority; import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignment; -import org.elasticsearch.xpack.ml.inference.assignment.TrainedModelAssignmentMetadata; +import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.ml.utils.MlProcessors; import java.time.Instant; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsManager.java index 829101b3bd551..223154737df3f 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsManager.java @@ -20,12 +20,16 @@ import org.elasticsearch.cluster.metadata.MappingMetadata; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.persistent.PersistentTasksCustomMetadata; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.ClientHelper; import org.elasticsearch.xpack.core.ml.MlStatsIndex; +import org.elasticsearch.xpack.core.ml.MlTasks; import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsConfig; +import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsState; import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; import org.elasticsearch.xpack.core.ml.job.persistence.ElasticsearchMappings; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; @@ -45,7 +49,10 @@ import org.elasticsearch.xpack.ml.notifications.DataFrameAnalyticsAuditor; import org.elasticsearch.xpack.ml.utils.persistence.ResultsPersisterService; +import java.util.Map; import java.util.Objects; +import java.util.Optional; +import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicBoolean; import static org.elasticsearch.core.Strings.format; @@ -72,6 +79,8 @@ public class 
DataFrameAnalyticsManager { /** Indicates whether the node is shutting down. */ private final AtomicBoolean nodeShuttingDown = new AtomicBoolean(); + private final Map memoryLimitById; + public DataFrameAnalyticsManager( Settings settings, NodeClient client, @@ -84,6 +93,37 @@ public DataFrameAnalyticsManager( ResultsPersisterService resultsPersisterService, ModelLoadingService modelLoadingService, String[] destIndexAllowedSettings + ) { + this( + settings, + client, + threadPool, + clusterService, + configProvider, + processManager, + auditor, + expressionResolver, + resultsPersisterService, + modelLoadingService, + destIndexAllowedSettings, + new ConcurrentHashMap<>() + ); + } + + // For testing only + public DataFrameAnalyticsManager( + Settings settings, + NodeClient client, + ThreadPool threadPool, + ClusterService clusterService, + DataFrameAnalyticsConfigProvider configProvider, + AnalyticsProcessManager processManager, + DataFrameAnalyticsAuditor auditor, + IndexNameExpressionResolver expressionResolver, + ResultsPersisterService resultsPersisterService, + ModelLoadingService modelLoadingService, + String[] destIndexAllowedSettings, + Map memoryLimitById ) { this.settings = Objects.requireNonNull(settings); this.client = Objects.requireNonNull(client); @@ -96,11 +136,13 @@ public DataFrameAnalyticsManager( this.resultsPersisterService = Objects.requireNonNull(resultsPersisterService); this.modelLoadingService = Objects.requireNonNull(modelLoadingService); this.destIndexAllowedSettings = Objects.requireNonNull(destIndexAllowedSettings); + this.memoryLimitById = Objects.requireNonNull(memoryLimitById); } public void execute(DataFrameAnalyticsTask task, ClusterState clusterState, TimeValue masterNodeTimeout) { // With config in hand, determine action to take ActionListener configListener = ActionListener.wrap(config -> { + memoryLimitById.put(config.getId(), config.getModelMemoryLimit()); // Check if existing destination index is incompatible. // If it is, we delete it and start from reindexing. IndexMetadata destIndex = clusterState.getMetadata().index(config.getDest().getIndex()); @@ -224,6 +266,7 @@ private void executeStep(DataFrameAnalyticsTask task, DataFrameAnalyticsConfig c case FINAL -> { LOGGER.info("[{}] Marking task completed", config.getId()); task.markAsCompleted(); + memoryLimitById.remove(config.getId()); } default -> task.markAsFailed(ExceptionsHelper.serverError("Unknown step [{}]", step)); } @@ -291,4 +334,34 @@ public boolean isNodeShuttingDown() { public void markNodeAsShuttingDown() { nodeShuttingDown.set(true); } + + /** + * Get the memory limit for a data frame analytics job if known. + * The memory limit will only be known if it is running on the + * current node, or has been very recently. + * @param id Data frame analytics job ID. + * @return The {@link ByteSizeValue} representing the memory limit, if known, otherwise {@link Optional#empty}. + */ + public Optional getMemoryLimitIfKnown(String id) { + return Optional.ofNullable(memoryLimitById.get(id)); + } + + /** + * Finds the memory used by data frame analytics jobs that are active on the current node. + * This includes jobs that are in the reindexing state, even though they don't have a running + * process, because we want to ensure that when they get as far as needing to run a process + * there'll be space for it. + * @param tasks Persistent tasks metadata. + * @return Memory used by data frame analytics jobs that are active on the current node. 
+ */ + public ByteSizeValue getActiveTaskMemoryUsage(PersistentTasksCustomMetadata tasks) { + long memoryUsedBytes = 0; + for (Map.Entry entry : memoryLimitById.entrySet()) { + DataFrameAnalyticsState state = MlTasks.getDataFrameAnalyticsState(entry.getKey(), tasks); + if (state.consumesMemory()) { + memoryUsedBytes += entry.getValue().getBytes() + DataFrameAnalyticsConfig.PROCESS_MEMORY_OVERHEAD.getBytes(); + } + } + return ByteSizeValue.ofBytes(memoryUsedBytes); + } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/persistence/DataFrameAnalyticsConfigProvider.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/persistence/DataFrameAnalyticsConfigProvider.java index 618cbc075bd99..68dc2bf496a15 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/persistence/DataFrameAnalyticsConfigProvider.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/persistence/DataFrameAnalyticsConfigProvider.java @@ -176,14 +176,12 @@ public void update( // Parse the original config DataFrameAnalyticsConfig originalConfig; - try { - try ( - InputStream stream = getResponse.getSourceAsBytesRef().streamInput(); - XContentParser parser = XContentFactory.xContent(XContentType.JSON) - .createParser(xContentRegistry, LoggingDeprecationHandler.INSTANCE, stream) - ) { - originalConfig = DataFrameAnalyticsConfig.LENIENT_PARSER.apply(parser, null).build(); - } + try ( + InputStream stream = getResponse.getSourceAsBytesRef().streamInput(); + XContentParser parser = XContentFactory.xContent(XContentType.JSON) + .createParser(xContentRegistry, LoggingDeprecationHandler.INSTANCE, stream) + ) { + originalConfig = DataFrameAnalyticsConfig.LENIENT_PARSER.apply(parser, null).build(); } catch (IOException e) { listener.onFailure(new ElasticsearchParseException("Failed to parse data frame analytics configuration [" + id + "]", e)); return; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentClusterService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentClusterService.java index a1ac1aa55c320..471615e8bbd6a 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentClusterService.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentClusterService.java @@ -44,6 +44,7 @@ import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingInfo; import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingState; import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignment; +import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.core.ml.job.messages.Messages; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import org.elasticsearch.xpack.core.ml.utils.MlPlatformArchitecturesUtil; @@ -67,8 +68,8 @@ import java.util.stream.Collectors; import static org.elasticsearch.core.Strings.format; -import static org.elasticsearch.xpack.ml.inference.assignment.TrainedModelAssignmentUtils.NODES_CHANGED_REASON; -import static org.elasticsearch.xpack.ml.inference.assignment.TrainedModelAssignmentUtils.createShuttingDownRoute; +import static org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignmentUtils.NODES_CHANGED_REASON; +import static 
org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignmentUtils.createShuttingDownRoute; public class TrainedModelAssignmentClusterService implements ClusterStateListener { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentNodeService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentNodeService.java index fdb007862cfdc..3fac7c387b12e 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentNodeService.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentNodeService.java @@ -41,6 +41,7 @@ import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingState; import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingStateAndReason; import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignment; +import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.core.ml.inference.persistence.InferenceIndexConstants; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.InferenceConfig; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; @@ -66,8 +67,8 @@ import static org.elasticsearch.core.Strings.format; import static org.elasticsearch.xpack.core.ml.MlTasks.TRAINED_MODEL_ASSIGNMENT_TASK_ACTION; import static org.elasticsearch.xpack.core.ml.MlTasks.TRAINED_MODEL_ASSIGNMENT_TASK_TYPE; +import static org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignmentUtils.NODE_IS_SHUTTING_DOWN; import static org.elasticsearch.xpack.ml.MachineLearning.ML_PYTORCH_MODEL_INFERENCE_FEATURE; -import static org.elasticsearch.xpack.ml.inference.assignment.TrainedModelAssignmentUtils.NODE_IS_SHUTTING_DOWN; public class TrainedModelAssignmentNodeService implements ClusterStateListener { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentRebalancer.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentRebalancer.java index 6e6b447fcea3d..a1142796558f4 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentRebalancer.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentRebalancer.java @@ -19,6 +19,7 @@ import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingInfo; import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingState; import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignment; +import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.ml.MachineLearning; import org.elasticsearch.xpack.ml.inference.assignment.planning.AssignmentPlan; import org.elasticsearch.xpack.ml.inference.assignment.planning.AssignmentPlanner; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentService.java index 1a5b5481704a4..0609e0e6ff916 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentService.java +++ 
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentService.java @@ -33,6 +33,7 @@ import org.elasticsearch.xpack.core.ml.action.StartTrainedModelDeploymentAction; import org.elasticsearch.xpack.core.ml.action.UpdateTrainedModelAssignmentRoutingInfoAction; import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignment; +import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignmentMetadata; import java.util.Objects; import java.util.function.Predicate; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/DeploymentManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/DeploymentManager.java index f48e67f377817..ef5de2718e702 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/DeploymentManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/DeploymentManager.java @@ -496,7 +496,14 @@ synchronized void startAndLoad(TrainedModelLocation modelLocation, ActionListene } logger.debug("[{}] start and load", task.getDeploymentId()); - process.set(pyTorchProcessFactory.createProcess(task, executorServiceForProcess, this::onProcessCrash)); + process.set( + pyTorchProcessFactory.createProcess( + task, + executorServiceForProcess, + () -> resultProcessor.awaitCompletion(COMPLETION_TIMEOUT.getMinutes(), TimeUnit.MINUTES), + this::onProcessCrash + ) + ); startTime = Instant.now(); logger.debug("[{}] process started", task.getDeploymentId()); try { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/loadingservice/ModelLoadingService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/loadingservice/ModelLoadingService.java index e9b7a1a3e137b..5994c61f46297 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/loadingservice/ModelLoadingService.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/loadingservice/ModelLoadingService.java @@ -37,6 +37,7 @@ import org.elasticsearch.tasks.TaskId; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.ml.action.GetTrainedModelsAction; +import org.elasticsearch.xpack.core.ml.inference.ModelAliasMetadata; import org.elasticsearch.xpack.core.ml.inference.TrainedModelConfig; import org.elasticsearch.xpack.core.ml.inference.TrainedModelType; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.ClassificationConfig; @@ -46,7 +47,6 @@ import org.elasticsearch.xpack.core.ml.inference.trainedmodel.inference.InferenceDefinition; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import org.elasticsearch.xpack.ml.MachineLearning; -import org.elasticsearch.xpack.ml.inference.ModelAliasMetadata; import org.elasticsearch.xpack.ml.inference.TrainedModelStatsService; import org.elasticsearch.xpack.ml.inference.ingest.InferenceProcessor; import org.elasticsearch.xpack.ml.inference.persistence.TrainedModelProvider; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ltr/LearningToRankService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ltr/LearningToRankService.java index 177099801e0a5..bec162d141eba 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ltr/LearningToRankService.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ltr/LearningToRankService.java @@ 
-189,13 +189,13 @@ private QueryExtractorBuilder applyParams(QueryExtractorBu try { Script script = new Script(ScriptType.INLINE, DEFAULT_TEMPLATE_LANG, templateSource, SCRIPT_OPTIONS, Collections.emptyMap()); String parsedTemplate = scriptService.compile(script, TemplateScript.CONTEXT).newInstance(params).execute(); - XContentParser parser = XContentType.JSON.xContent().createParser(parserConfiguration, parsedTemplate); - - return new QueryExtractorBuilder( - queryExtractorBuilder.featureName(), - QueryProvider.fromXContent(parser, false, INFERENCE_CONFIG_QUERY_BAD_FORMAT), - queryExtractorBuilder.defaultScore() - ); + try (XContentParser parser = XContentType.JSON.xContent().createParser(parserConfiguration, parsedTemplate)) { + return new QueryExtractorBuilder( + queryExtractorBuilder.featureName(), + QueryProvider.fromXContent(parser, false, INFERENCE_CONFIG_QUERY_BAD_FORMAT), + queryExtractorBuilder.defaultScore() + ); + } } catch (GeneralScriptException e) { if (e.getRootCause().getClass().getName().equals(MustacheInvalidParameterException.class.getName())) { // Can't use instanceof since it returns an unexpected result. diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/persistence/TrainedModelProvider.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/persistence/TrainedModelProvider.java index c10510c9eac57..1311a96e0f6a8 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/persistence/TrainedModelProvider.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/persistence/TrainedModelProvider.java @@ -72,6 +72,7 @@ import org.elasticsearch.xpack.core.ml.MlStatsIndex; import org.elasticsearch.xpack.core.ml.action.GetTrainedModelsAction; import org.elasticsearch.xpack.core.ml.inference.InferenceToXContentCompressor; +import org.elasticsearch.xpack.core.ml.inference.ModelAliasMetadata; import org.elasticsearch.xpack.core.ml.inference.TrainedModelConfig; import org.elasticsearch.xpack.core.ml.inference.TrainedModelType; import org.elasticsearch.xpack.core.ml.inference.persistence.InferenceIndexConstants; @@ -85,7 +86,6 @@ import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import org.elasticsearch.xpack.core.ml.utils.ToXContentParams; import org.elasticsearch.xpack.ml.MachineLearning; -import org.elasticsearch.xpack.ml.inference.ModelAliasMetadata; import org.elasticsearch.xpack.ml.inference.nlp.Vocabulary; import java.io.IOException; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/pytorch/process/NativePyTorchProcess.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/pytorch/process/NativePyTorchProcess.java index 5908c550d318f..d2e5369ef4bd3 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/pytorch/process/NativePyTorchProcess.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/pytorch/process/NativePyTorchProcess.java @@ -20,6 +20,7 @@ import java.nio.file.Path; import java.util.Iterator; import java.util.List; +import java.util.concurrent.TimeoutException; import java.util.function.Consumer; public class NativePyTorchProcess extends AbstractNativeProcess implements PyTorchProcess { @@ -27,6 +28,7 @@ public class NativePyTorchProcess extends AbstractNativeProcess implements PyTor private static final String NAME = "pytorch_inference"; private final ProcessResultsParser resultsParser; + private final PyTorchProcessFactory.TimeoutRunnable
afterInStreamClose; protected NativePyTorchProcess( String jobId, @@ -34,9 +36,11 @@ protected NativePyTorchProcess( ProcessPipes processPipes, int numberOfFields, List filesToDelete, + PyTorchProcessFactory.TimeoutRunnable afterInStreamClose, Consumer onProcessCrash ) { super(jobId, nativeController, processPipes, numberOfFields, filesToDelete, onProcessCrash); + this.afterInStreamClose = afterInStreamClose; this.resultsParser = new ProcessResultsParser<>(PyTorchResult.PARSER, NamedXContentRegistry.EMPTY); } @@ -71,4 +75,9 @@ public void writeInferenceRequest(BytesReference jsonRequest) throws IOException processInStream().write('\n'); processInStream().flush(); } + + @Override + protected void afterProcessInStreamClose() throws TimeoutException { + afterInStreamClose.run(); + } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/pytorch/process/NativePyTorchProcessFactory.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/pytorch/process/NativePyTorchProcessFactory.java index 4585ca29e8d14..b26c6720ed179 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/pytorch/process/NativePyTorchProcessFactory.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/pytorch/process/NativePyTorchProcessFactory.java @@ -56,6 +56,7 @@ void setProcessConnectTimeout(TimeValue processConnectTimeout) { public NativePyTorchProcess createProcess( TrainedModelDeploymentTask task, ExecutorService executorService, + TimeoutRunnable afterInStreamClose, Consumer onProcessCrash ) { ProcessPipes processPipes = new ProcessPipes( @@ -80,6 +81,7 @@ public NativePyTorchProcess createProcess( processPipes, 0, Collections.emptyList(), + afterInStreamClose, onProcessCrash ); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/pytorch/process/PyTorchProcessFactory.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/pytorch/process/PyTorchProcessFactory.java index 07d9e8faa22ea..507c6115a392d 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/pytorch/process/PyTorchProcessFactory.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/pytorch/process/PyTorchProcessFactory.java @@ -10,9 +10,19 @@ import org.elasticsearch.xpack.ml.inference.deployment.TrainedModelDeploymentTask; import java.util.concurrent.ExecutorService; +import java.util.concurrent.TimeoutException; import java.util.function.Consumer; public interface PyTorchProcessFactory { - PyTorchProcess createProcess(TrainedModelDeploymentTask task, ExecutorService executorService, Consumer onProcessCrash); + interface TimeoutRunnable { + void run() throws TimeoutException; + } + + PyTorchProcess createProcess( + TrainedModelDeploymentTask task, + ExecutorService executorService, + TimeoutRunnable afterInStreamClose, + Consumer onProcessCrash + ); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobManager.java index 035f4864ebace..7532ae4317830 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobManager.java @@ -478,24 +478,27 @@ private void validate(Job job, JobUpdate jobUpdate, ActionListener handler private void validateModelSnapshotIdUpdate(Job job, String modelSnapshotId, VoidChainTaskExecutor voidChainTaskExecutor) { if (modelSnapshotId != 
null && ModelSnapshot.isTheEmptySnapshot(modelSnapshotId) == false) { - voidChainTaskExecutor.add(listener -> jobResultsProvider.getModelSnapshot(job.getId(), modelSnapshotId, newModelSnapshot -> { - if (newModelSnapshot == null) { - String message = Messages.getMessage(Messages.REST_NO_SUCH_MODEL_SNAPSHOT, modelSnapshotId, job.getId()); - listener.onFailure(new ResourceNotFoundException(message)); - return; - } - jobResultsProvider.getModelSnapshot(job.getId(), job.getModelSnapshotId(), oldModelSnapshot -> { - if (oldModelSnapshot != null && newModelSnapshot.result.getTimestamp().before(oldModelSnapshot.result.getTimestamp())) { - String message = "Job [" - + job.getId() - + "] has a more recent model snapshot [" - + oldModelSnapshot.result.getSnapshotId() - + "]"; - listener.onFailure(new IllegalArgumentException(message)); + voidChainTaskExecutor.add( + listener -> jobResultsProvider.getModelSnapshot(job.getId(), modelSnapshotId, false, newModelSnapshot -> { + if (newModelSnapshot == null) { + String message = Messages.getMessage(Messages.REST_NO_SUCH_MODEL_SNAPSHOT, modelSnapshotId, job.getId()); + listener.onFailure(new ResourceNotFoundException(message)); + return; } - listener.onResponse(null); - }, listener::onFailure); - }, listener::onFailure)); + jobResultsProvider.getModelSnapshot(job.getId(), job.getModelSnapshotId(), false, oldModelSnapshot -> { + if (oldModelSnapshot != null + && newModelSnapshot.result.getTimestamp().before(oldModelSnapshot.result.getTimestamp())) { + String message = "Job [" + + job.getId() + + "] has a more recent model snapshot [" + + oldModelSnapshot.result.getSnapshotId() + + "]"; + listener.onFailure(new IllegalArgumentException(message)); + } + listener.onResponse(null); + }, listener::onFailure); + }, listener::onFailure) + ); } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/NodeLoadDetector.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/NodeLoadDetector.java index 548c95d1ddd50..f2bf180943b82 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/NodeLoadDetector.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/NodeLoadDetector.java @@ -16,10 +16,10 @@ import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingInfo; import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingState; import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignment; +import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.core.ml.utils.MemoryTrackedTaskState; import org.elasticsearch.xpack.core.ml.utils.MlTaskParams; import org.elasticsearch.xpack.ml.MachineLearning; -import org.elasticsearch.xpack.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.ml.process.MlMemoryTracker; import org.elasticsearch.xpack.ml.utils.NativeMemoryCalculator; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsProvider.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsProvider.java index 2c78f9e37fa19..323214896bf7d 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsProvider.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsProvider.java @@ -1257,11 +1257,13 @@ public BatchedResultsIterator newBatchedInfluencersIterator(String j } /** - * Get a job's model snapshot by its id + * Get a 
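The reformatted `validateModelSnapshotIdUpdate` lambda above nests two async snapshot lookups; the core check is only that the requested snapshot exists and is not older than the job's current one. A condensed restatement outside the `ActionListener` plumbing, with simplified hypothetical names (`SnapshotOrderCheck`, plain `Date` timestamps, callback parameters):

    import java.util.Date;
    import java.util.function.Consumer;

    class SnapshotOrderCheck {
        static void validate(Date newTimestamp, Date oldTimestamp, Runnable onValid, Consumer<Exception> onInvalid) {
            if (oldTimestamp != null && newTimestamp.before(oldTimestamp)) {
                // reject updates that would move the job's model snapshot backwards in time
                onInvalid.accept(new IllegalArgumentException("job has a more recent model snapshot"));
                return;
            }
            onValid.run();
        }
    }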
job's model snapshot by its id. + * Quantiles should only be included when strictly required, because they can be very large and consume a lot of heap. */ public void getModelSnapshot( String jobId, @Nullable String modelSnapshotId, + boolean includeQuantiles, Consumer> handler, Consumer errorHandler ) { @@ -1271,6 +1273,9 @@ public void getModelSnapshot( } String resultsIndex = AnomalyDetectorsIndex.jobResultsAliasedName(jobId); SearchRequestBuilder search = createDocIdSearch(resultsIndex, ModelSnapshot.documentId(jobId, modelSnapshotId)); + if (includeQuantiles == false) { + search.setFetchSource(null, ModelSnapshot.QUANTILES.getPreferredName()); + } searchSingleResult( jobId, ModelSnapshot.TYPE.getPreferredName(), diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java index 2c13d1c4ca7fd..2f7179b194024 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java @@ -1068,4 +1068,24 @@ public void clusterChanged(ClusterChangedEvent event) { resetInProgress = MlMetadata.getMlMetadata(event.state()).isResetMode(); } + /** + * Finds the memory used by open autodetect processes on the current node. + * @return Memory used by open autodetect processes on the current node. + */ + public ByteSizeValue getOpenProcessMemoryUsage() { + long memoryUsedBytes = 0; + for (ProcessContext processContext : processByAllocation.values()) { + if (processContext.getState() == ProcessContext.ProcessStateName.RUNNING) { + ModelSizeStats modelSizeStats = processContext.getAutodetectCommunicator().getModelSizeStats(); + ModelSizeStats.AssignmentMemoryBasis basis = modelSizeStats.getAssignmentMemoryBasis(); + memoryUsedBytes += switch (basis != null ? 
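The heap saving promised by the new `includeQuantiles` flag above comes from ordinary `_source` filtering: the large quantiles field is excluded at fetch time rather than deserialized and discarded. A sketch of the same move with `SearchRequestBuilder` directly, where `client`, `resultsIndex`, `docId` and `includeQuantiles` are assumed surrounding variables and `"quantiles"` mirrors `ModelSnapshot.QUANTILES.getPreferredName()`:

    SearchRequestBuilder search = client.prepareSearch(resultsIndex)
        .setQuery(QueryBuilders.idsQuery().addIds(docId));
    if (includeQuantiles == false) {
        // includes = null keeps the rest of _source; the exclude drops only quantiles
        search.setFetchSource(null, "quantiles");
    }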
basis : ModelSizeStats.AssignmentMemoryBasis.MODEL_MEMORY_LIMIT) { + case MODEL_MEMORY_LIMIT -> modelSizeStats.getModelBytesMemoryLimit(); + case CURRENT_MODEL_BYTES -> modelSizeStats.getModelBytes(); + case PEAK_MODEL_BYTES -> Optional.ofNullable(modelSizeStats.getPeakModelBytes()).orElse(modelSizeStats.getModelBytes()); + }; + memoryUsedBytes += Job.PROCESS_MEMORY_OVERHEAD.getBytes(); + } + } + return ByteSizeValue.ofBytes(memoryUsedBytes); + } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/snapshot/upgrader/SnapshotUpgradeTaskExecutor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/snapshot/upgrader/SnapshotUpgradeTaskExecutor.java index 69b926876302a..cc3f8f0dd1e67 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/snapshot/upgrader/SnapshotUpgradeTaskExecutor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/snapshot/upgrader/SnapshotUpgradeTaskExecutor.java @@ -329,6 +329,6 @@ private void deleteSnapshotAndFailTask(AllocatedPersistentTask task, String jobI ); }); - jobResultsProvider.getModelSnapshot(jobId, snapshotId, modelSnapshotListener::onResponse, modelSnapshotListener::onFailure); + jobResultsProvider.getModelSnapshot(jobId, snapshotId, false, modelSnapshotListener::onResponse, modelSnapshotListener::onFailure); } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/AbstractNativeProcess.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/AbstractNativeProcess.java index 8ea85208a2de8..dd71800bd4f90 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/AbstractNativeProcess.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/AbstractNativeProcess.java @@ -223,7 +223,7 @@ public void close() throws IOException { * Implementations can override this if they need to perform extra processing * immediately after the native process's input stream is closed. 
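The new `getOpenProcessMemoryUsage` above sums, per running process, whichever memory figure the autodetect process reported as its assignment basis, falling back to the configured model memory limit when no basis was reported, plus a fixed per-process overhead. The null-safe basis selection isolated as a sketch, with `MemoryBasis` and `bytesFor` as stand-ins for `ModelSizeStats` and its getters:

    enum MemoryBasis { MODEL_MEMORY_LIMIT, CURRENT_MODEL_BYTES, PEAK_MODEL_BYTES }

    class ProcessMemory {
        static long bytesFor(MemoryBasis basis, long limitBytes, long modelBytes, Long peakBytes) {
            // default to the configured limit when the process has not reported a basis
            MemoryBasis effective = basis != null ? basis : MemoryBasis.MODEL_MEMORY_LIMIT;
            return switch (effective) {
                case MODEL_MEMORY_LIMIT -> limitBytes;
                case CURRENT_MODEL_BYTES -> modelBytes;
                case PEAK_MODEL_BYTES -> peakBytes != null ? peakBytes : modelBytes; // peak may be unreported
            };
        }
    }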
*/ - protected void afterProcessInStreamClose() { + protected void afterProcessInStreamClose() throws TimeoutException { // no-op by default } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/MlMemoryTracker.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/MlMemoryTracker.java index 5ba577eb90ab7..3f502c4d95cc9 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/MlMemoryTracker.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/MlMemoryTracker.java @@ -26,12 +26,12 @@ import org.elasticsearch.xpack.core.ml.action.StartTrainedModelDeploymentAction; import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsConfig; import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignment; +import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.core.ml.job.config.AnalysisLimits; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.snapshot.upgrade.SnapshotUpgradeTaskParams; import org.elasticsearch.xpack.ml.MachineLearning; import org.elasticsearch.xpack.ml.dataframe.persistence.DataFrameAnalyticsConfigProvider; -import org.elasticsearch.xpack.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.ml.job.JobManager; import org.elasticsearch.xpack.ml.job.persistence.JobResultsProvider; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/queries/TokenPruningConfig.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/queries/TokenPruningConfig.java index d789a645fd9c4..90fb9291b3b82 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/queries/TokenPruningConfig.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/queries/TokenPruningConfig.java @@ -47,8 +47,8 @@ public TokenPruningConfig(float tokensFreqRatioThreshold, float tokensWeightThre throw new IllegalArgumentException( "[" + TOKENS_FREQ_RATIO_THRESHOLD.getPreferredName() - + "] must be between [1.0] and [" - + String.format(Locale.ROOT, "%.1f", MAX_TOKENS_FREQ_RATIO_THRESHOLD) + + "] must be between [1] and [" + + String.format(Locale.ROOT, "%d", (int) MAX_TOKENS_FREQ_RATIO_THRESHOLD) + "], got " + tokensFreqRatioThreshold ); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/RestDeleteExpiredDataAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/RestDeleteExpiredDataAction.java index 1fb45c07c5818..37731fcbfb10b 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/RestDeleteExpiredDataAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/RestDeleteExpiredDataAction.java @@ -49,7 +49,9 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient DeleteExpiredDataAction.Request request; if (restRequest.hasContent()) { - request = DeleteExpiredDataAction.Request.parseRequest(jobId, restRequest.contentParser()); + try (var parser = restRequest.contentParser()) { + request = DeleteExpiredDataAction.Request.parseRequest(jobId, parser); + } } else { request = new DeleteExpiredDataAction.Request(); request.setJobId(jobId); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/datafeeds/RestPutDatafeedAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/datafeeds/RestPutDatafeedAction.java index a5f98763d3245..64981805717a1 100644 --- 
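The `TokenPruningConfig` hunk above rewords the bounds in the error message from `[1.0]`/`[100.0]` to `[1]`/`[100]` by formatting the float constant as an int. A small self-contained illustration of the two formats (with `max` standing in for `MAX_TOKENS_FREQ_RATIO_THRESHOLD`):

    import java.util.Locale;

    public class FormatDemo {
        public static void main(String[] args) {
            float max = 100.0f; // stand-in for MAX_TOKENS_FREQ_RATIO_THRESHOLD
            System.out.println(String.format(Locale.ROOT, "%.1f", max));     // "100.0" (old message text)
            System.out.println(String.format(Locale.ROOT, "%d", (int) max)); // "100"   (new message text)
        }
    }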
a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/datafeeds/RestPutDatafeedAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/datafeeds/RestPutDatafeedAction.java @@ -47,8 +47,10 @@ public String getName() { protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient client) throws IOException { String datafeedId = restRequest.param(DatafeedConfig.ID.getPreferredName()); IndicesOptions indicesOptions = IndicesOptions.fromRequest(restRequest, SearchRequest.DEFAULT_INDICES_OPTIONS); - XContentParser parser = restRequest.contentParser(); - PutDatafeedAction.Request putDatafeedRequest = PutDatafeedAction.Request.parseRequest(datafeedId, indicesOptions, parser); + PutDatafeedAction.Request putDatafeedRequest; + try (XContentParser parser = restRequest.contentParser()) { + putDatafeedRequest = PutDatafeedAction.Request.parseRequest(datafeedId, indicesOptions, parser); + } putDatafeedRequest.timeout(restRequest.paramAsTime("timeout", putDatafeedRequest.timeout())); putDatafeedRequest.masterNodeTimeout(restRequest.paramAsTime("master_timeout", putDatafeedRequest.masterNodeTimeout())); return channel -> client.execute(PutDatafeedAction.INSTANCE, putDatafeedRequest, new RestToXContentListener<>(channel)); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/datafeeds/RestUpdateDatafeedAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/datafeeds/RestUpdateDatafeedAction.java index f0260a9301edc..97e1514441441 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/datafeeds/RestUpdateDatafeedAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/datafeeds/RestUpdateDatafeedAction.java @@ -53,8 +53,10 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient || restRequest.hasParam("ignore_throttled")) { indicesOptions = IndicesOptions.fromRequest(restRequest, SearchRequest.DEFAULT_INDICES_OPTIONS); } - XContentParser parser = restRequest.contentParser(); - UpdateDatafeedAction.Request updateDatafeedRequest = UpdateDatafeedAction.Request.parseRequest(datafeedId, indicesOptions, parser); + UpdateDatafeedAction.Request updateDatafeedRequest; + try (XContentParser parser = restRequest.contentParser()) { + updateDatafeedRequest = UpdateDatafeedAction.Request.parseRequest(datafeedId, indicesOptions, parser); + } updateDatafeedRequest.timeout(restRequest.paramAsTime("timeout", updateDatafeedRequest.timeout())); updateDatafeedRequest.masterNodeTimeout(restRequest.paramAsTime("master_timeout", updateDatafeedRequest.masterNodeTimeout())); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/dataframe/RestPostDataFrameAnalyticsUpdateAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/dataframe/RestPostDataFrameAnalyticsUpdateAction.java index 48a820360e61b..52a3d83eeb11a 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/dataframe/RestPostDataFrameAnalyticsUpdateAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/dataframe/RestPostDataFrameAnalyticsUpdateAction.java @@ -48,8 +48,10 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient } String id = restRequest.param(DataFrameAnalyticsConfig.ID.getPreferredName()); - XContentParser parser = restRequest.contentParser(); - UpdateDataFrameAnalyticsAction.Request updateRequest = UpdateDataFrameAnalyticsAction.Request.parseRequest(id, parser); + 
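The recurring change across these REST handlers (delete-expired-data above, both datafeed actions here, and the data frame analytics and infer handlers below) is the same leak fix: `XContentParser` is `Closeable`, so the parser is now opened in try-with-resources and only the parsed request object escapes the block. The pattern in isolation, with `MyAction` and `id` as hypothetical stand-ins:

    MyAction.Request request; // MyAction is a hypothetical stand-in for the concrete action
    try (XContentParser parser = restRequest.contentParser()) {
        // the parser is released even if parseRequest throws
        request = MyAction.Request.parseRequest(id, parser);
    }
    // use `request` only after the parser has been closed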
UpdateDataFrameAnalyticsAction.Request updateRequest; + try (XContentParser parser = restRequest.contentParser()) { + updateRequest = UpdateDataFrameAnalyticsAction.Request.parseRequest(id, parser); + } updateRequest.timeout(restRequest.paramAsTime("timeout", updateRequest.timeout())); return channel -> client.execute(UpdateDataFrameAnalyticsAction.INSTANCE, updateRequest, new RestToXContentListener<>(channel)); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/dataframe/RestPutDataFrameAnalyticsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/dataframe/RestPutDataFrameAnalyticsAction.java index 9a3d958bd3a09..896b1dfdb6df2 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/dataframe/RestPutDataFrameAnalyticsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/dataframe/RestPutDataFrameAnalyticsAction.java @@ -57,8 +57,10 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient } String id = restRequest.param(DataFrameAnalyticsConfig.ID.getPreferredName()); - XContentParser parser = restRequest.contentParser(); - PutDataFrameAnalyticsAction.Request putRequest = PutDataFrameAnalyticsAction.Request.parseRequest(id, parser); + PutDataFrameAnalyticsAction.Request putRequest; + try (XContentParser parser = restRequest.contentParser()) { + putRequest = PutDataFrameAnalyticsAction.Request.parseRequest(id, parser); + } putRequest.timeout(restRequest.paramAsTime("timeout", putRequest.timeout())); return channel -> client.execute(PutDataFrameAnalyticsAction.INSTANCE, putRequest, new RestToXContentListener<>(channel)); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/inference/RestInferTrainedModelAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/inference/RestInferTrainedModelAction.java index 4afd07479a3eb..78b02871c3c57 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/inference/RestInferTrainedModelAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/inference/RestInferTrainedModelAction.java @@ -47,7 +47,10 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient if (restRequest.hasContent() == false) { throw ExceptionsHelper.badRequestException("requires body"); } - InferModelAction.Request.Builder request = InferModelAction.Request.parseRequest(modelId, restRequest.contentParser()); + InferModelAction.Request.Builder request; + try (var parser = restRequest.contentParser()) { + request = InferModelAction.Request.parseRequest(modelId, parser); + } if (restRequest.hasParam(InferModelAction.Request.TIMEOUT.getPreferredName())) { TimeValue inferTimeout = restRequest.paramAsTime( diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/persistence/MlParserUtils.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/persistence/MlParserUtils.java index 8661497593815..36607e894edef 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/persistence/MlParserUtils.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/persistence/MlParserUtils.java @@ -10,14 +10,14 @@ import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.search.SearchHit; import 
org.elasticsearch.xcontent.NamedXContentRegistry; -import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentParser; +import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.XContentType; import java.io.IOException; -import java.io.InputStream; import java.util.function.BiFunction; public final class MlParserUtils { @@ -33,9 +33,12 @@ private MlParserUtils() {} public static T parse(SearchHit hit, BiFunction objectParser) { BytesReference source = hit.getSourceRef(); try ( - InputStream stream = source.streamInput(); - XContentParser parser = XContentFactory.xContent(XContentType.JSON) - .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream) + XContentParser parser = XContentHelper.createParserNotCompressed( + XContentParserConfiguration.EMPTY.withRegistry(NamedXContentRegistry.EMPTY) + .withDeprecationHandler(LoggingDeprecationHandler.INSTANCE), + source, + XContentType.JSON + ) ) { return objectParser.apply(parser, null); } catch (IOException e) { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlLifeCycleServiceTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlLifeCycleServiceTests.java index a7a9122c96606..2b206de4cf42f 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlLifeCycleServiceTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlLifeCycleServiceTests.java @@ -28,6 +28,7 @@ import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingInfo; import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingState; import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignment; +import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.core.ml.job.config.JobState; import org.elasticsearch.xpack.core.ml.job.config.JobTaskState; import org.elasticsearch.xpack.core.ml.job.snapshot.upgrade.SnapshotUpgradeState; @@ -35,7 +36,6 @@ import org.elasticsearch.xpack.core.ml.job.snapshot.upgrade.SnapshotUpgradeTaskState; import org.elasticsearch.xpack.ml.datafeed.DatafeedRunner; import org.elasticsearch.xpack.ml.dataframe.DataFrameAnalyticsManager; -import org.elasticsearch.xpack.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.ml.job.process.autodetect.AutodetectProcessManager; import org.elasticsearch.xpack.ml.process.MlController; import org.elasticsearch.xpack.ml.process.MlMemoryTracker; diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlMetricsTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlMetricsTests.java new file mode 100644 index 0000000000000..2262c21070e75 --- /dev/null +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlMetricsTests.java @@ -0,0 +1,183 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
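The `MlParserUtils` change above replaces the manual stream-plus-`XContentFactory` plumbing with a parser configuration object and `XContentHelper.createParserNotCompressed`, which reads straight from the `BytesReference`. The shape of the new code path, taken from the hunk itself (`source` and `objectParser` are the method's existing arguments):

    XContentParserConfiguration config = XContentParserConfiguration.EMPTY
        .withRegistry(NamedXContentRegistry.EMPTY)
        .withDeprecationHandler(LoggingDeprecationHandler.INSTANCE);
    try (XContentParser parser = XContentHelper.createParserNotCompressed(config, source, XContentType.JSON)) {
        return objectParser.apply(parser, null); // same parse callback as before, no InputStream to manage
    }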
+ */ + +package org.elasticsearch.xpack.ml; + +import org.elasticsearch.client.internal.node.NodeClient; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.ml.MlTasks; +import org.elasticsearch.xpack.core.ml.action.StartDatafeedAction; +import org.elasticsearch.xpack.core.ml.action.StartTrainedModelDeploymentAction; +import org.elasticsearch.xpack.core.ml.datafeed.DatafeedState; +import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsConfig; +import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsState; +import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingInfo; +import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingState; +import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignment; +import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignmentMetadata; +import org.elasticsearch.xpack.core.ml.job.config.JobState; +import org.elasticsearch.xpack.ml.autoscaling.MlMemoryAutoscalingDeciderTests; +import org.elasticsearch.xpack.ml.dataframe.DataFrameAnalyticsManager; +import org.elasticsearch.xpack.ml.dataframe.persistence.DataFrameAnalyticsConfigProvider; +import org.elasticsearch.xpack.ml.dataframe.process.AnalyticsProcessManager; +import org.elasticsearch.xpack.ml.inference.loadingservice.ModelLoadingService; +import org.elasticsearch.xpack.ml.notifications.DataFrameAnalyticsAuditor; +import org.elasticsearch.xpack.ml.utils.persistence.ResultsPersisterService; + +import java.util.Map; + +import static org.elasticsearch.xpack.ml.MachineLearning.NATIVE_EXECUTABLE_CODE_OVERHEAD; +import static org.elasticsearch.xpack.ml.job.JobNodeSelector.AWAITING_LAZY_ASSIGNMENT; +import static org.hamcrest.Matchers.is; +import static org.mockito.Mockito.mock; + +public class MlMetricsTests extends ESTestCase { + + public void testFindTaskStatuses() { + + PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + MlMemoryAutoscalingDeciderTests.addJobTask("job1", "node1", JobState.OPENED, tasksBuilder); + MlMemoryAutoscalingDeciderTests.addJobTask("job2", "node1", JobState.OPENED, tasksBuilder); + MlMemoryAutoscalingDeciderTests.addJobTask("job3", "node2", JobState.FAILED, tasksBuilder); + MlMemoryAutoscalingDeciderTests.addJobTask("job4", null, JobState.OPENING, tasksBuilder); + MlMemoryAutoscalingDeciderTests.addJobTask("job5", "node1", JobState.CLOSING, tasksBuilder); + MlMemoryAutoscalingDeciderTests.addJobTask("job6", "node2", JobState.OPENED, tasksBuilder); + MlMemoryAutoscalingDeciderTests.addJobTask("job7", "node2", JobState.OPENING, tasksBuilder); + addDatafeedTask("datafeed1", "node1", DatafeedState.STARTED, tasksBuilder); + addDatafeedTask("datafeed2", "node1", DatafeedState.STARTED, tasksBuilder); + addDatafeedTask("datafeed5", "node1", DatafeedState.STOPPING, tasksBuilder); + addDatafeedTask("datafeed6", "node2", DatafeedState.STARTED, tasksBuilder); + addDatafeedTask("datafeed7", "node2", DatafeedState.STARTING, tasksBuilder); + MlMemoryAutoscalingDeciderTests.addAnalyticsTask("dfa1", "node1", DataFrameAnalyticsState.STARTED, tasksBuilder); + 
MlMemoryAutoscalingDeciderTests.addAnalyticsTask("dfa2", "node2", DataFrameAnalyticsState.STARTED, tasksBuilder); + MlMemoryAutoscalingDeciderTests.addAnalyticsTask("dfa3", "node1", DataFrameAnalyticsState.FAILED, tasksBuilder); + MlMemoryAutoscalingDeciderTests.addAnalyticsTask("dfa4", "node2", DataFrameAnalyticsState.REINDEXING, tasksBuilder); + MlMemoryAutoscalingDeciderTests.addAnalyticsTask("dfa5", null, DataFrameAnalyticsState.STARTING, tasksBuilder); + MlMemoryAutoscalingDeciderTests.addAnalyticsTask("dfa6", "node1", DataFrameAnalyticsState.ANALYZING, tasksBuilder); + MlMemoryAutoscalingDeciderTests.addAnalyticsTask("dfa7", "node1", DataFrameAnalyticsState.STOPPING, tasksBuilder); + MlMemoryAutoscalingDeciderTests.addAnalyticsTask("dfa8", "node1", DataFrameAnalyticsState.STARTED, tasksBuilder); + MlMemoryAutoscalingDeciderTests.addAnalyticsTask("dfa9", null, DataFrameAnalyticsState.FAILED, tasksBuilder); + + MlMetrics.MlTaskStatusCounts counts = MlMetrics.findTaskStatuses(tasksBuilder.build()); + assertThat(counts.adOpeningCount(), is(2)); + assertThat(counts.adOpenedCount(), is(3)); + assertThat(counts.adClosingCount(), is(1)); + assertThat(counts.adFailedCount(), is(1)); + assertThat(counts.datafeedStartingCount(), is(1)); + assertThat(counts.datafeedStartedCount(), is(3)); + assertThat(counts.datafeedStoppingCount(), is(1)); + assertThat(counts.dfaStartingCount(), is(1)); + assertThat(counts.dfaStartedCount(), is(3)); + assertThat(counts.dfaReindexingCount(), is(1)); + assertThat(counts.dfaAnalyzingCount(), is(1)); + assertThat(counts.dfaStoppingCount(), is(1)); + assertThat(counts.dfaFailedCount(), is(2)); + } + + public void testFindDfaMemoryUsage() { + + PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + MlMemoryAutoscalingDeciderTests.addAnalyticsTask("dfa1", "node1", DataFrameAnalyticsState.STARTED, tasksBuilder); + MlMemoryAutoscalingDeciderTests.addAnalyticsTask("dfa2", "node2", DataFrameAnalyticsState.STARTED, tasksBuilder); + MlMemoryAutoscalingDeciderTests.addAnalyticsTask("dfa3", "node1", DataFrameAnalyticsState.FAILED, tasksBuilder); + MlMemoryAutoscalingDeciderTests.addAnalyticsTask("dfa4", "node2", DataFrameAnalyticsState.REINDEXING, tasksBuilder); + MlMemoryAutoscalingDeciderTests.addAnalyticsTask("dfa5", null, DataFrameAnalyticsState.STARTING, tasksBuilder); + MlMemoryAutoscalingDeciderTests.addAnalyticsTask("dfa6", "node1", DataFrameAnalyticsState.ANALYZING, tasksBuilder); + MlMemoryAutoscalingDeciderTests.addAnalyticsTask("dfa7", "node1", DataFrameAnalyticsState.STOPPING, tasksBuilder); + MlMemoryAutoscalingDeciderTests.addAnalyticsTask("dfa8", "node1", DataFrameAnalyticsState.STARTED, tasksBuilder); + MlMemoryAutoscalingDeciderTests.addAnalyticsTask("dfa9", null, DataFrameAnalyticsState.FAILED, tasksBuilder); + + DataFrameAnalyticsManager dfaManager = new DataFrameAnalyticsManager( + Settings.EMPTY, + mock(NodeClient.class), + mock(ThreadPool.class), + mock(ClusterService.class), + mock(DataFrameAnalyticsConfigProvider.class), + mock(AnalyticsProcessManager.class), + mock(DataFrameAnalyticsAuditor.class), + mock(IndexNameExpressionResolver.class), + mock(ResultsPersisterService.class), + mock(ModelLoadingService.class), + new String[] {}, + Map.of( + "dfa1", + ByteSizeValue.ofGb(1), + "dfa3", + ByteSizeValue.ofGb(2), + "dfa6", + ByteSizeValue.ofGb(4), + "dfa7", + ByteSizeValue.ofGb(8), + "dfa8", + ByteSizeValue.ofGb(16) + ) + ); + + long bytesUsed = MlMetrics.findDfaMemoryUsage(dfaManager, tasksBuilder.build()); + 
assertThat(bytesUsed, is(ByteSizeValue.ofGb(29).getBytes() + 4 * DataFrameAnalyticsConfig.PROCESS_MEMORY_OVERHEAD.getBytes())); + } + + public void testFindTrainedModelAllocationCounts() { + + TrainedModelAssignmentMetadata.Builder metadataBuilder = TrainedModelAssignmentMetadata.Builder.empty(); + metadataBuilder.addNewAssignment( + "model1", + TrainedModelAssignment.Builder.empty(mock(StartTrainedModelDeploymentAction.TaskParams.class)) + .addRoutingEntry("node1", new RoutingInfo(1, 1, RoutingState.STARTED, "")) + .addRoutingEntry("node2", new RoutingInfo(0, 1, RoutingState.FAILED, "")) + ); + metadataBuilder.addNewAssignment( + "model2", + TrainedModelAssignment.Builder.empty(mock(StartTrainedModelDeploymentAction.TaskParams.class)) + .addRoutingEntry("node1", new RoutingInfo(2, 2, RoutingState.STARTED, "")) + ); + metadataBuilder.addNewAssignment( + "model3", + TrainedModelAssignment.Builder.empty(mock(StartTrainedModelDeploymentAction.TaskParams.class)) + .addRoutingEntry("node2", new RoutingInfo(0, 1, RoutingState.STARTING, "")) + ); + + MlMetrics.TrainedModelAllocationCounts counts = MlMetrics.findTrainedModelAllocationCounts(metadataBuilder.build()); + assertThat(counts.trainedModelsTargetAllocations(), is(5)); + assertThat(counts.trainedModelsCurrentAllocations(), is(3)); + assertThat(counts.trainedModelsFailedAllocations(), is(1)); + } + + public void testFindNativeMemoryFree() { + + long bytesFree = MlMetrics.findNativeMemoryFree( + ByteSizeValue.ofMb(4000).getBytes(), + ByteSizeValue.ofMb(500).getBytes(), + ByteSizeValue.ofMb(1000).getBytes(), + ByteSizeValue.ofMb(2000).getBytes() + ); + assertThat(bytesFree, is(ByteSizeValue.ofMb(500).getBytes() - NATIVE_EXECUTABLE_CODE_OVERHEAD.getBytes())); + } + + public static void addDatafeedTask( + String datafeedId, + String nodeId, + DatafeedState datafeedState, + PersistentTasksCustomMetadata.Builder builder + ) { + builder.addTask( + MlTasks.datafeedTaskId(datafeedId), + MlTasks.DATAFEED_TASK_NAME, + new StartDatafeedAction.DatafeedParams(datafeedId, System.currentTimeMillis()), + nodeId == null ? 
AWAITING_LAZY_ASSIGNMENT : new PersistentTasksCustomMetadata.Assignment(nodeId, "test assignment") + ); + if (datafeedState != null) { + builder.updateTaskState(MlTasks.datafeedTaskId(datafeedId), datafeedState); + } + } +} diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportGetTrainedModelsStatsActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportGetTrainedModelsStatsActionTests.java index c54ac8ba3b84d..bf6d13ada0f94 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportGetTrainedModelsStatsActionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportGetTrainedModelsStatsActionTests.java @@ -28,7 +28,7 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.ml.MachineLearningField; -import org.elasticsearch.xpack.ml.inference.ModelAliasMetadata; +import org.elasticsearch.xpack.core.ml.inference.ModelAliasMetadata; import org.elasticsearch.xpack.ml.inference.ingest.InferenceProcessor; import org.junit.Before; diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/changepoint/ChangePointAggregatorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/changepoint/ChangePointAggregatorTests.java index 442641db8c4ed..2f4ecec87509a 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/changepoint/ChangePointAggregatorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/changepoint/ChangePointAggregatorTests.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.ml.aggs.changepoint; +import org.apache.commons.math3.distribution.GammaDistribution; import org.apache.commons.math3.distribution.NormalDistribution; import org.apache.commons.math3.random.RandomGeneratorFactory; import org.apache.lucene.document.NumericDocValuesField; @@ -34,7 +35,9 @@ import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.lessThan; public class ChangePointAggregatorTests extends AggregatorTestCase { @@ -47,7 +50,208 @@ protected List getSearchPlugins() { private static final String NUMERIC_FIELD_NAME = "value"; private static final String TIME_FIELD_NAME = "timestamp"; - public void testNoChange() throws IOException { + public void testStationaryFalsePositiveRate() throws IOException { + NormalDistribution normal = new NormalDistribution(RandomGeneratorFactory.createRandomGenerator(Randomness.get()), 0, 2); + int fp = 0; + for (int i = 0; i < 100; i++) { + double[] bucketValues = DoubleStream.generate(() -> 10 + normal.sample()).limit(40).toArray(); + int[] candidatePoints = ChangePointAggregator.candidateChangePoints(bucketValues); + ChangePointAggregator.TestStats test = ChangePointAggregator.testForChange(bucketValues, candidatePoints, 1e-3); + fp += test.type() == ChangePointAggregator.Type.STATIONARY ? 
0 : 1; + } + assertThat(fp, lessThan(5)); + + fp = 0; + GammaDistribution gamma = new GammaDistribution(RandomGeneratorFactory.createRandomGenerator(Randomness.get()), 1, 2); + for (int i = 0; i < 100; i++) { + double[] bucketValues = DoubleStream.generate(() -> gamma.sample()).limit(40).toArray(); + int[] candidatePoints = ChangePointAggregator.candidateChangePoints(bucketValues); + ChangePointAggregator.TestStats test = ChangePointAggregator.testForChange(bucketValues, candidatePoints, 1e-3); + fp += test.type() == ChangePointAggregator.Type.STATIONARY ? 0 : 1; + } + assertThat(fp, lessThan(5)); + } + + public void testSampledDistributionTestFalsePositiveRate() throws IOException { + NormalDistribution normal = new NormalDistribution(RandomGeneratorFactory.createRandomGenerator(Randomness.get()), 0.0, 1.0); + int fp = 0; + for (int i = 0; i < 100; i++) { + double[] bucketValues = DoubleStream.generate(() -> 10 + normal.sample()).limit(5000).toArray(); + int[] candidatePoints = ChangePointAggregator.candidateChangePoints(bucketValues); + ChangePointAggregator.TestStats test = ChangePointAggregator.testForChange(bucketValues, candidatePoints, 0.05); + fp += test.type() == ChangePointAggregator.Type.STATIONARY ? 0 : 1; + } + assertThat(fp, lessThan(5)); + } + + public void testNonStationaryFalsePositiveRate() throws IOException { + NormalDistribution normal = new NormalDistribution(RandomGeneratorFactory.createRandomGenerator(Randomness.get()), 0, 2); + int fp = 0; + for (int i = 0; i < 100; i++) { + AtomicInteger j = new AtomicInteger(); + double[] bucketValues = DoubleStream.generate(() -> j.incrementAndGet() + normal.sample()).limit(40).toArray(); + int[] candidatePoints = ChangePointAggregator.candidateChangePoints(bucketValues); + ChangePointAggregator.TestStats test = ChangePointAggregator.testForChange(bucketValues, candidatePoints, 1e-3); + fp += test.type() == ChangePointAggregator.Type.NON_STATIONARY ? 0 : 1; + } + assertThat(fp, lessThan(5)); + + fp = 0; + GammaDistribution gamma = new GammaDistribution(RandomGeneratorFactory.createRandomGenerator(Randomness.get()), 1, 2); + for (int i = 0; i < 100; i++) { + AtomicInteger j = new AtomicInteger(); + double[] bucketValues = DoubleStream.generate(() -> j.incrementAndGet() + gamma.sample()).limit(40).toArray(); + int[] candidatePoints = ChangePointAggregator.candidateChangePoints(bucketValues); + ChangePointAggregator.TestStats test = ChangePointAggregator.testForChange(bucketValues, candidatePoints, 1e-3); + fp += test.type() == ChangePointAggregator.Type.NON_STATIONARY ? 0 : 1; + } + assertThat(fp, lessThan(5)); + } + + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/103847") + public void testStepChangePower() throws IOException { + NormalDistribution normal = new NormalDistribution(RandomGeneratorFactory.createRandomGenerator(Randomness.get()), 0, 2); + int tp = 0; + for (int i = 0; i < 100; i++) { + double[] bucketValues = DoubleStream.concat( + DoubleStream.generate(() -> normal.sample()).limit(20), + DoubleStream.generate(() -> 10 + normal.sample()).limit(20) + ).toArray(); + int[] candidatePoints = ChangePointAggregator.candidateChangePoints(bucketValues); + ChangePointAggregator.TestStats test = ChangePointAggregator.testForChange(bucketValues, candidatePoints, 0.05); + tp += test.type() == ChangePointAggregator.Type.STEP_CHANGE ? 
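The false-positive-rate tests above and the power tests around this point all share one Monte-Carlo shape: sample many independent series from a known distribution, run the detector on each, and bound the empirical error count. A generic sketch, where `detectsChange` is a hypothetical stand-in for `ChangePointAggregator.testForChange(...)` and `normal` is a distribution as in the tests:

    int falsePositives = 0;
    for (int trial = 0; trial < 100; trial++) {
        double[] series = DoubleStream.generate(() -> 10 + normal.sample()).limit(40).toArray();
        if (detectsChange(series)) { // stand-in for testForChange(...) reporting a change
            falsePositives++;
        }
    }
    // deliberately loose bound: if the 1e-3 significance translated directly into a
    // per-trial false-positive probability, ~0.1 hits would be expected over 100 trials
    assertThat(falsePositives, lessThan(5));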
1 : 0; + } + assertThat(tp, greaterThan(90)); + + tp = 0; + GammaDistribution gamma = new GammaDistribution(RandomGeneratorFactory.createRandomGenerator(Randomness.get()), 1, 2); + for (int i = 0; i < 100; i++) { + double[] bucketValues = DoubleStream.concat( + DoubleStream.generate(() -> gamma.sample()).limit(20), + DoubleStream.generate(() -> 10 + gamma.sample()).limit(20) + ).toArray(); + int[] candidatePoints = ChangePointAggregator.candidateChangePoints(bucketValues); + ChangePointAggregator.TestStats test = ChangePointAggregator.testForChange(bucketValues, candidatePoints, 0.05); + tp += test.type() == ChangePointAggregator.Type.STEP_CHANGE ? 1 : 0; + } + assertThat(tp, greaterThan(90)); + } + + public void testTrendChangePower() throws IOException { + NormalDistribution normal = new NormalDistribution(RandomGeneratorFactory.createRandomGenerator(Randomness.get()), 0, 2); + int tp = 0; + for (int i = 0; i < 100; i++) { + AtomicInteger j = new AtomicInteger(); + double[] bucketValues = DoubleStream.concat( + DoubleStream.generate(() -> j.incrementAndGet() + normal.sample()).limit(20), + DoubleStream.generate(() -> 2.0 * j.incrementAndGet() + normal.sample()).limit(20) + ).toArray(); + int[] candidatePoints = ChangePointAggregator.candidateChangePoints(bucketValues); + ChangePointAggregator.TestStats test = ChangePointAggregator.testForChange(bucketValues, candidatePoints, 0.05); + tp += test.type() == ChangePointAggregator.Type.TREND_CHANGE ? 1 : 0; + } + assertThat(tp, greaterThan(90)); + + tp = 0; + GammaDistribution gamma = new GammaDistribution(RandomGeneratorFactory.createRandomGenerator(Randomness.get()), 1, 2); + for (int i = 0; i < 100; i++) { + AtomicInteger j = new AtomicInteger(); + double[] bucketValues = DoubleStream.concat( + DoubleStream.generate(() -> j.incrementAndGet() + gamma.sample()).limit(20), + DoubleStream.generate(() -> 2.0 * j.incrementAndGet() + gamma.sample()).limit(20) + ).toArray(); + int[] candidatePoints = ChangePointAggregator.candidateChangePoints(bucketValues); + ChangePointAggregator.TestStats test = ChangePointAggregator.testForChange(bucketValues, candidatePoints, 0.05); + tp += test.type() == ChangePointAggregator.Type.TREND_CHANGE ? 1 : 0; + } + assertThat(tp, greaterThan(90)); + } + + public void testDistributionChangeTestPower() throws IOException { + NormalDistribution normal1 = new NormalDistribution(RandomGeneratorFactory.createRandomGenerator(Randomness.get()), 0.0, 1.0); + NormalDistribution normal2 = new NormalDistribution(RandomGeneratorFactory.createRandomGenerator(Randomness.get()), 0.0, 10.0); + int tp = 0; + for (int i = 0; i < 100; i++) { + double[] bucketValues = DoubleStream.concat( + DoubleStream.generate(() -> 10 + normal1.sample()).limit(50), + DoubleStream.generate(() -> 10 + normal2.sample()).limit(50) + ).toArray(); + int[] candidatePoints = ChangePointAggregator.candidateChangePoints(bucketValues); + ChangePointAggregator.TestStats test = ChangePointAggregator.testForChange(bucketValues, candidatePoints, 0.05); + tp += test.type() == ChangePointAggregator.Type.DISTRIBUTION_CHANGE ? 
1 : 0; + } + assertThat(tp, greaterThan(90)); + } + + public void testMultipleChanges() throws IOException { + NormalDistribution normal1 = new NormalDistribution(RandomGeneratorFactory.createRandomGenerator(Randomness.get()), 78.0, 3.0); + NormalDistribution normal2 = new NormalDistribution(RandomGeneratorFactory.createRandomGenerator(Randomness.get()), 40.0, 6.0); + NormalDistribution normal3 = new NormalDistribution(RandomGeneratorFactory.createRandomGenerator(Randomness.get()), 1.0, 0.3); + int tp = 0; + for (int i = 0; i < 100; i++) { + double[] bucketValues = DoubleStream.concat( + DoubleStream.concat( + DoubleStream.generate(() -> normal1.sample()).limit(7), + DoubleStream.generate(() -> normal2.sample()).limit(6) + ), + DoubleStream.generate(() -> normal3.sample()).limit(23) + ).toArray(); + int[] candidatePoints = ChangePointAggregator.candidateChangePoints(bucketValues); + ChangePointAggregator.TestStats result = ChangePointAggregator.testForChange(bucketValues, candidatePoints, 0.05); + tp += result.type() == ChangePointAggregator.Type.TREND_CHANGE ? 1 : 0; + } + assertThat(tp, greaterThan(90)); + } + + public void testProblemDistributionChange() throws IOException { + double[] bucketValues = new double[] { + 546.3651753325270, + 550.872738079514, + 551.1312487618040, + 550.3323904749380, + 549.2652495378930, + 548.9761274963630, + 549.3433969743010, + 549.0935313531350, + 551.1762550747600, + 551.3772184469220, + 548.6163495094490, + 548.5866591594080, + 546.9364791288570, + 548.1167839989470, + 549.3484016149320, + 550.4242803917040, + 551.2316023050940, + 548.4713993534340, + 546.0254901960780, + 548.4376996805110, + 561.1920529801320, + 557.3930041152260, + 565.8497217068650, + 566.787072243346, + 546.6094890510950, + 530.5905797101450, + 556.7340823970040, + 557.3857677902620, + 543.0754716981130, + 574.3297101449280, + 559.2962962962960, + 549.5202952029520, + 531.7217741935480, + 551.4333333333330, + 557.637168141593, + 545.1880733944950, + 564.6893203883500, + 543.0204081632650, + 571.820809248555, + 541.2589928057550, + 520.4387755102040 }; + int[] candidatePoints = ChangePointAggregator.candidateChangePoints(bucketValues); + ChangePointAggregator.TestStats result = ChangePointAggregator.testForChange(bucketValues, candidatePoints, 0.05); + assertThat(result.type(), equalTo(ChangePointAggregator.Type.DISTRIBUTION_CHANGE)); + } + + public void testConstant() throws IOException { double[] bucketValues = DoubleStream.generate(() -> 10).limit(100).toArray(); testChangeType( bucketValues, @@ -137,14 +341,22 @@ public void testStepChange() throws IOException { DoubleStream.generate(() -> 30 + normal.sample()).limit(20) ).toArray(); testChangeType(bucketValues, changeType -> { - assertThat(Arrays.toString(bucketValues), changeType, instanceOf(ChangeType.StepChange.class)); + assertThat( + Arrays.toString(bucketValues), + changeType, + anyOf( + // Due to the random nature of the values generated, either of these could be detected + instanceOf(ChangeType.StepChange.class), + instanceOf(ChangeType.TrendChange.class) + ) + ); assertThat(changeType.changePoint(), equalTo(20)); }); } public void testDistributionChange() throws IOException { - NormalDistribution first = new NormalDistribution(RandomGeneratorFactory.createRandomGenerator(Randomness.get()), 50, 1); - NormalDistribution second = new NormalDistribution(RandomGeneratorFactory.createRandomGenerator(Randomness.get()), 50, 5); + NormalDistribution first = new 
NormalDistribution(RandomGeneratorFactory.createRandomGenerator(Randomness.get()), 0, 1); + NormalDistribution second = new NormalDistribution(RandomGeneratorFactory.createRandomGenerator(Randomness.get()), 0, 5); double[] bucketValues = DoubleStream.concat( DoubleStream.generate(first::sample).limit(50), DoubleStream.generate(second::sample).limit(50) @@ -168,8 +380,14 @@ public void testDistributionChange() throws IOException { } public void testZeroDeviation() throws IOException { - double[] bucketValues = DoubleStream.generate(() -> 4243.1621621621625).limit(30).toArray(); - testChangeType(bucketValues, changeType -> { assertThat(changeType, instanceOf(ChangeType.Stationary.class)); }); + { + double[] bucketValues = DoubleStream.generate(() -> 4243.1621621621625).limit(30).toArray(); + testChangeType(bucketValues, changeType -> { assertThat(changeType, instanceOf(ChangeType.Stationary.class)); }); + } + { + double[] bucketValues = DoubleStream.generate(() -> -4243.1621621621625).limit(30).toArray(); + testChangeType(bucketValues, changeType -> { assertThat(changeType, instanceOf(ChangeType.Stationary.class)); }); + } } public void testStepChangeEdgeCaseScenarios() throws IOException { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/autoscaling/MlProcessorAutoscalingDeciderTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/autoscaling/MlProcessorAutoscalingDeciderTests.java index a56ad515690cf..97fd66e284010 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/autoscaling/MlProcessorAutoscalingDeciderTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/autoscaling/MlProcessorAutoscalingDeciderTests.java @@ -25,8 +25,8 @@ import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingInfo; import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingState; import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignment; +import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.ml.MachineLearning; -import org.elasticsearch.xpack.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.junit.Before; import java.util.Map; diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentClusterServiceTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentClusterServiceTests.java index f966ac85c7a65..f08d2735be8a5 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentClusterServiceTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentClusterServiceTests.java @@ -58,6 +58,7 @@ import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingState; import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingStateAndReason; import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignment; +import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.core.ml.job.config.JobState; import org.elasticsearch.xpack.ml.MachineLearning; import org.elasticsearch.xpack.ml.autoscaling.NodeAvailabilityZoneMapper; diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentMetadataTests.java 
b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentMetadataTests.java index 3057da83d11e9..6c5223eae4d99 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentMetadataTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentMetadataTests.java @@ -14,6 +14,7 @@ import org.elasticsearch.xpack.core.ml.action.StartTrainedModelDeploymentAction; import org.elasticsearch.xpack.core.ml.inference.assignment.Priority; import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignment; +import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignmentTests; import java.io.IOException; diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentNodeServiceTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentNodeServiceTests.java index 795f184a49a4d..2444134ce2920 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentNodeServiceTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentNodeServiceTests.java @@ -38,6 +38,7 @@ import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingInfo; import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingState; import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignment; +import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.ml.inference.deployment.DeploymentManager; import org.elasticsearch.xpack.ml.inference.deployment.TrainedModelDeploymentTask; import org.junit.After; diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentRebalancerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentRebalancerTests.java index 334fdfbb8b922..53b737b38c284 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentRebalancerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentRebalancerTests.java @@ -18,6 +18,7 @@ import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingInfo; import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingState; import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignment; +import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.ml.MachineLearning; import org.elasticsearch.xpack.ml.job.NodeLoad; diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/loadingservice/ModelLoadingServiceTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/loadingservice/ModelLoadingServiceTests.java index 2f4640cfa38dc..40b0dd519f7d8 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/loadingservice/ModelLoadingServiceTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/loadingservice/ModelLoadingServiceTests.java @@ -39,6 +39,7 @@ import org.elasticsearch.xcontent.XContentFactory; 
import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.ml.action.GetTrainedModelsAction; +import org.elasticsearch.xpack.core.ml.inference.ModelAliasMetadata; import org.elasticsearch.xpack.core.ml.inference.TrainedModelConfig; import org.elasticsearch.xpack.core.ml.inference.TrainedModelInput; import org.elasticsearch.xpack.core.ml.inference.trainedmodel.ClassificationConfig; @@ -46,7 +47,6 @@ import org.elasticsearch.xpack.core.ml.inference.trainedmodel.inference.InferenceDefinition; import org.elasticsearch.xpack.core.ml.job.messages.Messages; import org.elasticsearch.xpack.ml.MachineLearning; -import org.elasticsearch.xpack.ml.inference.ModelAliasMetadata; import org.elasticsearch.xpack.ml.inference.TrainedModelStatsService; import org.elasticsearch.xpack.ml.inference.ingest.InferenceProcessor; import org.elasticsearch.xpack.ml.inference.persistence.TrainedModelProvider; diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/NodeLoadDetectorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/NodeLoadDetectorTests.java index ccc7f14d2264e..fef9b07429702 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/NodeLoadDetectorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/NodeLoadDetectorTests.java @@ -21,9 +21,9 @@ import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingInfo; import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingState; import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignment; +import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.core.ml.job.config.JobState; import org.elasticsearch.xpack.ml.MachineLearning; -import org.elasticsearch.xpack.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.ml.job.task.OpenJobPersistentTasksExecutorTests; import org.elasticsearch.xpack.ml.process.MlMemoryTracker; import org.junit.Before; diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManagerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManagerTests.java index bde774686b84c..076e95ea14533 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManagerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManagerTests.java @@ -21,6 +21,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.common.util.concurrent.ThreadContext; @@ -53,6 +54,7 @@ import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndexFields; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.DataCounts; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSizeStats; +import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSizeStats.AssignmentMemoryBasis; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSnapshot; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.Quantiles; 
import org.elasticsearch.xpack.ml.MachineLearning; @@ -849,6 +851,35 @@ public void testCreate_givenNonZeroCountsAndNoModelSnapshotNorQuantiles() { verifyNoMoreInteractions(auditor); } + public void testGetOpenProcessMemoryUsage() { + modelSnapshot = null; + quantiles = null; + dataCounts = new DataCounts("foo"); + dataCounts.setLatestRecordTimeStamp(new Date(0L)); + dataCounts.incrementProcessedRecordCount(42L); + long modelMemoryLimitBytes = ByteSizeValue.ofMb(randomIntBetween(10, 1000)).getBytes(); + long peakModelBytes = randomLongBetween(100000, modelMemoryLimitBytes - 1); + long modelBytes = randomLongBetween(1, peakModelBytes - 1); + AssignmentMemoryBasis assignmentMemoryBasis = randomFrom(AssignmentMemoryBasis.values()); + modelSizeStats = new ModelSizeStats.Builder("foo").setModelBytesMemoryLimit(modelMemoryLimitBytes) + .setPeakModelBytes(peakModelBytes) + .setModelBytes(modelBytes) + .setAssignmentMemoryBasis(assignmentMemoryBasis) + .build(); + when(autodetectCommunicator.getModelSizeStats()).thenReturn(modelSizeStats); + AutodetectProcessManager manager = createSpyManager(); + JobTask jobTask = mock(JobTask.class); + when(jobTask.getJobId()).thenReturn("foo"); + manager.openJob(jobTask, clusterState, DEFAULT_MASTER_NODE_TIMEOUT, (e, b) -> {}); + + long expectedSizeBytes = Job.PROCESS_MEMORY_OVERHEAD.getBytes() + switch (assignmentMemoryBasis) { + case MODEL_MEMORY_LIMIT -> modelMemoryLimitBytes; + case CURRENT_MODEL_BYTES -> modelBytes; + case PEAK_MODEL_BYTES -> peakModelBytes; + }; + assertThat(manager.getOpenProcessMemoryUsage(), equalTo(ByteSizeValue.ofBytes(expectedSizeBytes))); + } + private AutodetectProcessManager createNonSpyManager(String jobId) { ExecutorService executorService = mock(ExecutorService.class); when(threadPool.executor(anyString())).thenReturn(executorService); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/queries/WeightedTokensQueryBuilderTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/queries/WeightedTokensQueryBuilderTests.java index 4d91c66de4b9e..59d6db2c2ea4f 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/queries/WeightedTokensQueryBuilderTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/queries/WeightedTokensQueryBuilderTests.java @@ -231,7 +231,7 @@ public void testPruningIsAppliedCorrectly() throws IOException { WeightedTokensQueryBuilder queryThatShouldBePruned = new WeightedTokensQueryBuilder( RANK_FEATURES_FIELD, inputTokens, - new TokenPruningConfig(1.5f, 0.5f, false) + new TokenPruningConfig(2, 0.5f, false) ); query = queryThatShouldBePruned.doToQuery(context); assertCorrectLuceneQuery("queryThatShouldBePruned", query, List.of("dog", "jumped", "on", "me")); @@ -239,7 +239,7 @@ public void testPruningIsAppliedCorrectly() throws IOException { WeightedTokensQueryBuilder onlyScorePrunedTokensQuery = new WeightedTokensQueryBuilder( RANK_FEATURES_FIELD, inputTokens, - new TokenPruningConfig(1.5f, 0.5f, true) + new TokenPruningConfig(2, 0.5f, true) ); query = onlyScorePrunedTokensQuery.doToQuery(context); assertCorrectLuceneQuery("onlyScorePrunedTokensQuery", query, List.of("the", "black")); @@ -361,21 +361,21 @@ public void testIllegalValues() { { IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> new WeightedTokensQueryBuilder("field name", weightedTokens, new TokenPruningConfig(-1f, 0.0f, false)) + () -> new WeightedTokensQueryBuilder("field name", weightedTokens, new TokenPruningConfig(-1, 0.0f, false)) ); - 
assertEquals("[tokens_freq_ratio_threshold] must be between [1.0] and [100.0], got -1.0", e.getMessage()); + assertEquals("[tokens_freq_ratio_threshold] must be between [1] and [100], got -1.0", e.getMessage()); } { IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> new WeightedTokensQueryBuilder("field name", weightedTokens, new TokenPruningConfig(101f, 0.0f, false)) + () -> new WeightedTokensQueryBuilder("field name", weightedTokens, new TokenPruningConfig(101, 0.0f, false)) ); - assertEquals("[tokens_freq_ratio_threshold] must be between [1.0] and [100.0], got 101.0", e.getMessage()); + assertEquals("[tokens_freq_ratio_threshold] must be between [1] and [100], got 101.0", e.getMessage()); } { IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> new WeightedTokensQueryBuilder("field name", weightedTokens, new TokenPruningConfig(5f, 5f, false)) + () -> new WeightedTokensQueryBuilder("field name", weightedTokens, new TokenPruningConfig(5, 5f, false)) ); assertEquals("[tokens_weight_threshold] must be between 0 and 1", e.getMessage()); } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/persistence/ResultsPersisterServiceTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/persistence/ResultsPersisterServiceTests.java index 9e2f14aaabd84..f8ffed0864372 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/persistence/ResultsPersisterServiceTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/persistence/ResultsPersisterServiceTests.java @@ -33,6 +33,8 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndexPrimaryShardNotAllocatedException; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.search.SearchHits; +import org.elasticsearch.search.SearchResponseUtils; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.ClientHelper; @@ -75,23 +77,28 @@ public class ResultsPersisterServiceTests extends ESTestCase { // Constants for searchWithRetry tests private static final SearchRequest SEARCH_REQUEST = new SearchRequest("my-index"); - private static final SearchResponse SEARCH_RESPONSE_SUCCESS = new SearchResponse( - null, + public static final SearchResponse SEARCH_RESPONSE_SUCCESS = SearchResponseUtils.emptyWithTotalHits( null, 1, 1, 0, - 0, + 1L, ShardSearchFailure.EMPTY_ARRAY, null ); - private static final SearchResponse SEARCH_RESPONSE_FAILURE = new SearchResponse( + public static final SearchResponse SEARCH_RESPONSE_FAILURE = new SearchResponse( + SearchHits.EMPTY_WITHOUT_TOTAL_HITS, + null, + null, + false, null, null, 1, + null, + 1, 0, 0, - 0, + 1L, ShardSearchFailure.EMPTY_ARRAY, null ); @@ -418,4 +425,5 @@ public static ResultsPersisterService buildResultsPersisterService(OriginSetting }).when(tp).schedule(any(Runnable.class), any(TimeValue.class), any(Executor.class)); return new ResultsPersisterService(tp, client, clusterService, Settings.EMPTY); } + } diff --git a/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/CancellationIT.java b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/CancellationIT.java index ef5198499ff09..fa28877f5b4c1 100644 --- a/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/CancellationIT.java +++ 
b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/CancellationIT.java @@ -10,6 +10,7 @@ import org.apache.http.entity.ContentType; import org.apache.http.entity.StringEntity; import org.apache.logging.log4j.LogManager; +import org.apache.lucene.tests.util.LuceneTestCase; import org.apache.lucene.util.SetOnce; import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; @@ -48,6 +49,7 @@ import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.instanceOf; +@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/103809") public class CancellationIT extends ProfilingTestCase { @Override protected Collection> nodePlugins() { diff --git a/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetFlameGraphActionIT.java b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetFlameGraphActionIT.java index 8553574d39646..e0e4ef2a12985 100644 --- a/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetFlameGraphActionIT.java +++ b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetFlameGraphActionIT.java @@ -7,6 +7,9 @@ package org.elasticsearch.xpack.profiling; +import org.apache.lucene.tests.util.LuceneTestCase; + +@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/103809") public class GetFlameGraphActionIT extends ProfilingTestCase { public void testGetStackTracesUnfiltered() throws Exception { GetStackTracesRequest request = new GetStackTracesRequest(1000, 600.0d, 1.0d, null, null, null, null, null, null, null, null); diff --git a/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetStackTracesActionIT.java b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetStackTracesActionIT.java index 289f6896ed698..9c60a6bcdfc1c 100644 --- a/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetStackTracesActionIT.java +++ b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetStackTracesActionIT.java @@ -7,11 +7,13 @@ package org.elasticsearch.xpack.profiling; +import org.apache.lucene.tests.util.LuceneTestCase; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.TermQueryBuilder; import java.util.List; +@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/103809") public class GetStackTracesActionIT extends ProfilingTestCase { public void testGetStackTracesUnfiltered() throws Exception { GetStackTracesRequest request = new GetStackTracesRequest(1000, 600.0d, 1.0d, null, null, null, null, null, null, null, null); @@ -30,8 +32,8 @@ public void testGetStackTracesUnfiltered() throws Exception { assertEquals(18, stackTrace.fileIds.size()); assertEquals(18, stackTrace.frameIds.size()); assertEquals(18, stackTrace.typeIds.size()); - assertEquals(0.0000098789d, stackTrace.annualCO2Tons, 0.0000000001d); - assertEquals(0.093075d, stackTrace.annualCostsUSD, 0.000001d); + assertEquals(0.0000048475146d, stackTrace.annualCO2Tons, 0.0000000001d); + assertEquals(0.18834d, stackTrace.annualCostsUSD, 0.00001d); assertNotNull(response.getStackFrames()); StackFrame stackFrame = response.getStackFrames().get("8NlMClggx8jaziUTJXlmWAAAAAAAAIYI"); diff --git 
a/x-pack/plugin/profiling/src/internalClusterTest/resources/data/profiling-hosts.ndjson b/x-pack/plugin/profiling/src/internalClusterTest/resources/data/profiling-hosts.ndjson index cd3ddc1271d2d..a830ef8da66f1 100644 --- a/x-pack/plugin/profiling/src/internalClusterTest/resources/data/profiling-hosts.ndjson +++ b/x-pack/plugin/profiling/src/internalClusterTest/resources/data/profiling-hosts.ndjson @@ -1,2 +1,2 @@ {"create": {"_index": "profiling-hosts", "_id": "eLH27YsBj2lLi3tJYlvr"}} -{"profiling.project.id": 100, "host.id": "8457605156473051743", "@timestamp": 1700504426, "ecs.version": "1.12.0", "profiling.agent.build_timestamp": 1688111067, "profiling.instance.private_ipv4s": ["192.168.1.2"], "ec2.instance_life_cycle": "on-demand", "profiling.agent.config.map_scale_factor": 0, "ec2.instance_type": "i3.2xlarge", "profiling.host.ip": "192.168.1.2", "profiling.agent.config.bpf_log_level": 0, "profiling.host.sysctl.net.core.bpf_jit_enable": 1, "profiling.agent.config.file": "/etc/prodfiler/prodfiler.conf", "ec2.local_ipv4": "192.168.1.2", "profiling.agent.config.no_kernel_version_check": false, "profiling.host.machine": "x86_64", ",profiling.host.tags": ["cloud_provider:aws", "cloud_environment:qa", "cloud_region:eu-west-1"], "profiling.agent.config.probabilistic_threshold": 100, "profiling.agent.config.disable_tls": false, "profiling.agent.config.tracers": "all", "profiling.agent.start_time": 1700090045589, "profiling.agent.config.max_elements_per_interval": 800, "ec2.placement.region": "eu-west-1", "profiling.agent.config.present_cpu_cores": 8, "profiling.host.kernel_version": "9.9.9-0-aws", "profiling.agent.config.bpf_log_size": 65536, "profiling.agent.config.known_traces_entries": 65536, "profiling.host.sysctl.kernel.unprivileged_bpf_disabled": 1, "profiling.agent.config.verbose": false, "profiling.agent.config.probabilistic_interval": "1m0s", "ec2.placement.availability_zone_id": "euw1-az1", "ec2.security_groups": "", "ec2.local_hostname": "ip-192-168-1-2.eu-west-1.compute.internal", "ec2.placement.availability_zone": "eu-west-1c", "profiling.agent.config.upload_symbols": false, "profiling.host.sysctl.kernel.bpf_stats_enabled": 0, "profiling.host.name": "ip-192-168-1-2", "ec2.mac": "00:11:22:33:44:55", "profiling.host.kernel_proc_version": "Linux version 9.9.9-0-aws", "profiling.agent.config.cache_directory": "/var/cache/optimyze/", "profiling.agent.version": "v8.12.0", "ec2.hostname": "ip-192-168-1-2.eu-west-1.compute.internal", "profiling.agent.config.elastic_mode": false, "ec2.ami_id": "ami-aaaaaaaaaaa", "ec2.instance_id": "i-0b999999999999999" } +{"profiling.project.id": 100, "host.id": "8457605156473051743", "@timestamp": 1700504426, "ecs.version": "1.12.0", "profiling.agent.build_timestamp": 1688111067, "profiling.instance.private_ipv4s": ["192.168.1.2"], "ec2.instance_life_cycle": "on-demand", "profiling.agent.config.map_scale_factor": 0, "ec2.instance_type": "i3.2xlarge", "profiling.host.ip": "192.168.1.2", "profiling.agent.config.bpf_log_level": 0, "profiling.host.sysctl.net.core.bpf_jit_enable": 1, "profiling.agent.config.file": "/etc/prodfiler/prodfiler.conf", "ec2.local_ipv4": "192.168.1.2", "profiling.agent.config.no_kernel_version_check": false, "profiling.host.machine": "x86_64", "profiling.host.tags": ["cloud_provider:aws", "cloud_environment:qa", "cloud_region:eu-west-1"], "profiling.agent.config.probabilistic_threshold": 100, "profiling.agent.config.disable_tls": false, "profiling.agent.config.tracers": "all", "profiling.agent.start_time": 1700090045589, 
"profiling.agent.config.max_elements_per_interval": 800, "ec2.placement.region": "eu-west-1", "profiling.agent.config.present_cpu_cores": 8, "profiling.host.kernel_version": "9.9.9-0-aws", "profiling.agent.config.bpf_log_size": 65536, "profiling.agent.config.known_traces_entries": 65536, "profiling.host.sysctl.kernel.unprivileged_bpf_disabled": 1, "profiling.agent.config.verbose": false, "profiling.agent.config.probabilistic_interval": "1m0s", "ec2.placement.availability_zone_id": "euw1-az1", "ec2.security_groups": "", "ec2.local_hostname": "ip-192-168-1-2.eu-west-1.compute.internal", "ec2.placement.availability_zone": "eu-west-1c", "profiling.agent.config.upload_symbols": false, "profiling.host.sysctl.kernel.bpf_stats_enabled": 0, "profiling.host.name": "ip-192-168-1-2", "ec2.mac": "00:11:22:33:44:55", "profiling.host.kernel_proc_version": "Linux version 9.9.9-0-aws", "profiling.agent.config.cache_directory": "/var/cache/optimyze/", "profiling.agent.version": "v8.12.0", "ec2.hostname": "ip-192-168-1-2.eu-west-1.compute.internal", "profiling.agent.config.elastic_mode": false, "ec2.ami_id": "ami-aaaaaaaaaaa", "ec2.instance_id": "i-0b999999999999999" } diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/CO2Calculator.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/CO2Calculator.java index 0d92bf0a78d09..1e44cba4e62b2 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/CO2Calculator.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/CO2Calculator.java @@ -18,7 +18,6 @@ final class CO2Calculator { private static final double DEFAULT_KILOWATTS_PER_CORE_ARM64 = 2.8d / 1000.0d; // unit: watt / core private static final double DEFAULT_KILOWATTS_PER_CORE = DEFAULT_KILOWATTS_PER_CORE_X86; // unit: watt / core private static final double DEFAULT_DATACENTER_PUE = 1.7d; - private final InstanceTypeService instanceTypeService; private final Map hostMetadata; private final double samplingDurationInSeconds; private final double customCO2PerKWH; @@ -27,7 +26,6 @@ final class CO2Calculator { private final double customKilowattsPerCoreARM64; CO2Calculator( - InstanceTypeService instanceTypeService, Map hostMetadata, double samplingDurationInSeconds, Double customCO2PerKWH, @@ -35,7 +33,6 @@ final class CO2Calculator { Double customPerCoreWattX86, Double customPerCoreWattARM64 ) { - this.instanceTypeService = instanceTypeService; this.hostMetadata = hostMetadata; this.samplingDurationInSeconds = samplingDurationInSeconds > 0 ? samplingDurationInSeconds : 1.0d; // avoid division by zero this.customCO2PerKWH = customCO2PerKWH == null ? 
DEFAULT_CO2_TONS_PER_KWH : customCO2PerKWH; @@ -54,7 +51,7 @@ public double getAnnualCO2Tons(String hostID, long samples) { return DEFAULT_KILOWATTS_PER_CORE * customCO2PerKWH * annualCoreHours * customDatacenterPUE; } - CostEntry costs = instanceTypeService.getCosts(host.instanceType); + CostEntry costs = InstanceTypeService.getCosts(host.instanceType); if (costs == null) { return getKiloWattsPerCore(host) * getCO2TonsPerKWH(host) * annualCoreHours * getDatacenterPUE(host); } diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/CostCalculator.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/CostCalculator.java index 05319ba7d1cc4..ecaaee5d3bf4b 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/CostCalculator.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/CostCalculator.java @@ -15,20 +15,17 @@ final class CostCalculator { private static final double SECONDS_PER_YEAR = SECONDS_PER_HOUR * 24 * 365.0d; // unit: seconds private static final double DEFAULT_COST_USD_PER_CORE_HOUR = 0.0425d; // unit: USD / (core * hour) private static final double DEFAULT_AWS_COST_FACTOR = 1.0d; - private final InstanceTypeService instanceTypeService; private final Map hostMetadata; private final double samplingDurationInSeconds; private final double awsCostFactor; private final double customCostPerCoreHour; CostCalculator( - InstanceTypeService instanceTypeService, Map hostMetadata, double samplingDurationInSeconds, Double awsCostFactor, Double customCostPerCoreHour ) { - this.instanceTypeService = instanceTypeService; this.hostMetadata = hostMetadata; this.samplingDurationInSeconds = samplingDurationInSeconds > 0 ? samplingDurationInSeconds : 1.0d; // avoid division by zero this.awsCostFactor = awsCostFactor == null ? DEFAULT_AWS_COST_FACTOR : awsCostFactor; @@ -45,7 +42,7 @@ public double annualCostsUSD(String hostID, double samples) { double providerCostFactor = host.instanceType.provider.equals("aws") ? 
awsCostFactor : 1.0d; - CostEntry costs = instanceTypeService.getCosts(host.instanceType); + CostEntry costs = InstanceTypeService.getCosts(host.instanceType); if (costs == null) { return annualCoreHours * customCostPerCoreHour * providerCostFactor; } diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetFlamegraphAction.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetFlamegraphAction.java index 79f8632238d4c..fc04f735fdf87 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetFlamegraphAction.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetFlamegraphAction.java @@ -7,12 +7,13 @@ package org.elasticsearch.xpack.profiling; import org.elasticsearch.action.ActionType; +import org.elasticsearch.common.io.stream.Writeable; public final class GetFlamegraphAction extends ActionType { public static final GetFlamegraphAction INSTANCE = new GetFlamegraphAction(); public static final String NAME = "indices:data/read/profiling/flamegraph"; private GetFlamegraphAction() { - super(NAME, GetFlamegraphResponse::new); + super(NAME, Writeable.Reader.localOnly()); } } diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetFlamegraphResponse.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetFlamegraphResponse.java index 457faecf4ad54..468b74ed16000 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetFlamegraphResponse.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetFlamegraphResponse.java @@ -8,8 +8,8 @@ package org.elasticsearch.xpack.profiling; import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.support.TransportAction; import org.elasticsearch.common.collect.Iterators; -import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ChunkedToXContentHelper; import org.elasticsearch.common.xcontent.ChunkedToXContentObject; @@ -43,30 +43,6 @@ public class GetFlamegraphResponse extends ActionResponse implements ChunkedToXC private final List annualCostsUSDInclusive; private final List annualCostsUSDExclusive; - public GetFlamegraphResponse(StreamInput in) throws IOException { - this.size = in.readInt(); - this.samplingRate = in.readDouble(); - this.edges = in.readCollectionAsList(i -> i.readMap(StreamInput::readInt)); - this.fileIds = in.readCollectionAsList(StreamInput::readString); - this.frameTypes = in.readCollectionAsList(StreamInput::readInt); - this.inlineFrames = in.readCollectionAsList(StreamInput::readBoolean); - this.fileNames = in.readCollectionAsList(StreamInput::readString); - this.addressOrLines = in.readCollectionAsList(StreamInput::readInt); - this.functionNames = in.readCollectionAsList(StreamInput::readString); - this.functionOffsets = in.readCollectionAsList(StreamInput::readInt); - this.sourceFileNames = in.readCollectionAsList(StreamInput::readString); - this.sourceLines = in.readCollectionAsList(StreamInput::readInt); - this.countInclusive = in.readCollectionAsList(StreamInput::readLong); - this.countExclusive = in.readCollectionAsList(StreamInput::readLong); - this.annualCO2TonsInclusive = in.readCollectionAsList(StreamInput::readDouble); - this.annualCO2TonsExclusive = in.readCollectionAsList(StreamInput::readDouble); - this.annualCostsUSDInclusive = 
in.readCollectionAsList(StreamInput::readDouble); - this.annualCostsUSDExclusive = in.readCollectionAsList(StreamInput::readDouble); - this.selfCPU = in.readLong(); - this.totalCPU = in.readLong(); - this.totalSamples = in.readLong(); - } - public GetFlamegraphResponse( int size, double samplingRate, @@ -115,27 +91,7 @@ public GetFlamegraphResponse( @Override public void writeTo(StreamOutput out) throws IOException { - out.writeInt(this.size); - out.writeDouble(this.samplingRate); - out.writeCollection(this.edges, (o, v) -> o.writeMap(v, StreamOutput::writeString, StreamOutput::writeInt)); - out.writeCollection(this.fileIds, StreamOutput::writeString); - out.writeCollection(this.frameTypes, StreamOutput::writeInt); - out.writeCollection(this.inlineFrames, StreamOutput::writeBoolean); - out.writeCollection(this.fileNames, StreamOutput::writeString); - out.writeCollection(this.addressOrLines, StreamOutput::writeInt); - out.writeCollection(this.functionNames, StreamOutput::writeString); - out.writeCollection(this.functionOffsets, StreamOutput::writeInt); - out.writeCollection(this.sourceFileNames, StreamOutput::writeString); - out.writeCollection(this.sourceLines, StreamOutput::writeInt); - out.writeCollection(this.countInclusive, StreamOutput::writeLong); - out.writeCollection(this.countExclusive, StreamOutput::writeLong); - out.writeCollection(this.annualCO2TonsInclusive, StreamOutput::writeDouble); - out.writeCollection(this.annualCO2TonsExclusive, StreamOutput::writeDouble); - out.writeCollection(this.annualCostsUSDInclusive, StreamOutput::writeDouble); - out.writeCollection(this.annualCostsUSDExclusive, StreamOutput::writeDouble); - out.writeLong(this.selfCPU); - out.writeLong(this.totalCPU); - out.writeLong(this.totalSamples); + TransportAction.localOnly(); } public int getSize() { diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStackTracesAction.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStackTracesAction.java index 8df5b1ec9154e..84ab6643be781 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStackTracesAction.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStackTracesAction.java @@ -7,12 +7,13 @@ package org.elasticsearch.xpack.profiling; import org.elasticsearch.action.ActionType; +import org.elasticsearch.common.io.stream.Writeable; public final class GetStackTracesAction extends ActionType { public static final GetStackTracesAction INSTANCE = new GetStackTracesAction(); public static final String NAME = "indices:data/read/profiling/stack_traces"; private GetStackTracesAction() { - super(NAME, GetStackTracesResponse::new); + super(NAME, Writeable.Reader.localOnly()); } } diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStackTracesRequest.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStackTracesRequest.java index f81b5f01caae3..efa8fc1d64244 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStackTracesRequest.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStackTracesRequest.java @@ -10,8 +10,8 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.TransportAction; import org.elasticsearch.common.ParsingException; -import 
org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.tasks.CancellableTask; @@ -93,35 +93,9 @@ public GetStackTracesRequest( this.customCostPerCoreHour = customCostPerCoreHour; } - public GetStackTracesRequest(StreamInput in) throws IOException { - this.query = in.readOptionalNamedWriteable(QueryBuilder.class); - this.sampleSize = in.readOptionalInt(); - this.requestedDuration = in.readOptionalDouble(); - this.awsCostFactor = in.readOptionalDouble(); - this.adjustSampleCount = in.readOptionalBoolean(); - this.indices = in.readOptionalString(); - this.stackTraceIds = in.readOptionalString(); - this.customCO2PerKWH = in.readOptionalDouble(); - this.customDatacenterPUE = in.readOptionalDouble(); - this.customPerCoreWattX86 = in.readOptionalDouble(); - this.customPerCoreWattARM64 = in.readOptionalDouble(); - this.customCostPerCoreHour = in.readOptionalDouble(); - } - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeOptionalNamedWriteable(query); - out.writeOptionalInt(sampleSize); - out.writeOptionalDouble(requestedDuration); - out.writeOptionalDouble(awsCostFactor); - out.writeOptionalBoolean(adjustSampleCount); - out.writeOptionalString(indices); - out.writeOptionalString(stackTraceIds); - out.writeOptionalDouble(customCO2PerKWH); - out.writeOptionalDouble(customDatacenterPUE); - out.writeOptionalDouble(customPerCoreWattX86); - out.writeOptionalDouble(customPerCoreWattARM64); - out.writeOptionalDouble(customCostPerCoreHour); + public void writeTo(StreamOutput out) { + TransportAction.localOnly(); } public Integer getSampleSize() { diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStackTracesResponse.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStackTracesResponse.java index 2f1e15252c277..89c0b4ab6b0fb 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStackTracesResponse.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStackTracesResponse.java @@ -7,16 +7,15 @@ package org.elasticsearch.xpack.profiling; import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.support.TransportAction; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Iterators; -import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ChunkedToXContentHelper; import org.elasticsearch.common.xcontent.ChunkedToXContentObject; import org.elasticsearch.core.Nullable; import org.elasticsearch.xcontent.ToXContent; -import java.io.IOException; import java.util.Collections; import java.util.Iterator; import java.util.Map; @@ -36,37 +35,6 @@ public class GetStackTracesResponse extends ActionResponse implements ChunkedToX private final double samplingRate; private final long totalSamples; - public GetStackTracesResponse(StreamInput in) throws IOException { - this.stackTraces = in.readBoolean() - ? in.readMap( - i -> new StackTrace( - i.readCollectionAsList(StreamInput::readInt), - i.readCollectionAsList(StreamInput::readString), - i.readCollectionAsList(StreamInput::readString), - i.readCollectionAsList(StreamInput::readInt), - i.readDouble(), - i.readDouble(), - i.readLong() - ) - ) - : null; - this.stackFrames = in.readBoolean() - ? 
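The serialization code being deleted around this point is part of a broader pattern in this change: GetStackTracesAction, GetFlamegraphAction and their request/response classes only ever run on the coordinating node, so their hand-written StreamInput/StreamOutput round-trips are replaced with local-only markers that fail fast if (de)serialization is ever attempted. The resulting shape, roughly (ExampleAction and ExampleResponse are hypothetical stand-ins, not names from this diff):

```java
// A local-only action after this refactoring: the reader throws if the
// response is ever read from the wire, and writeTo refuses to serialize.
public final class ExampleAction extends ActionType<ExampleResponse> {
    public static final ExampleAction INSTANCE = new ExampleAction();
    public static final String NAME = "indices:data/read/example";

    private ExampleAction() {
        super(NAME, Writeable.Reader.localOnly());
    }
}
```

On the response side, writeTo collapses to a single TransportAction.localOnly() call, as the hunks above and below show.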
in.readMap( - i -> new StackFrame( - i.readCollectionAsList(StreamInput::readString), - i.readCollectionAsList(StreamInput::readString), - i.readCollectionAsList(StreamInput::readInt), - i.readCollectionAsList(StreamInput::readInt) - ) - ) - : null; - this.executables = in.readBoolean() ? in.readMap(StreamInput::readString) : null; - this.stackTraceEvents = in.readBoolean() ? in.readMap(i -> new TraceEvent(i.readString(), i.readLong())) : null; - this.totalFrames = in.readInt(); - this.samplingRate = in.readDouble(); - this.totalSamples = in.readLong(); - } - public GetStackTracesResponse( Map stackTraces, Map stackFrames, @@ -86,50 +54,8 @@ public GetStackTracesResponse( } @Override - public void writeTo(StreamOutput out) throws IOException { - if (stackTraces != null) { - out.writeBoolean(true); - out.writeMap(stackTraces, (o, v) -> { - o.writeCollection(v.addressOrLines, StreamOutput::writeInt); - o.writeStringCollection(v.fileIds); - o.writeStringCollection(v.frameIds); - o.writeCollection(v.typeIds, StreamOutput::writeInt); - o.writeDouble(v.annualCO2Tons); - o.writeDouble(v.annualCostsUSD); - o.writeLong(v.count); - }); - } else { - out.writeBoolean(false); - } - if (stackFrames != null) { - out.writeBoolean(true); - out.writeMap(stackFrames, (o, v) -> { - o.writeStringCollection(v.fileName); - o.writeStringCollection(v.functionName); - o.writeCollection(v.functionOffset, StreamOutput::writeInt); - o.writeCollection(v.lineNumber, StreamOutput::writeInt); - }); - } else { - out.writeBoolean(false); - } - if (executables != null) { - out.writeBoolean(true); - out.writeMap(executables, StreamOutput::writeString); - } else { - out.writeBoolean(false); - } - if (stackTraceEvents != null) { - out.writeBoolean(true); - out.writeMap(stackTraceEvents, (o, v) -> { - o.writeString(v.stacktraceID); - o.writeLong(v.count); - }); - } else { - out.writeBoolean(false); - } - out.writeInt(totalFrames); - out.writeDouble(samplingRate); - out.writeLong(totalSamples); + public void writeTo(StreamOutput out) { + TransportAction.localOnly(); } public Map getStackTraces() { diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/InstanceType.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/InstanceType.java index 98e75ff264375..150b2639e9ac3 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/InstanceType.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/InstanceType.java @@ -23,19 +23,9 @@ final class InstanceType implements ToXContentObject { final String name; InstanceType(String provider, String region, String name) { - this.provider = provider; - this.region = region; - this.name = name; - } - - /** - * Creates a {@link InstanceType} from a {@link Map} of source data provided from JSON or profiling-costs. - * - * @param source the source data - * @return the {@link InstanceType} - */ - public static InstanceType fromCostSource(Map source) { - return new InstanceType((String) source.get("provider"), (String) source.get("region"), (String) source.get("instance_type")); + this.provider = provider != null ? provider : ""; + this.region = region != null ? region : ""; + this.name = name != null ? name : ""; } /** @@ -45,16 +35,45 @@ public static InstanceType fromCostSource(Map source) { * @return the {@link InstanceType} */ public static InstanceType fromHostSource(Map source) { + // Check and handle AWS. 
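The rewritten fromHostSource, continuing below, detects the cloud provider field by field: ec2.placement.region for AWS, gce.instance.zone for GCP, azure.compute.location for Azure, and the ECS-style profiling.host.tags as a fallback. The only non-obvious step is reducing a GCP zone to its region; extracted into a standalone sketch:

```java
// GCP reports a zone path; the region is the zone name minus its trailing
// zone letter: "projects/123456789/zones/europe-west1-b" -> "europe-west1".
static String regionFromGceZone(String zone) {
    String region = zone.substring(zone.lastIndexOf('/') + 1); // "europe-west1-b"
    String[] tokens = region.split("-", 3);                    // ["europe", "west1", "b"]
    return tokens.length > 2 ? tokens[0] + "-" + tokens[1] : region;
}
```

Malformed zones fall through with whatever trails the last slash, which is the behavior the zone fuzzer test added later in this change locks in.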
+ String region = (String) source.get("ec2.placement.region"); + if (region != null) { + String instanceType = (String) source.get("ec2.instance_type"); + return new InstanceType("aws", region, instanceType); + } + + // Check and handle GCP. + String zone = (String) source.get("gce.instance.zone"); + if (zone != null) { + // example: "gce.instance.zone": "projects/123456789/zones/europe-west1-b" + region = zone.substring(zone.lastIndexOf('/') + 1); + // region consists of the zone's first two tokens + String[] tokens = region.split("-", 3); + if (tokens.length > 2) { + region = tokens[0] + "-" + tokens[1]; + } + + // Support for instance type is planned for 8.13. + return new InstanceType("gcp", region, null); + } + + // Check and handle Azure. + region = (String) source.get("azure.compute.location"); + if (region != null) { + // example: "azure.compute.location": "eastus2" + // Support for instance type is planned for 8.13. + return new InstanceType("azure", region, null); + } + + // Support for configured tags (ECS). // Example of tags: // "profiling.host.tags": [ // "cloud_provider:aws", // "cloud_environment:qa", // "cloud_region:eu-west-1", // ], - String provider = ""; - String region = ""; - String instanceType = ""; - + String provider = null; + region = null; List<String> tags = listOf(source.get("profiling.host.tags")); for (String tag : tags) { String[] kv = tag.toLowerCase(Locale.ROOT).split(":", 2); @@ -69,14 +88,7 @@ public static InstanceType fromHostSource(Map<String, Object> source) { } } - // We only support AWS for 8.12, but plan for GCP and Azure later. - // "gcp": check 'gce.instance.name' or 'gce.instance.name' to extract the instanceType - // "azure": extract the instanceType - if ("aws".equals(provider)) { - instanceType = (String) source.get("ec2.instance_type"); - } - - return new InstanceType(provider, region, instanceType); + return new InstanceType(provider, region, null); } @SuppressWarnings("unchecked") @@ -109,7 +121,7 @@ public boolean equals(Object o) { return false; } InstanceType that = (InstanceType) o; - return Objects.equals(provider, that.provider) && Objects.equals(region, that.region) && Objects.equals(name, that.name); + return provider.equals(that.provider) && region.equals(that.region) && name.equals(that.name); } @Override @@ -119,6 +131,6 @@ public int hashCode() { @Override public String toString() { - return name + " in region " + region; + return "provider '" + provider + "' in region '" + region + "'"; } } diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/InstanceTypeService.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/InstanceTypeService.java index 570a2c499fe35..58dd19c91f966 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/InstanceTypeService.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/InstanceTypeService.java @@ -13,36 +13,51 @@ import org.elasticsearch.xcontent.XContentType; import java.io.IOException; -import java.io.UncheckedIOException; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.function.Function; import java.util.zip.GZIPInputStream; -public class InstanceTypeService { - private final Map<InstanceType, CostEntry> costsPerDatacenter = new HashMap<>(); - - public void load() { - try ( - GZIPInputStream in = new GZIPInputStream( - InstanceTypeService.class.getClassLoader().getResourceAsStream("profiling-costs.json.gz") - ) - ) { - XContentParser parser = 
XContentType.JSON.xContent().createParser(XContentParserConfiguration.EMPTY, in); - if (parser.currentToken() == null) { - parser.nextToken(); - } - List> rawData = XContentParserUtils.parseList(parser, XContentParser::map); - for (Map entry : rawData) { - costsPerDatacenter.put(InstanceType.fromCostSource(entry), CostEntry.fromSource(entry)); - } +public final class InstanceTypeService { + + private InstanceTypeService() {} - } catch (IOException e) { - throw new UncheckedIOException(e); + private static final class Holder { + private static final Map costsPerDatacenter; + + static { + final Map objects = new HashMap<>(); + final Function dedupString = s -> (String) objects.computeIfAbsent(s, Function.identity()); + final Map tmp = new HashMap<>(); + try ( + GZIPInputStream in = new GZIPInputStream( + InstanceTypeService.class.getClassLoader().getResourceAsStream("profiling-costs.json.gz") + ); + XContentParser parser = XContentType.JSON.xContent().createParser(XContentParserConfiguration.EMPTY, in) + ) { + if (parser.currentToken() == null) { + parser.nextToken(); + } + List> rawData = XContentParserUtils.parseList(parser, XContentParser::map); + for (Map entry : rawData) { + tmp.put( + new InstanceType( + dedupString.apply((String) entry.get("provider")), + dedupString.apply((String) entry.get("region")), + dedupString.apply((String) entry.get("instance_type")) + ), + (CostEntry) objects.computeIfAbsent(CostEntry.fromSource(entry), Function.identity()) + ); + } + costsPerDatacenter = Map.copyOf(tmp); + } catch (IOException e) { + throw new ExceptionInInitializerError(e); + } } } - public CostEntry getCosts(InstanceType instance) { - return costsPerDatacenter.get(instance); + public static CostEntry getCosts(InstanceType instance) { + return Holder.costsPerDatacenter.get(instance); } } diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/ProfilingIndexTemplateRegistry.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/ProfilingIndexTemplateRegistry.java index 0068d03767387..ce15982450a66 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/ProfilingIndexTemplateRegistry.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/ProfilingIndexTemplateRegistry.java @@ -174,11 +174,8 @@ protected List getLifecyclePolicies() { indexVersion("symbols", PROFILING_SYMBOLS_VERSION) ) )) { - try { - componentTemplates.put( - config.getTemplateName(), - ComponentTemplate.parse(JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, config.loadBytes())) - ); + try (var parser = JsonXContent.jsonXContent.createParser(XContentParserConfiguration.EMPTY, config.loadBytes())) { + componentTemplates.put(config.getTemplateName(), ComponentTemplate.parse(parser)); } catch (IOException e) { throw new AssertionError(e); } diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/ProfilingPlugin.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/ProfilingPlugin.java index a2459f839523b..b105cde3d5c2a 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/ProfilingPlugin.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/ProfilingPlugin.java @@ -86,24 +86,18 @@ public Collection createComponents(PluginServices services) { // set initial value updateTemplatesEnabled(PROFILING_TEMPLATES_ENABLED.get(settings)); 
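The InstanceTypeService rewrite above swaps a mutable service that had to be load()ed during plugin startup for the initialization-on-demand holder idiom: profiling-costs.json.gz is parsed at most once, on first access, with thread safety coming from class-loading guarantees, and repeated provider/region/type strings are interned through computeIfAbsent to shrink the resident table. The core of the idiom, reduced to a self-contained sketch (map contents invented for illustration):

```java
import java.util.Map;

// Initialization-on-demand holder: Holder.LOADED is computed exactly once,
// when Holder is first touched; a failure surfaces as ExceptionInInitializerError.
public final class LazyCosts {
    private LazyCosts() {}

    private static final class Holder {
        static final Map<String, Double> LOADED = load();

        private static Map<String, Double> load() {
            // the real code gunzips and parses a bundled JSON resource here
            return Map.of("aws/eu-west-1/i3.2xlarge", 0.624); // illustrative entry
        }
    }

    public static Double costOf(String key) {
        return Holder.LOADED.get(key); // triggers load() on the first call only
    }
}
```

This is also why the plugin no longer returns the service as a component: static state needs no lifecycle hooks.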
clusterService.getClusterSettings().addSettingsUpdateConsumer(PROFILING_TEMPLATES_ENABLED, this::updateTemplatesEnabled); - InstanceTypeService instanceTypeService = createInstanceTypeService(); if (enabled) { registry.get().initialize(); indexManager.get().initialize(); dataStreamManager.get().initialize(); - instanceTypeService.load(); } - return List.of(createLicenseChecker(), instanceTypeService); + return List.of(createLicenseChecker()); } protected ProfilingLicenseChecker createLicenseChecker() { return new ProfilingLicenseChecker(XPackPlugin::getSharedLicenseState); } - protected InstanceTypeService createInstanceTypeService() { - return new InstanceTypeService(); - } - public void updateCheckOutdatedIndices(boolean newValue) { if (newValue == false) { logger.info("profiling will ignore outdated indices"); diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/RestGetFlamegraphAction.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/RestGetFlamegraphAction.java index d3c8fc4fd295b..3b1b2e1789ad1 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/RestGetFlamegraphAction.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/RestGetFlamegraphAction.java @@ -11,9 +11,8 @@ import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; -import org.elasticsearch.rest.action.RestActionListener; import org.elasticsearch.rest.action.RestCancellableNodeClient; -import org.elasticsearch.rest.action.RestChunkedToXContentListener; +import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener; import java.io.IOException; import java.util.List; @@ -35,9 +34,12 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli getStackTracesRequest.setAdjustSampleCount(true); return channel -> { - RestActionListener listener = new RestChunkedToXContentListener<>(channel); RestCancellableNodeClient cancelClient = new RestCancellableNodeClient(client, request.getHttpChannel()); - cancelClient.execute(GetFlamegraphAction.INSTANCE, getStackTracesRequest, listener); + cancelClient.execute( + GetFlamegraphAction.INSTANCE, + getStackTracesRequest, + new RestRefCountedChunkedToXContentListener<>(channel) + ); }; } diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/RestGetStackTracesAction.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/RestGetStackTracesAction.java index f8ee53ce0826e..ac7e9943b6566 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/RestGetStackTracesAction.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/RestGetStackTracesAction.java @@ -11,9 +11,8 @@ import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; -import org.elasticsearch.rest.action.RestActionListener; import org.elasticsearch.rest.action.RestCancellableNodeClient; -import org.elasticsearch.rest.action.RestChunkedToXContentListener; +import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener; import java.io.IOException; import java.util.List; @@ -33,9 +32,12 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli request.applyContentParser(getStackTracesRequest::parseXContent); return channel -> { - RestActionListener listener = new 
RestChunkedToXContentListener<>(channel); RestCancellableNodeClient cancelClient = new RestCancellableNodeClient(client, request.getHttpChannel()); - cancelClient.execute(GetStackTracesAction.INSTANCE, getStackTracesRequest, listener); + cancelClient.execute( + GetStackTracesAction.INSTANCE, + getStackTracesRequest, + new RestRefCountedChunkedToXContentListener<>(channel) + ); }; } diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetFlamegraphAction.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetFlamegraphAction.java index 3cd9ded3005a2..dd78d6f1815f5 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetFlamegraphAction.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetFlamegraphAction.java @@ -11,12 +11,11 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.action.support.TransportAction; import org.elasticsearch.client.internal.Client; import org.elasticsearch.client.internal.ParentTaskAssigningClient; import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; @@ -27,7 +26,7 @@ import java.util.SortedMap; import java.util.TreeMap; -public class TransportGetFlamegraphAction extends HandledTransportAction { +public class TransportGetFlamegraphAction extends TransportAction { private static final Logger log = LogManager.getLogger(TransportGetFlamegraphAction.class); private static final StackFrame EMPTY_STACKFRAME = new StackFrame("", "", 0, 0); @@ -36,7 +35,7 @@ public class TransportGetFlamegraphAction extends HandledTransportAction { +public class TransportGetStackTracesAction extends TransportAction { private static final Logger log = LogManager.getLogger(TransportGetStackTracesAction.class); public static final Setting PROFILING_MAX_STACKTRACE_QUERY_SLICES = Setting.intSetting( @@ -111,7 +110,6 @@ public class TransportGetStackTracesAction extends HandledTransportAction hostsTable = Map.ofEntries( Map.entry(HOST_ID_A, @@ -40,7 +37,7 @@ public void testCreateFromRegularSource() { new InstanceType( "gcp", "europe-west1", - "" // Doesn't matter for unknown datacenters. + null // Doesn't matter for unknown datacenters. ), "x86_64" ) @@ -51,7 +48,7 @@ public void testCreateFromRegularSource() { new InstanceType( "azure", "northcentralus", - "" // Doesn't matter for unknown datacenters. + null // Doesn't matter for unknown datacenters. ), "aarch64" ) @@ -62,7 +59,7 @@ public void testCreateFromRegularSource() { new InstanceType( "on-prem-provider", "on-prem-region", - "" // Doesn't matter for unknown datacenters. + null // Doesn't matter for unknown datacenters. 
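Both calculator tests normalize their expectations through CostCalculator.annualCoreHours(samplingDurationInSeconds, samples, 20.0d), where 20.0d is plausibly the profiler's sampling frequency in Hz. A sketch of the extrapolation (the authoritative body lives in CostCalculator; constants are spelled out here):

```java
// samples / frequencyHz = core-seconds observed in the sampling window;
// scale the window up to a full year, then convert seconds to hours.
static double annualCoreHours(double samplingDurationSeconds, double samples, double frequencyHz) {
    final double SECONDS_PER_YEAR = 60.0d * 60.0d * 24.0d * 365.0d; // 31,536,000
    return (SECONDS_PER_YEAR / samplingDurationSeconds) * samples / frequencyHz / 3600.0d;
}
```

With the tests' inputs (an 1,800-second window, 100,000 samples, 20 Hz) this comes to roughly 24,333 core-hours per year, the figure the expected CO2 and cost values are multiplied against.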
), "aarch64" ) @@ -73,7 +70,7 @@ public void testCreateFromRegularSource() { double samplingDurationInSeconds = 1_800.0d; // 30 minutes long samples = 100_000L; // 100k samples double annualCoreHours = CostCalculator.annualCoreHours(samplingDurationInSeconds, samples, 20.0d); - CO2Calculator co2Calculator = new CO2Calculator(instanceTypeService, hostsTable, samplingDurationInSeconds, null, null, null, null); + CO2Calculator co2Calculator = new CO2Calculator(hostsTable, samplingDurationInSeconds, null, null, null, null); checkCO2Calculation(co2Calculator.getAnnualCO2Tons(HOST_ID_A, samples), annualCoreHours, 0.000002213477d); checkCO2Calculation(co2Calculator.getAnnualCO2Tons(HOST_ID_B, samples), annualCoreHours, 1.1d, 0.00004452d, 7.0d); diff --git a/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/CarthesianCombinator.java b/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/CarthesianCombinator.java new file mode 100644 index 0000000000000..2982df317a38c --- /dev/null +++ b/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/CarthesianCombinator.java @@ -0,0 +1,60 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.profiling; + +import java.lang.reflect.Array; +import java.util.function.Consumer; + +public class CarthesianCombinator<T> { + private final T[] elems; + private final int[] index; + private final T[] result; + private final int len; + + @SuppressWarnings("unchecked") + CarthesianCombinator(T[] elems, int len) { + if (elems.length == 0) { + throw new IllegalArgumentException("elems must not be empty"); + } + this.elems = elems; + this.index = new int[len]; + this.result = (T[]) Array.newInstance(elems[0].getClass(), len); + this.len = len; + } + + private void init(int length) { + for (int i = 0; i < length; i++) { + index[i] = 0; + result[i] = elems[0]; + } + } + + public void forEach(Consumer<T[]> action) { + // Initialize index and result + init(len); + + int pos = 0; + while (pos < len) { + if (index[pos] < elems.length) { + result[pos] = elems[index[pos]]; + action.accept(result); + index[pos]++; + continue; + } + while (pos < len && index[pos] + 1 >= elems.length) { + pos++; + } + if (pos < len) { + index[pos]++; + result[pos] = elems[index[pos]]; + init(pos); + pos = 0; + } + } + } +} diff --git a/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/CostCalculatorTests.java b/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/CostCalculatorTests.java index f42ad1188693b..030616d285416 100644 --- a/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/CostCalculatorTests.java +++ b/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/CostCalculatorTests.java @@ -16,9 +16,6 @@ public class CostCalculatorTests extends ESTestCase { private static final String HOST_ID_B = "2220256254710195392"; public void testCreateFromRegularSource() { - InstanceTypeService instanceTypeService = new InstanceTypeService(); - instanceTypeService.load(); - // tag::noformat Map<String, HostMetadata> hostsTable = Map.ofEntries( Map.entry(HOST_ID_A, @@ -49,7 +46,7 @@ public void testCreateFromRegularSource() { double samplingDurationInSeconds = 1_800.0d; // 30 minutes long samples = 100_000L; // 100k samples double annualCoreHours = 
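CarthesianCombinator (spelling as in the new file) enumerates every fixed-length tuple over a given alphabet, i.e. the len-fold Cartesian power, invoking the consumer once per combination; note that the same result array is reused across callbacks, so consumers must copy it if they retain it. A usage sketch:

```java
// Enumerate all 2-tuples over {'a', 'b'}; with the iteration order of the
// implementation above this visits [a, a], [b, a], [a, b], [b, b].
CarthesianCombinator<Character> combinator = new CarthesianCombinator<>(new Character[] { 'a', 'b' }, 2);
combinator.forEach(tuple -> System.out.println(java.util.Arrays.toString(tuple)));
```

The GCP zone fuzzer test below builds every zone string of length 1 to 5 over '/', '-' and 'a' this way, asserting that HostMetadata.fromSource never throws on malformed input.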
CostCalculator.annualCoreHours(samplingDurationInSeconds, samples, 20.0d); - CostCalculator costCalculator = new CostCalculator(instanceTypeService, hostsTable, samplingDurationInSeconds, null, null); + CostCalculator costCalculator = new CostCalculator(hostsTable, samplingDurationInSeconds, null, null); // Checks whether the cost calculation is based on the pre-calculated lookup data. checkCostCalculation(costCalculator.annualCostsUSD(HOST_ID_A, samples), annualCoreHours, 0.061d); diff --git a/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/GetStackTracesRequestTests.java b/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/GetStackTracesRequestTests.java index 8bf4598cf75f7..f0f328e48d00b 100644 --- a/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/GetStackTracesRequestTests.java +++ b/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/GetStackTracesRequestTests.java @@ -8,12 +8,8 @@ package org.elasticsearch.xpack.profiling; import org.elasticsearch.common.ParsingException; -import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.query.BoolQueryBuilder; -import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.RangeQueryBuilder; import org.elasticsearch.search.SearchModule; import org.elasticsearch.test.ESTestCase; @@ -28,47 +24,6 @@ import static java.util.Collections.emptyList; public class GetStackTracesRequestTests extends ESTestCase { - public void testSerialization() throws IOException { - Integer sampleSize = randomIntBetween(1, Integer.MAX_VALUE); - Double requestedDuration = randomBoolean() ? randomDoubleBetween(0.001d, Double.MAX_VALUE, true) : null; - Double awsCostFactor = randomBoolean() ? randomDoubleBetween(0.1d, 5.0d, true) : null; - Double customCO2PerKWH = randomBoolean() ? randomDoubleBetween(0.000001d, 0.001d, true) : null; - Double datacenterPUE = randomBoolean() ? randomDoubleBetween(1.0d, 3.0d, true) : null; - Double perCoreWattX86 = randomBoolean() ? randomDoubleBetween(0.01d, 20.0d, true) : null; - Double perCoreWattARM64 = randomBoolean() ? randomDoubleBetween(0.01d, 20.0d, true) : null; - Double customCostPerCoreHour = randomBoolean() ? randomDoubleBetween(0.001d, 1000.0d, true) : null; - QueryBuilder query = randomBoolean() ? 
new BoolQueryBuilder() : null; - - GetStackTracesRequest request = new GetStackTracesRequest( - sampleSize, - requestedDuration, - awsCostFactor, - query, - null, - null, - customCO2PerKWH, - datacenterPUE, - perCoreWattX86, - perCoreWattARM64, - customCostPerCoreHour - ); - try (BytesStreamOutput out = new BytesStreamOutput()) { - request.writeTo(out); - try (NamedWriteableAwareStreamInput in = new NamedWriteableAwareStreamInput(out.bytes().streamInput(), writableRegistry())) { - GetStackTracesRequest deserialized = new GetStackTracesRequest(in); - assertEquals(sampleSize, deserialized.getSampleSize()); - assertEquals(requestedDuration, deserialized.getRequestedDuration()); - assertEquals(awsCostFactor, deserialized.getAwsCostFactor()); - assertEquals(customCO2PerKWH, deserialized.getCustomCO2PerKWH()); - assertEquals(datacenterPUE, deserialized.getCustomDatacenterPUE()); - assertEquals(perCoreWattX86, deserialized.getCustomPerCoreWattX86()); - assertEquals(perCoreWattARM64, deserialized.getCustomPerCoreWattARM64()); - assertEquals(customCostPerCoreHour, deserialized.getCustomCostPerCoreHour()); - assertEquals(query, deserialized.getQuery()); - } - } - } - public void testParseValidXContent() throws IOException { try (XContentParser content = createParser(XContentFactory.jsonBuilder() //tag::noformat @@ -93,6 +48,15 @@ public void testParseValidXContent() throws IOException { assertEquals(Double.valueOf(100.54d), request.getRequestedDuration()); // a basic check suffices here assertEquals("@timestamp", ((RangeQueryBuilder) request.getQuery()).fieldName()); + // Expect the default values + assertNull(request.getIndices()); + assertNull(request.getStackTraceIds()); + assertNull(request.getAwsCostFactor()); + assertNull(request.getCustomCO2PerKWH()); + assertNull(request.getCustomDatacenterPUE()); + assertNull(request.getCustomCostPerCoreHour()); + assertNull(request.getCustomPerCoreWattX86()); + assertNull(request.getCustomPerCoreWattARM64()); } } @@ -124,7 +88,57 @@ public void testParseValidXContentWithCustomIndex() throws IOException { assertEquals("@timestamp", ((RangeQueryBuilder) request.getQuery()).fieldName()); // Expect the default values - assertEquals(null, request.getRequestedDuration()); + assertNull(request.getRequestedDuration()); + assertNull(request.getAwsCostFactor()); + assertNull(request.getCustomCO2PerKWH()); + assertNull(request.getCustomDatacenterPUE()); + assertNull(request.getCustomCostPerCoreHour()); + assertNull(request.getCustomPerCoreWattX86()); + assertNull(request.getCustomPerCoreWattARM64()); + } + } + + public void testParseValidXContentWithCustomCostAndCO2Data() throws IOException { + try (XContentParser content = createParser(XContentFactory.jsonBuilder() + //tag::noformat + .startObject() + .field("sample_size", 2000) + .field("requested_duration", 100.54d) + .field("aws_cost_factor", 7.3d) + .field("co2_per_kwh", 22.4d) + .field("datacenter_pue", 1.05d) + .field("cost_per_core_hour", 3.32d) + .field("per_core_watt_x86", 7.2d) + .field("per_core_watt_arm64", 2.82d) + .startObject("query") + .startObject("range") + .startObject("@timestamp") + .field("gte", "2022-10-05") + .endObject() + .endObject() + .endObject() + .endObject() + //end::noformat + )) { + + GetStackTracesRequest request = new GetStackTracesRequest(); + request.parseXContent(content); + + assertEquals(Integer.valueOf(2000), request.getSampleSize()); + assertEquals(Double.valueOf(100.54d), request.getRequestedDuration()); + assertEquals(Double.valueOf(7.3d), request.getAwsCostFactor()); + 
assertEquals(Double.valueOf(22.4d), request.getCustomCO2PerKWH()); + assertEquals(Double.valueOf(1.05d), request.getCustomDatacenterPUE()); + assertEquals(Double.valueOf(3.32d), request.getCustomCostPerCoreHour()); + assertEquals(Double.valueOf(7.2d), request.getCustomPerCoreWattX86()); + assertEquals(Double.valueOf(2.82d), request.getCustomPerCoreWattARM64()); + + // a basic check suffices here + assertEquals("@timestamp", ((RangeQueryBuilder) request.getQuery()).fieldName()); + + // Expect the default values + assertNull(request.getIndices()); + assertNull(request.getStackTraceIds()); } } @@ -246,7 +260,6 @@ public void testConsidersCustomIndicesInRelatedIndices() { } public void testConsidersDefaultIndicesInRelatedIndices() { - String customIndex = randomAlphaOfLength(5); GetStackTracesRequest request = new GetStackTracesRequest(1, 1.0d, 1.0d, null, null, null, null, null, null, null, null); String[] indices = request.indices(); assertEquals(15, indices.length); diff --git a/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/GetStackTracesResponseTests.java b/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/GetStackTracesResponseTests.java index 7455c2b30e13d..99a34719f96c9 100644 --- a/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/GetStackTracesResponseTests.java +++ b/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/GetStackTracesResponseTests.java @@ -7,20 +7,18 @@ package org.elasticsearch.xpack.profiling; -import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.test.AbstractChunkedSerializingTestCase; -import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.test.ESTestCase; import java.util.List; import java.util.Map; -public class GetStackTracesResponseTests extends AbstractWireSerializingTestCase { +public class GetStackTracesResponseTests extends ESTestCase { private T randomNullable(T v) { return randomBoolean() ? 
v : null; } - @Override - protected GetStackTracesResponse createTestInstance() { + private GetStackTracesResponse createTestInstance() { int totalFrames = randomIntBetween(1, 100); Map stackTraces = randomNullable( @@ -57,16 +55,6 @@ protected GetStackTracesResponse createTestInstance() { return new GetStackTracesResponse(stackTraces, stackFrames, executables, stackTraceEvents, totalFrames, 1.0, totalSamples); } - @Override - protected GetStackTracesResponse mutateInstance(GetStackTracesResponse instance) { - return null;// TODO implement https://github.com/elastic/elasticsearch/issues/25929 - } - - @Override - protected Writeable.Reader instanceReader() { - return GetStackTracesResponse::new; - } - public void testChunking() { AbstractChunkedSerializingTestCase.assertChunkCount(createTestInstance(), instance -> { // start, end, total_frames, samplingrate diff --git a/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/HostMetadataTests.java b/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/HostMetadataTests.java index 0359357004687..d8f93cd129916 100644 --- a/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/HostMetadataTests.java +++ b/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/HostMetadataTests.java @@ -13,7 +13,7 @@ import java.util.Map; public class HostMetadataTests extends ESTestCase { - public void testCreateFromRegularSource() { + public void testCreateFromSourceAWS() { final String hostID = "1440256254710195396"; final String machine = "x86_64"; final String provider = "aws"; @@ -25,9 +25,8 @@ public void testCreateFromRegularSource() { Map.of( "host.id", hostID, "profiling.host.machine", machine, - "profiling.host.tags", Arrays.asList( - "cloud_provider:"+provider, "cloud_environment:qa", "cloud_region:"+region), - "ec2.instance_type", instanceType + "ec2.instance_type", instanceType, + "ec2.placement.region", region ) ); // end::noformat @@ -38,4 +37,141 @@ public void testCreateFromRegularSource() { assertEquals(region, host.instanceType.region); assertEquals(instanceType, host.instanceType.name); } + + public void testCreateFromSourceGCP() { + final String hostID = "1440256254710195396"; + final String machine = "x86_64"; + final String provider = "gcp"; + final String[] regions = { "", "", "europe-west1", "europewest", "europe-west1" }; + final String[] zones = { + "", + "/", + "projects/123456789/zones/" + regions[2] + "-b", + "projects/123456789/zones/" + regions[3], + "projects/123456789/zones/" + regions[4] + "-b-c" }; + + for (int i = 0; i < regions.length; i++) { + String region = regions[i]; + String zone = zones[i]; + + // tag::noformat + HostMetadata host = HostMetadata.fromSource( + Map.of( + "host.id", hostID, + "profiling.host.machine", machine, + "gce.instance.zone", zone + ) + ); + // end::noformat + + assertEquals(hostID, host.hostID); + assertEquals(machine, host.profilingHostMachine); + assertEquals(provider, host.instanceType.provider); + assertEquals(region, host.instanceType.region); + assertEquals("", host.instanceType.name); + } + } + + public void testCreateFromSourceGCPZoneFuzzer() { + final String hostID = "1440256254710195396"; + final String machine = "x86_64"; + final String provider = "gcp"; + final Character[] chars = new Character[] { '/', '-', 'a' }; + + for (int zoneLength = 1; zoneLength <= 5; zoneLength++) { + CarthesianCombinator combinator = new CarthesianCombinator<>(chars, zoneLength); + + combinator.forEach((result) -> { + StringBuilder sb = 
new StringBuilder(); + for (Character c : result) { + sb.append(c); + } + String zone = sb.toString(); + + // tag::noformat + HostMetadata host = HostMetadata.fromSource( + Map.of( + "host.id", hostID, + "profiling.host.machine", machine, + "gce.instance.zone", zone + ) + ); + // end::noformat + + assertEquals(hostID, host.hostID); + assertEquals(machine, host.profilingHostMachine); + assertEquals(provider, host.instanceType.provider); + assertNotNull(host.instanceType.region); + assertEquals("", host.instanceType.name); + // region isn't tested because of the combinatorial nature of this test + }); + } + } + + public void testCreateFromSourceAzure() { + final String hostID = "1440256254710195396"; + final String machine = "x86_64"; + final String provider = "azure"; + final String region = "eastus2"; + + // tag::noformat + HostMetadata host = HostMetadata.fromSource( + Map.of( + "host.id", hostID, + "profiling.host.machine", machine, + "azure.compute.location", region + ) + ); + // end::noformat + + assertEquals(hostID, host.hostID); + assertEquals(machine, host.profilingHostMachine); + assertEquals(provider, host.instanceType.provider); + assertEquals(region, host.instanceType.region); + assertEquals("", host.instanceType.name); + } + + public void testCreateFromSourceECS() { + final String hostID = "1440256254710195396"; + final String machine = "x86_64"; + final String provider = "any-provider"; + final String region = "any-region"; + + // tag::noformat + HostMetadata host = HostMetadata.fromSource( + Map.of( + "host.id", hostID, + "profiling.host.machine", machine, + "profiling.host.tags", Arrays.asList( + "cloud_provider:"+provider, "cloud_environment:qa", "cloud_region:"+region) + ) + ); + // end::noformat + + assertEquals(hostID, host.hostID); + assertEquals(machine, host.profilingHostMachine); + assertEquals(provider, host.instanceType.provider); + assertEquals(region, host.instanceType.region); + assertEquals("", host.instanceType.name); + } + + public void testCreateFromSourceNoProvider() { + final String hostID = "1440256254710195396"; + final String machine = "x86_64"; + + // tag::noformat + HostMetadata host = HostMetadata.fromSource( + Map.of( + "host.id", hostID, + "profiling.host.machine", machine + ) + ); + // end::noformat + + assertEquals(hostID, host.hostID); + assertEquals(machine, host.profilingHostMachine); + assertEquals("", host.instanceType.provider); + assertEquals("", host.instanceType.region); + assertEquals("", host.instanceType.name); + } } diff --git a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/async/AsyncTaskManagementService.java b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/async/AsyncTaskManagementService.java index 29705d9e4b116..d10ed1775b024 100644 --- a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/async/AsyncTaskManagementService.java +++ b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/async/AsyncTaskManagementService.java @@ -215,6 +215,7 @@ private ActionListener wrapStoringListener( acquiredListener.onResponse(operation.initialResponse(searchTask)); } }, waitForCompletionTimeout, threadPool.executor(ThreadPool.Names.SEARCH)); + // This will be performed at the end of normal execution return ActionListener.wrap(response -> { ActionListener acquiredListener = exclusiveListener.getAndSet(null); @@ -234,7 +235,11 @@ private ActionListener wrapStoringListener( } } else { // We finished after timeout - saving results - storeResults(searchTask, new StoredAsyncResponse<>(response, 
threadPool.absoluteTimeInMillis() + keepAlive.getMillis())); + storeResults( + searchTask, + new StoredAsyncResponse<>(response, threadPool.absoluteTimeInMillis() + keepAlive.getMillis()), + ActionListener.running(response::decRef) + ); } }, e -> { ActionListener acquiredListener = exclusiveListener.getAndSet(null); @@ -272,6 +277,7 @@ private void storeResults(T searchTask, StoredAsyncResponse storedResp ActionListener.wrap( // We should only unregister after the result is saved resp -> { + // TODO: generalize the logging, not just eql logger.trace(() -> "stored eql search results for [" + searchTask.getExecutionId().getEncoded() + "]"); taskManager.unregister(searchTask); if (storedResponse.getException() != null) { @@ -290,6 +296,7 @@ private void storeResults(T searchTask, StoredAsyncResponse storedResp if (cause instanceof DocumentMissingException == false && cause instanceof VersionConflictEngineException == false) { logger.error( + // TODO: generalize the logging, not just eql () -> format("failed to store eql search results for [%s]", searchTask.getExecutionId().getEncoded()), exc ); diff --git a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/planner/ExpressionTranslators.java b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/planner/ExpressionTranslators.java index c2c51863dbb77..0d659c5dbfb2d 100644 --- a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/planner/ExpressionTranslators.java +++ b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/planner/ExpressionTranslators.java @@ -133,7 +133,7 @@ public static Query doTranslate(RegexMatch e, TranslatorHandler handler) { Expression field = e.field(); if (field instanceof FieldAttribute fa) { - q = translateField(e, handler.nameOf(fa.exactAttribute())); + return handler.wrapFunctionQuery(e, fa, () -> translateField(e, handler.nameOf(fa.exactAttribute()))); } else if (field instanceof MetadataAttribute ma) { q = translateField(e, handler.nameOf(ma)); } else { diff --git a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/rule/RuleExecutor.java b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/rule/RuleExecutor.java index cb13cfd651ed3..2ccdd66089c79 100644 --- a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/rule/RuleExecutor.java +++ b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/rule/RuleExecutor.java @@ -67,6 +67,10 @@ public Batch(String name, Rule... 
rules) { public String name() { return name; } + + public Rule[] rules() { + return rules; + } } private Iterable> batches = null; diff --git a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/util/SpatialCoordinateTypes.java b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/util/SpatialCoordinateTypes.java index 57e472cd5bb17..4be9ddd1f3d21 100644 --- a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/util/SpatialCoordinateTypes.java +++ b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/util/SpatialCoordinateTypes.java @@ -111,7 +111,7 @@ public SpatialPoint stringAsPoint(String string) { throw new IllegalArgumentException("Unsupported geometry type " + geometry.type()); } } catch (Exception e) { - throw new RuntimeException("Failed to parse WKT: " + e.getMessage(), e); + throw new IllegalArgumentException("Failed to parse WKT: " + e.getMessage(), e); } } diff --git a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/util/StringUtils.java b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/util/StringUtils.java index 83c731ce4e7a9..dad3c8574dc4a 100644 --- a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/util/StringUtils.java +++ b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/util/StringUtils.java @@ -44,7 +44,7 @@ private StringUtils() {} private static final String INVALID_REGEX_SEQUENCE = "Invalid sequence - escape character is not followed by special wildcard char"; - // CamelCase to camel_case + // CamelCase to camel_case (and isNaN to is_nan) public static String camelCaseToUnderscore(String string) { if (Strings.hasText(string) == false) { return EMPTY; @@ -57,7 +57,8 @@ public static String camelCaseToUnderscore(String string) { char ch = s.charAt(i); if (Character.isAlphabetic(ch)) { if (Character.isUpperCase(ch)) { - if (i > 0 && previousCharWasUp == false) { + // append `_` when encountering a capital after a lowercase letter, but only if it is not the last letter. + if (i > 0 && i < s.length() - 1 && previousCharWasUp == false) { sb.append("_"); } previousCharWasUp = true; diff --git a/x-pack/plugin/ql/test-fixtures/src/main/java/org/elasticsearch/xpack/ql/CsvSpecReader.java b/x-pack/plugin/ql/test-fixtures/src/main/java/org/elasticsearch/xpack/ql/CsvSpecReader.java index 7ddd660645a7c..a3b5147988b13 100644 --- a/x-pack/plugin/ql/test-fixtures/src/main/java/org/elasticsearch/xpack/ql/CsvSpecReader.java +++ b/x-pack/plugin/ql/test-fixtures/src/main/java/org/elasticsearch/xpack/ql/CsvSpecReader.java @@ -10,6 +10,7 @@ import java.util.ArrayList; import java.util.List; import java.util.Locale; +import java.util.function.Function; import static org.hamcrest.CoreMatchers.is; import static org.junit.Assert.assertThat; @@ -119,6 +120,16 @@ public List expectedWarnings(boolean forEmulated) { } return warnings; } + + /** + * Modifies the expected warnings. + * In some cases, we modify the query to run against multiple clusters. As a result, the line/column positions + * of the expected warnings no longer match the actual warnings. To enable reuse of spec tests, this method + * allows adjusting the expected warnings.
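+ * For example, a test that prefixes queries with a remote cluster alias might use this to shift the "line 1:N:" offsets in each expected warning by the length of the added prefix (illustrative example, not taken from an actual caller).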
+ */ + public void adjustExpectedWarnings(Function updater) { + expectedWarnings.replaceAll(updater::apply); + } } } diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupResponseTranslator.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupResponseTranslator.java index e90ad56e3395a..ed3a3f294c65c 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupResponseTranslator.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupResponseTranslator.java @@ -34,7 +34,6 @@ import org.elasticsearch.search.aggregations.metrics.Min; import org.elasticsearch.search.aggregations.metrics.Sum; import org.elasticsearch.search.aggregations.metrics.SumAggregationBuilder; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.xpack.core.rollup.RollupField; import java.nio.charset.StandardCharsets; @@ -340,20 +339,15 @@ private static SearchResponse mergeFinalResponse( isTerminatedEarly = isTerminatedEarly && liveResponse.isTerminatedEarly(); numReducePhases += liveResponse.getNumReducePhases(); } - - InternalSearchResponse combinedInternal = new InternalSearchResponse( + // Shard failures are ignored atm, so returning an empty array is fine + return new SearchResponse( SearchHits.EMPTY_WITH_TOTAL_HITS, aggs, null, - null, isTimedOut, isTerminatedEarly, - numReducePhases - ); - - // Shard failures are ignored atm, so returning an empty array is fine - return new SearchResponse( - combinedInternal, + null, + numReducePhases, null, totalShards, sucessfulShards, diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/rest/RestPutRollupJobAction.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/rest/RestPutRollupJobAction.java index d6c00e3e89682..e434da37b7585 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/rest/RestPutRollupJobAction.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/rest/RestPutRollupJobAction.java @@ -29,7 +29,10 @@ public List routes() { @Override protected RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { final String id = request.param("id"); - final PutRollupJobAction.Request putRollupJobRequest = PutRollupJobAction.Request.fromXContent(request.contentParser(), id); + final PutRollupJobAction.Request putRollupJobRequest; + try (var parser = request.contentParser()) { + putRollupJobRequest = PutRollupJobAction.Request.fromXContent(parser, id); + } return channel -> client.execute(PutRollupJobAction.INSTANCE, putRollupJobRequest, new RestToXContentListener<>(channel)); } diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/rest/RestRollupSearchAction.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/rest/RestRollupSearchAction.java index 693daeaee030a..266f515d1dbb6 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/rest/RestRollupSearchAction.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/rest/RestRollupSearchAction.java @@ -10,7 +10,7 @@ import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.action.RestChunkedToXContentListener; +import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener; import 
org.elasticsearch.rest.action.search.RestSearchAction; import org.elasticsearch.xpack.core.rollup.action.RollupSearchAction; @@ -48,7 +48,11 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient ) ); RestSearchAction.validateSearchRequest(restRequest, searchRequest); - return channel -> client.execute(RollupSearchAction.INSTANCE, searchRequest, new RestChunkedToXContentListener<>(channel)); + return channel -> client.execute( + RollupSearchAction.INSTANCE, + searchRequest, + new RestRefCountedChunkedToXContentListener<>(channel) + ); } @Override diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupResponseTranslationTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupResponseTranslationTests.java index 44f5f51668ea3..7e814230a2223 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupResponseTranslationTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupResponseTranslationTests.java @@ -72,7 +72,6 @@ import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator.PipelineTree; import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.aggregations.support.ValueType; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.test.InternalAggregationTestCase; import org.elasticsearch.xpack.core.rollup.RollupField; @@ -516,15 +515,13 @@ public void testMismatch() throws IOException { // TODO SearchResponse.Clusters is not public, using null for now. Should fix upstream. MultiSearchResponse.Item unrolledItem = new MultiSearchResponse.Item( new SearchResponse( - new InternalSearchResponse( - null, - InternalAggregations.from(Collections.singletonList(responses.get(0))), - null, - null, - false, - false, - 1 - ), + null, + InternalAggregations.from(Collections.singletonList(responses.get(0))), + null, + false, + false, + null, + 1, null, 1, 1, @@ -537,15 +534,13 @@ public void testMismatch() throws IOException { ); MultiSearchResponse.Item rolledItem = new MultiSearchResponse.Item( new SearchResponse( - new InternalSearchResponse( - null, - InternalAggregations.from(Collections.singletonList(responses.get(1))), - null, - null, - false, - false, - 1 - ), + null, + InternalAggregations.from(Collections.singletonList(responses.get(1))), + null, + false, + false, + null, + 1, null, 1, 1, diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java index 16034354d0ff2..6d7b1d943f10a 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java @@ -27,7 +27,6 @@ import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.search.SearchResponseSections; import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.common.Rounding; import org.elasticsearch.common.time.DateFormatter; @@ -866,16 +865,22 @@ protected void doNextSearch(long waitTimeInNanos, ActionListener } catch (IOException e) { listener.onFailure(e); } - SearchResponseSections sections = new SearchResponseSections( + 
SearchResponse response = new SearchResponse( null, new Aggregations(Collections.singletonList(result)), null, false, null, null, - 1 + 1, + null, + 1, + 1, + 0, + 0, + ShardSearchFailure.EMPTY_ARRAY, + null ); - SearchResponse response = new SearchResponse(sections, null, 1, 1, 0, 0, ShardSearchFailure.EMPTY_ARRAY, null); listener.onResponse(response); } diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerStateTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerStateTests.java index 6fb40541330b2..f858544e4dd2b 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerStateTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerStateTests.java @@ -14,7 +14,6 @@ import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.search.SearchResponseSections; import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; @@ -106,16 +105,22 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return null; } })); - final SearchResponseSections sections = new SearchResponseSections( + final SearchResponse response = new SearchResponse( new SearchHits(new SearchHit[0], new TotalHits(0, TotalHits.Relation.EQUAL_TO), 0), aggs, null, false, null, null, - 1 + 1, + null, + 1, + 1, + 0, + 0, + new ShardSearchFailure[0], + null ); - final SearchResponse response = new SearchResponse(sections, null, 1, 1, 0, 0, new ShardSearchFailure[0], null); nextPhase.onResponse(response); } @@ -222,8 +227,7 @@ protected void doNextSearch(long waitTimeInNanos, ActionListener } try { - SearchResponse response = searchFunction.apply(buildSearchRequest()); - nextPhase.onResponse(response); + ActionListener.respondAndRelease(nextPhase, searchFunction.apply(buildSearchRequest())); } catch (Exception e) { nextPhase.onFailure(e); } @@ -473,17 +477,25 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return null; } })); - final SearchResponseSections sections = new SearchResponseSections( - new SearchHits(new SearchHit[0], new TotalHits(0, TotalHits.Relation.EQUAL_TO), 0), - aggs, - null, - false, - null, - null, - 1 + ActionListener.respondAndRelease( + nextPhase, + new SearchResponse( + new SearchHits(new SearchHit[0], new TotalHits(0, TotalHits.Relation.EQUAL_TO), 0), + aggs, + null, + false, + null, + null, + 1, + null, + 1, + 1, + 0, + 0, + ShardSearchFailure.EMPTY_ARRAY, + null + ) ); - final SearchResponse response = new SearchResponse(sections, null, 1, 1, 0, 0, ShardSearchFailure.EMPTY_ARRAY, null); - nextPhase.onResponse(response); } @Override @@ -684,16 +696,22 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return null; } })); - final SearchResponseSections sections = new SearchResponseSections( + return new SearchResponse( new SearchHits(new SearchHit[0], new TotalHits(0, TotalHits.Relation.EQUAL_TO), 0), aggs, null, false, null, null, - 1 + 1, + null, + 1, + 1, + 0, + 0, + ShardSearchFailure.EMPTY_ARRAY, + null ); - return new SearchResponse(sections, null, 1, 1, 0, 0, ShardSearchFailure.EMPTY_ARRAY, null); }; Function bulkFunction = bulkRequest -> new BulkResponse(new BulkItemResponse[0], 100); @@ -808,16 +826,22 @@ 
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return null; } })); - final SearchResponseSections sections = new SearchResponseSections( + return new SearchResponse( new SearchHits(new SearchHit[0], new TotalHits(0, TotalHits.Relation.EQUAL_TO), 0), aggs, null, false, null, null, - 1 + 1, + null, + 1, + 1, + 0, + 0, + ShardSearchFailure.EMPTY_ARRAY, + null ); - return new SearchResponse(sections, null, 1, 1, 0, 0, ShardSearchFailure.EMPTY_ARRAY, null); }; Function bulkFunction = bulkRequest -> new BulkResponse(new BulkItemResponse[0], 100); @@ -981,16 +1005,22 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return null; } })); - final SearchResponseSections sections = new SearchResponseSections( + return new SearchResponse( new SearchHits(new SearchHit[0], new TotalHits(0, TotalHits.Relation.EQUAL_TO), 0), aggs, null, false, null, null, - 1 + 1, + null, + 1, + 1, + 0, + 0, + ShardSearchFailure.EMPTY_ARRAY, + null ); - return new SearchResponse(sections, null, 1, 1, 0, 0, ShardSearchFailure.EMPTY_ARRAY, null); }; Function bulkFunction = bulkRequest -> { diff --git a/x-pack/plugin/searchable-snapshots/qa/url/build.gradle b/x-pack/plugin/searchable-snapshots/qa/url/build.gradle index 12fc0873958e1..850fe85ece3cd 100644 --- a/x-pack/plugin/searchable-snapshots/qa/url/build.gradle +++ b/x-pack/plugin/searchable-snapshots/qa/url/build.gradle @@ -1,12 +1,11 @@ import static org.elasticsearch.gradle.PropertyNormalization.IGNORE_VALUE -apply plugin: 'elasticsearch.legacy-java-rest-test' +apply plugin: 'elasticsearch.internal-java-rest-test' apply plugin: 'elasticsearch.rest-resources' -final Project fixture = project(':test:fixtures:nginx-fixture') - dependencies { javaRestTestImplementation(testArtifact(project(xpackModule('searchable-snapshots')))) + javaRestTestImplementation project(':test:fixtures:url-fixture') } restResources { @@ -15,34 +14,6 @@ restResources { } } -apply plugin: 'elasticsearch.test.fixtures' -testFixtures.useFixture(fixture.path, 'nginx-fixture') - -def fixtureAddress = { fixtureName -> - int ephemeralPort = fixture.postProcessFixture.ext."test.fixtures.${fixtureName}.tcp.80" - assert ephemeralPort > 0 - 'http://127.0.0.1:' + ephemeralPort -} - -File repositoryDir = fixture.fsRepositoryDir as File - tasks.named("javaRestTest").configure { - dependsOn fixture.getTasks().named("postProcessFixture") - - nonInputProperties.systemProperty 'test.url.fs.repo.dir', repositoryDir.absolutePath - nonInputProperties.systemProperty 'test.url.http', "${-> fixtureAddress('nginx-fixture')}" -} - -testClusters.matching { it.name == "javaRestTest" }.configureEach { - testDistribution = 'DEFAULT' - setting 'path.repo', repositoryDir.absolutePath, IGNORE_VALUE - setting 'repositories.url.allowed_urls', { "${-> fixtureAddress('nginx-fixture')}" }, IGNORE_VALUE - - setting 'xpack.license.self_generated.type', 'trial' - - setting 'xpack.searchable.snapshot.shared_cache.size', '16MB' - setting 'xpack.searchable.snapshot.shared_cache.region_size', '256KB' - setting 'xpack.searchable_snapshots.cache_fetch_async_thread_pool.keep_alive', '0ms' - - setting 'xpack.security.enabled', 'false' + usesDefaultDistribution() } diff --git a/x-pack/plugin/searchable-snapshots/qa/url/src/javaRestTest/java/org/elasticsearch/xpack/searchablesnapshots/URLSearchableSnapshotsIT.java b/x-pack/plugin/searchable-snapshots/qa/url/src/javaRestTest/java/org/elasticsearch/xpack/searchablesnapshots/URLSearchableSnapshotsIT.java index 
b37b71cf95a31..b59dcb3a9d210 100644 --- a/x-pack/plugin/searchable-snapshots/qa/url/src/javaRestTest/java/org/elasticsearch/xpack/searchablesnapshots/URLSearchableSnapshotsIT.java +++ b/x-pack/plugin/searchable-snapshots/qa/url/src/javaRestTest/java/org/elasticsearch/xpack/searchablesnapshots/URLSearchableSnapshotsIT.java @@ -7,14 +7,37 @@ package org.elasticsearch.xpack.searchablesnapshots; +import fixture.url.URLFixture; + import org.elasticsearch.common.settings.Settings; import org.elasticsearch.repositories.fs.FsRepository; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; +import org.junit.ClassRule; +import org.junit.rules.RuleChain; +import org.junit.rules.TestRule; import static org.hamcrest.Matchers.blankOrNullString; import static org.hamcrest.Matchers.not; public class URLSearchableSnapshotsIT extends AbstractSearchableSnapshotsRestTestCase { + public static URLFixture urlFixture = new URLFixture(); + + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .distribution(DistributionType.DEFAULT) + .setting("xpack.license.self_generated.type", "trial") + .setting("repositories.url.allowed_urls", () -> urlFixture.getAddress()) + .setting("path.repo", () -> urlFixture.getRepositoryDir()) + .setting("xpack.searchable.snapshot.shared_cache.size", "16MB") + .setting("xpack.searchable.snapshot.shared_cache.region_size", "256KB") + .setting("xpack.searchable_snapshots.cache_fetch_async_thread_pool.keep_alive", "0ms") + .setting("xpack.security.enabled", "false") + .build(); + + @ClassRule + public static TestRule ruleChain = RuleChain.outerRule(urlFixture).around(cluster); + @Override protected String writeRepositoryType() { return FsRepository.TYPE; @@ -22,7 +45,7 @@ protected String writeRepositoryType() { @Override protected Settings writeRepositorySettings() { - final String repoDirectory = System.getProperty("test.url.fs.repo.dir"); + final String repoDirectory = urlFixture.getRepositoryDir(); assertThat(repoDirectory, not(blankOrNullString())); return Settings.builder().put("location", repoDirectory).build(); @@ -40,9 +63,14 @@ protected String readRepositoryType() { @Override protected Settings readRepositorySettings() { - final String url = System.getProperty("test.url.http"); + final String url = urlFixture.getAddress(); assertThat(url, not(blankOrNullString())); return Settings.builder().put("url", url).build(); } + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } } diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/FrozenSearchableSnapshotsIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/FrozenSearchableSnapshotsIntegTests.java index 5ef524f8211c1..18b4e6ed7cb31 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/FrozenSearchableSnapshotsIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/FrozenSearchableSnapshotsIntegTests.java @@ -208,12 +208,12 @@ public void testCreateAndRestorePartialSearchableSnapshot() throws Exception { for (ShardStats shardStats : indicesStatsResponse.getShards()) { StoreStats store = shardStats.getStats().getStore(); - assertThat(shardStats.getShardRouting().toString(), store.getReservedSize().getBytes(), 
equalTo(0L)); - assertThat(shardStats.getShardRouting().toString(), store.getSize().getBytes(), equalTo(0L)); + assertThat(shardStats.getShardRouting().toString(), store.reservedSizeInBytes(), equalTo(0L)); + assertThat(shardStats.getShardRouting().toString(), store.sizeInBytes(), equalTo(0L)); } if (indicesStatsResponse.getShards().length > 0) { - assertThat(indicesStatsResponse.getTotal().getStore().getReservedSize().getBytes(), equalTo(0L)); - assertThat(indicesStatsResponse.getTotal().getStore().getSize().getBytes(), equalTo(0L)); + assertThat(indicesStatsResponse.getTotal().getStore().reservedSizeInBytes(), equalTo(0L)); + assertThat(indicesStatsResponse.getTotal().getStore().sizeInBytes(), equalTo(0L)); } } }, "test-stats-watcher"); @@ -251,8 +251,8 @@ public void testCreateAndRestorePartialSearchableSnapshot() throws Exception { StoreStats store = shardStats.getStats().getStore(); final ShardRouting shardRouting = shardStats.getShardRouting(); - assertThat(shardRouting.toString(), store.getReservedSize().getBytes(), equalTo(0L)); - assertThat(shardRouting.toString(), store.getSize().getBytes(), equalTo(0L)); + assertThat(shardRouting.toString(), store.reservedSizeInBytes(), equalTo(0L)); + assertThat(shardRouting.toString(), store.sizeInBytes(), equalTo(0L)); // the original shard size from the snapshot final long originalSize = snapshotShards.get(shardRouting.getId()).getStats().getTotalSize(); @@ -273,11 +273,11 @@ public void testCreateAndRestorePartialSearchableSnapshot() throws Exception { final ByteBuffersDirectory inMemoryDir = (ByteBuffersDirectory) unwrappedDir; assertThat(inMemoryDir.listAll(), arrayWithSize(1)); - assertThat(shardRouting.toString(), store.getTotalDataSetSize().getBytes(), equalTo(originalSize)); + assertThat(shardRouting.toString(), store.totalDataSetSizeInBytes(), equalTo(originalSize)); } final StoreStats store = indicesStatsResponse.getTotal().getStore(); - assertThat(store.getTotalDataSetSize().getBytes(), equalTo(totalExpectedSize)); + assertThat(store.totalDataSetSizeInBytes(), equalTo(totalExpectedSize)); statsWatcherRunning.set(false); statsWatcher.join(); diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/RetrySearchIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/RetrySearchIntegTests.java index c80cf3c3d62e3..e3b631ba69c8a 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/RetrySearchIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/RetrySearchIntegTests.java @@ -143,23 +143,18 @@ public void testRetryPointInTime() throws Exception { ).keepAlive(TimeValue.timeValueMinutes(2)); final String pitId = client().execute(TransportOpenPointInTimeAction.TYPE, openRequest).actionGet().getPointInTimeId(); try { - assertNoFailuresAndResponse( - prepareSearch().setIndices(indexName).setPreference(null).setPointInTime(new PointInTimeBuilder(pitId)), - resp -> { - assertThat(resp.pointInTimeId(), equalTo(pitId)); - assertHitCount(resp, docCount); - } - ); + assertNoFailuresAndResponse(prepareSearch().setPointInTime(new PointInTimeBuilder(pitId)), resp -> { + assertThat(resp.pointInTimeId(), equalTo(pitId)); + assertHitCount(resp, docCount); + }); final Set allocatedNodes = internalCluster().nodesInclude(indexName); for (String allocatedNode : allocatedNodes) { 
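// Restart every node that holds a shard of the index; the assertions below verify that the same point-in-time id is still searchable afterwards.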
internalCluster().restartNode(allocatedNode); } ensureGreen(indexName); assertNoFailuresAndResponse( - prepareSearch().setIndices(indexName) - .setQuery(new RangeQueryBuilder("created_date").gte("2011-01-01").lte("2011-12-12")) + prepareSearch().setQuery(new RangeQueryBuilder("created_date").gte("2011-01-01").lte("2011-12-12")) .setSearchType(SearchType.QUERY_THEN_FETCH) - .setPreference(null) .setPreFilterShardSize(between(1, 10)) .setAllowPartialSearchResults(true) .setPointInTime(new PointInTimeBuilder(pitId)), diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsIntegTests.java index 876ff9ebdb86f..38222f64b282b 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsIntegTests.java @@ -498,7 +498,7 @@ private Map getMaxShardSizeByNodeInBytes(String indexName) { IndexStats indexStats = indicesStats.getIndex(indexName); Map maxShardSizeByNode = new HashMap<>(); for (ShardStats shard : indexStats.getShards()) { - long sizeInBytes = shard.getStats().getStore().getSizeInBytes(); + long sizeInBytes = shard.getStats().getStore().sizeInBytes(); if (sizeInBytes > 0) { maxShardSizeByNode.compute( shard.getShardRouting().currentNodeId(), diff --git a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/AbstractRemoteClusterSecurityDlsAndFlsRestIT.java b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/AbstractRemoteClusterSecurityDlsAndFlsRestIT.java index 9f93392ad13d7..6ffa09dc1f265 100644 --- a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/AbstractRemoteClusterSecurityDlsAndFlsRestIT.java +++ b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/AbstractRemoteClusterSecurityDlsAndFlsRestIT.java @@ -14,6 +14,7 @@ import org.elasticsearch.core.Strings; import org.elasticsearch.core.Tuple; import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.SearchResponseUtils; import org.elasticsearch.test.rest.ObjectPath; import java.io.IOException; @@ -172,12 +173,17 @@ protected void assertSearchResponseContainsExpectedIndicesAndFields( ) { try { assertOK(searchResponse); - final var searchResult = Arrays.stream(SearchResponse.fromXContent(responseAsParser(searchResponse)).getHits().getHits()) - .collect(Collectors.toMap(SearchHit::getIndex, SearchHit::getSourceAsMap)); + var response = SearchResponseUtils.responseAsSearchResponse(searchResponse); + try { + final var searchResult = Arrays.stream(response.getHits().getHits()) + .collect(Collectors.toMap(SearchHit::getIndex, SearchHit::getSourceAsMap)); - assertThat(searchResult.keySet(), containsInAnyOrder(expectedRemoteIndices)); - for (String remoteIndex : expectedRemoteIndices) { - assertThat(searchResult.get(remoteIndex).keySet(), containsInAnyOrder(expectedFields)); + assertThat(searchResult.keySet(), containsInAnyOrder(expectedRemoteIndices)); + for (String remoteIndex : expectedRemoteIndices) { + assertThat(searchResult.get(remoteIndex).keySet(), 
containsInAnyOrder(expectedFields)); + } + } finally { + response.decRef(); } } catch (IOException e) { throw new UncheckedIOException(e); @@ -195,13 +201,18 @@ protected void assertSearchResponseContainsExpectedIndicesAndFields( ) { try { assertOK(searchResponse); - final var searchResult = Arrays.stream(SearchResponse.fromXContent(responseAsParser(searchResponse)).getHits().getHits()) - .collect(Collectors.toMap(SearchHit::getIndex, SearchHit::getSourceAsMap)); + var response = SearchResponseUtils.responseAsSearchResponse(searchResponse); + try { + final var searchResult = Arrays.stream(response.getHits().getHits()) + .collect(Collectors.toMap(SearchHit::getIndex, SearchHit::getSourceAsMap)); - assertThat(searchResult.keySet(), equalTo(expectedRemoteIndicesAndFields.keySet())); - for (String remoteIndex : expectedRemoteIndicesAndFields.keySet()) { - Set expectedFields = expectedRemoteIndicesAndFields.get(remoteIndex); - assertThat(searchResult.get(remoteIndex).keySet(), equalTo(expectedFields)); + assertThat(searchResult.keySet(), equalTo(expectedRemoteIndicesAndFields.keySet())); + for (String remoteIndex : expectedRemoteIndicesAndFields.keySet()) { + Set expectedFields = expectedRemoteIndicesAndFields.get(remoteIndex); + assertThat(searchResult.get(remoteIndex).keySet(), equalTo(expectedFields)); + } + } finally { + response.decRef(); } } catch (IOException e) { throw new UncheckedIOException(e); @@ -211,7 +222,7 @@ protected void assertSearchResponseContainsExpectedIndicesAndFields( protected void assertSearchResponseContainsEmptyResult(Response response) { try { assertOK(response); - SearchResponse searchResponse = SearchResponse.fromXContent(responseAsParser(response)); + SearchResponse searchResponse = SearchResponseUtils.responseAsSearchResponse(response); try { assertThat(searchResponse.getHits().getTotalHits().value, equalTo(0L)); } finally { diff --git a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/AbstractRemoteClusterSecurityWithMultipleRemotesRestIT.java b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/AbstractRemoteClusterSecurityWithMultipleRemotesRestIT.java index 536176ed4c833..aa65edae88506 100644 --- a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/AbstractRemoteClusterSecurityWithMultipleRemotesRestIT.java +++ b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/AbstractRemoteClusterSecurityWithMultipleRemotesRestIT.java @@ -214,7 +214,10 @@ private static void searchAndExpect403(String searchPath) { static void searchAndAssertIndicesFound(String searchPath, String... 
expectedIndices) throws IOException { final Response response = performRequestWithRemoteSearchUser(new Request("GET", searchPath)); assertOK(response); - final SearchResponse searchResponse = SearchResponse.fromXContent(responseAsParser(response)); + final SearchResponse searchResponse; + try (var parser = responseAsParser(response)) { + searchResponse = SearchResponse.fromXContent(parser); + } try { final List actualIndices = Arrays.stream(searchResponse.getHits().getHits()) .map(SearchHit::getIndex) diff --git a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityApiKeyRestIT.java b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityApiKeyRestIT.java index 4227354561178..d103e3c50ef7e 100644 --- a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityApiKeyRestIT.java +++ b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityApiKeyRestIT.java @@ -183,7 +183,10 @@ public void testCrossClusterSearchWithApiKey() throws Exception { ); final Response response = performRequestWithApiKey(searchRequest, apiKeyEncoded); assertOK(response); - final SearchResponse searchResponse = SearchResponse.fromXContent(responseAsParser(response)); + final SearchResponse searchResponse; + try (var parser = responseAsParser(response)) { + searchResponse = SearchResponse.fromXContent(parser); + } try { final List actualIndices = Arrays.stream(searchResponse.getHits().getHits()) .map(SearchHit::getIndex) diff --git a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityBwcRestIT.java b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityBwcRestIT.java index 8c01398dd2969..5c4b61537e9a5 100644 --- a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityBwcRestIT.java +++ b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityBwcRestIT.java @@ -189,7 +189,10 @@ public void testBwcWithLegacyCrossClusterSearch() throws Exception { ? performRequestWithRemoteAccessUser(searchRequest) : performRequestWithApiKey(searchRequest, apiKeyEncoded); assertOK(response); - final SearchResponse searchResponse = SearchResponse.fromXContent(responseAsParser(response)); + final SearchResponse searchResponse; + try (var parser = responseAsParser(response)) { + searchResponse = SearchResponse.fromXContent(parser); + } try { final List actualIndices = Arrays.stream(searchResponse.getHits().getHits()) .map(SearchHit::getIndex) diff --git a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityCcrIT.java b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityCcrIT.java index 03489f6365dd1..d4321f63017ad 100644 --- a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityCcrIT.java +++ b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityCcrIT.java @@ -276,7 +276,10 @@ private void verifyReplicatedDocuments(long numberOfDocs, String... 
indices) thr throw new AssertionError(e); } assertOK(response); - final SearchResponse searchResponse = SearchResponse.fromXContent(responseAsParser(response)); + final SearchResponse searchResponse; + try (var parser = responseAsParser(response)) { + searchResponse = SearchResponse.fromXContent(parser); + } try { assertThat(searchResponse.getHits().getTotalHits().value, equalTo(numberOfDocs)); assertThat( diff --git a/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java b/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java index 6e78eb2fb5b83..cab0c2bff28f0 100644 --- a/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java +++ b/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java @@ -255,6 +255,7 @@ public class Constants { "cluster:admin/xpack/security/profile/suggest", "cluster:admin/xpack/security/profile/set_enabled", "cluster:admin/xpack/security/realm/cache/clear", + "cluster:admin/xpack/security/remote_cluster_credentials/reload", "cluster:admin/xpack/security/role/delete", "cluster:admin/xpack/security/role/get", "cluster:admin/xpack/security/role/put", @@ -516,6 +517,7 @@ public class Constants { "indices:data/read/eql", "indices:data/read/eql/async/get", "indices:data/read/esql", + "indices:data/read/esql/async/get", "indices:data/read/explain", "indices:data/read/field_caps", "indices:data/read/get", diff --git a/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/crossclusteraccess/CrossClusterAccessHeadersForCcsRestIT.java b/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/crossclusteraccess/CrossClusterAccessHeadersForCcsRestIT.java index 97b52a699749e..51358d82bb238 100644 --- a/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/crossclusteraccess/CrossClusterAccessHeadersForCcsRestIT.java +++ b/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/crossclusteraccess/CrossClusterAccessHeadersForCcsRestIT.java @@ -38,7 +38,6 @@ import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.aggregations.InternalAggregations; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.rest.ObjectPath; import org.elasticsearch.test.transport.MockTransportService; @@ -1152,15 +1151,13 @@ private static MockTransportService startTransport( ); channel.sendResponse( new SearchResponse( - new InternalSearchResponse( - new SearchHits(new SearchHit[0], new TotalHits(0, TotalHits.Relation.EQUAL_TO), Float.NaN), - InternalAggregations.EMPTY, - null, - null, - false, - null, - 1 - ), + new SearchHits(new SearchHit[0], new TotalHits(0, TotalHits.Relation.EQUAL_TO), Float.NaN), + InternalAggregations.EMPTY, + null, + false, + null, + null, + 1, null, 1, 1, diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/MultipleIndicesPermissionsTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/MultipleIndicesPermissionsTests.java index 08fb0c79a076c..af54f71779f08 100644 --- 
a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/MultipleIndicesPermissionsTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/MultipleIndicesPermissionsTests.java @@ -12,7 +12,9 @@ import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse; import org.elasticsearch.action.admin.indices.segments.IndicesSegmentResponse; import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; +import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresRequest; import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse; +import org.elasticsearch.action.admin.indices.shards.TransportIndicesShardStoresAction; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; import org.elasticsearch.action.search.MultiSearchResponse; import org.elasticsearch.client.Request; @@ -33,6 +35,7 @@ import org.junit.Before; import java.util.Collections; +import java.util.concurrent.TimeUnit; import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; @@ -204,11 +207,10 @@ public void testMonitorRestrictedWildcards() throws Exception { assertThat(getSettingsResponse.getIndexToSettings().containsKey("foobar"), is(true)); assertThat(getSettingsResponse.getIndexToSettings().containsKey("foobarfoo"), is(true)); - final IndicesShardStoresResponse indicesShardsStoresResponse = client.admin() - .indices() - .prepareShardStores(randomFrom("*", "_all", "foo*")) - .setShardStatuses("all") - .get(); + final IndicesShardStoresResponse indicesShardsStoresResponse = client.execute( + TransportIndicesShardStoresAction.TYPE, + new IndicesShardStoresRequest(randomFrom("*", "_all", "foo*")).shardStatuses("all") + ).actionGet(10, TimeUnit.SECONDS); assertThat(indicesShardsStoresResponse.getStoreStatuses().size(), is(3)); assertThat(indicesShardsStoresResponse.getStoreStatuses().containsKey("foo"), is(true)); assertThat(indicesShardsStoresResponse.getStoreStatuses().containsKey("foobar"), is(true)); diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/ReloadRemoteClusterCredentialsIT.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/ReloadRemoteClusterCredentialsIT.java new file mode 100644 index 0000000000000..336257ff76069 --- /dev/null +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/ReloadRemoteClusterCredentialsIT.java @@ -0,0 +1,313 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.security; + +import org.apache.lucene.search.TotalHits; +import org.elasticsearch.TransportVersion; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.cluster.node.reload.NodesReloadSecureSettingsRequest; +import org.elasticsearch.action.admin.cluster.node.reload.NodesReloadSecureSettingsResponse; +import org.elasticsearch.action.admin.cluster.node.reload.TransportNodesReloadSecureSettingsAction; +import org.elasticsearch.action.admin.cluster.remote.RemoteClusterNodesAction; +import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; +import org.elasticsearch.action.admin.cluster.state.ClusterStateAction; +import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; +import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.SearchShardsRequest; +import org.elasticsearch.action.search.SearchShardsResponse; +import org.elasticsearch.action.search.ShardSearchFailure; +import org.elasticsearch.action.search.TransportSearchAction; +import org.elasticsearch.action.search.TransportSearchShardsAction; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.node.VersionInformation; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.KeyStoreWrapper; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.env.Environment; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.SearchHits; +import org.elasticsearch.search.aggregations.InternalAggregations; +import org.elasticsearch.test.SecuritySingleNodeTestCase; +import org.elasticsearch.test.transport.MockTransportService; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.RemoteClusterCredentialsManager; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.security.authc.ApiKeyService; +import org.elasticsearch.xpack.security.authc.CrossClusterAccessHeaders; +import org.junit.BeforeClass; + +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; + +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasKey; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; + +public class ReloadRemoteClusterCredentialsIT extends SecuritySingleNodeTestCase { + private static final String CLUSTER_ALIAS = "my_remote_cluster"; + + @BeforeClass + public static void disableInFips() { + assumeFalse( + "Cannot run in FIPS mode since the keystore will be password protected and sending a password in the reload" + + "settings api call, 
requires TLS to be configured for the transport layer", + inFipsJvm() + ); + } + + @Override + public String configRoles() { + return org.elasticsearch.core.Strings.format(""" + user: + cluster: [ "ALL" ] + indices: + - names: '*' + privileges: [ "ALL" ] + remote_indices: + - names: '*' + privileges: [ "ALL" ] + clusters: ["*"] + """); + } + + @Override + public void tearDown() throws Exception { + try { + clearRemoteCluster(); + super.tearDown(); + } finally { + ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS); + } + } + + private final ThreadPool threadPool = new TestThreadPool(getClass().getName()); + + public void testReloadRemoteClusterCredentials() throws Exception { + final String credentials = randomAlphaOfLength(42); + writeCredentialsToKeyStore(credentials); + final RemoteClusterCredentialsManager clusterCredentialsManager = getInstanceFromNode(TransportService.class) + .getRemoteClusterService() + .getRemoteClusterCredentialsManager(); + // Until we reload, credentials written to the keystore are not loaded into the credentials manager + assertThat(clusterCredentialsManager.hasCredentials(CLUSTER_ALIAS), is(false)); + reloadSecureSettings(); + assertThat(clusterCredentialsManager.resolveCredentials(CLUSTER_ALIAS), equalTo(credentials)); + + // Check that credentials get used for a remote connection once we configure it + final BlockingQueue> capturedHeaders = ConcurrentCollections.newBlockingQueue(); + try (MockTransportService remoteTransport = startTransport("remoteNodeA", threadPool, capturedHeaders)) { + final TransportAddress remoteAddress = remoteTransport.getOriginalTransport() + .profileBoundAddresses() + .get("_remote_cluster") + .publishAddress(); + + configureRemoteCluster(remoteAddress); + + // Run search to trigger header capturing on the receiving side + client().search(new SearchRequest(CLUSTER_ALIAS + ":index-a")).get().decRef(); + + assertHeadersContainCredentialsThenClear(credentials, capturedHeaders); + + // Update credentials and ensure they are used + final String updatedCredentials = randomAlphaOfLength(41); + writeCredentialsToKeyStore(updatedCredentials); + reloadSecureSettings(); + + client().search(new SearchRequest(CLUSTER_ALIAS + ":index-a")).get().decRef(); + + assertHeadersContainCredentialsThenClear(updatedCredentials, capturedHeaders); + } + } + + private void assertHeadersContainCredentialsThenClear(String credentials, BlockingQueue> capturedHeaders) { + assertThat(capturedHeaders, is(not(empty()))); + for (Map actualHeaders : capturedHeaders) { + assertThat(actualHeaders, hasKey(CrossClusterAccessHeaders.CROSS_CLUSTER_ACCESS_CREDENTIALS_HEADER_KEY)); + assertThat( + actualHeaders.get(CrossClusterAccessHeaders.CROSS_CLUSTER_ACCESS_CREDENTIALS_HEADER_KEY), + equalTo(ApiKeyService.withApiKeyPrefix(credentials)) + ); + } + capturedHeaders.clear(); + assertThat(capturedHeaders, is(empty())); + } + + private void clearRemoteCluster() throws InterruptedException, ExecutionException { + final var builder = Settings.builder() + .putNull("cluster.remote." + CLUSTER_ALIAS + ".mode") + .putNull("cluster.remote." + CLUSTER_ALIAS + ".seeds") + .putNull("cluster.remote."
+ CLUSTER_ALIAS + ".proxy_address"); + clusterAdmin().updateSettings(new ClusterUpdateSettingsRequest().persistentSettings(builder)).get(); + } + + @Override + protected Settings nodeSettings() { + return Settings.builder().put(super.nodeSettings()).put("xpack.security.remote_cluster_client.ssl.enabled", false).build(); + } + + private void configureRemoteCluster(TransportAddress remoteAddress) throws InterruptedException, ExecutionException { + final Settings.Builder builder = Settings.builder(); + if (randomBoolean()) { + builder.put("cluster.remote." + CLUSTER_ALIAS + ".mode", "sniff") + .put("cluster.remote." + CLUSTER_ALIAS + ".seeds", remoteAddress.toString()) + .putNull("cluster.remote." + CLUSTER_ALIAS + ".proxy_address"); + } else { + builder.put("cluster.remote." + CLUSTER_ALIAS + ".mode", "proxy") + .put("cluster.remote." + CLUSTER_ALIAS + ".proxy_address", remoteAddress.toString()) + .putNull("cluster.remote." + CLUSTER_ALIAS + ".seeds"); + } + clusterAdmin().updateSettings(new ClusterUpdateSettingsRequest().persistentSettings(builder)).get(); + } + + private void writeCredentialsToKeyStore(String credentials) throws Exception { + final Environment environment = getInstanceFromNode(Environment.class); + final KeyStoreWrapper keyStoreWrapper = KeyStoreWrapper.create(); + keyStoreWrapper.setString("cluster.remote." + CLUSTER_ALIAS + ".credentials", credentials.toCharArray()); + keyStoreWrapper.save(environment.configFile(), new char[0], false); + } + + public static MockTransportService startTransport( + final String nodeName, + final ThreadPool threadPool, + final BlockingQueue> capturedHeaders + ) { + boolean success = false; + final Settings settings = Settings.builder() + .put("node.name", nodeName) + .put("remote_cluster_server.enabled", "true") + .put("remote_cluster.port", "0") + .put("xpack.security.remote_cluster_server.ssl.enabled", "false") + .build(); + final MockTransportService service = MockTransportService.createNewService( + settings, + VersionInformation.CURRENT, + TransportVersion.current(), + threadPool, + null + ); + try { + service.registerRequestHandler( + ClusterStateAction.NAME, + EsExecutors.DIRECT_EXECUTOR_SERVICE, + ClusterStateRequest::new, + (request, channel, task) -> { + capturedHeaders.add(Map.copyOf(threadPool.getThreadContext().getHeaders())); + channel.sendResponse( + new ClusterStateResponse(ClusterName.DEFAULT, ClusterState.builder(ClusterName.DEFAULT).build(), false) + ); + } + ); + service.registerRequestHandler( + RemoteClusterNodesAction.TYPE.name(), + EsExecutors.DIRECT_EXECUTOR_SERVICE, + RemoteClusterNodesAction.Request::new, + (request, channel, task) -> { + capturedHeaders.add(Map.copyOf(threadPool.getThreadContext().getHeaders())); + channel.sendResponse(new RemoteClusterNodesAction.Response(List.of())); + } + ); + service.registerRequestHandler( + TransportSearchShardsAction.TYPE.name(), + EsExecutors.DIRECT_EXECUTOR_SERVICE, + SearchShardsRequest::new, + (request, channel, task) -> { + capturedHeaders.add(Map.copyOf(threadPool.getThreadContext().getHeaders())); + channel.sendResponse(new SearchShardsResponse(List.of(), List.of(), Collections.emptyMap())); + } + ); + service.registerRequestHandler( + TransportSearchAction.TYPE.name(), + EsExecutors.DIRECT_EXECUTOR_SERVICE, + SearchRequest::new, + (request, channel, task) -> { + capturedHeaders.add(Map.copyOf(threadPool.getThreadContext().getHeaders())); + channel.sendResponse( + new SearchResponse( + new SearchHits(new SearchHit[0], new TotalHits(0, TotalHits.Relation.EQUAL_TO), 
Float.NaN), + InternalAggregations.EMPTY, + null, + false, + null, + null, + 1, + null, + 1, + 1, + 0, + 100, + ShardSearchFailure.EMPTY_ARRAY, + SearchResponse.Clusters.EMPTY + ) + ); + } + ); + service.start(); + service.acceptIncomingRequests(); + success = true; + return service; + } finally { + if (success == false) { + service.close(); + } + } + } + + private void reloadSecureSettings() { + final AtomicReference reloadSettingsError = new AtomicReference<>(); + final CountDownLatch latch = new CountDownLatch(1); + final SecureString emptyPassword = randomBoolean() ? new SecureString(new char[0]) : null; + + final var request = new NodesReloadSecureSettingsRequest(Strings.EMPTY_ARRAY); + request.setSecureStorePassword(emptyPassword); + client().execute(TransportNodesReloadSecureSettingsAction.TYPE, request, new ActionListener<>() { + @Override + public void onResponse(NodesReloadSecureSettingsResponse nodesReloadResponse) { + try { + assertThat(nodesReloadResponse, notNullValue()); + final Map nodesMap = nodesReloadResponse.getNodesMap(); + assertThat(nodesMap.size(), equalTo(1)); + for (final NodesReloadSecureSettingsResponse.NodeResponse nodeResponse : nodesReloadResponse.getNodes()) { + assertThat(nodeResponse.reloadException(), nullValue()); + } + } catch (final AssertionError e) { + reloadSettingsError.set(e); + } finally { + latch.countDown(); + } + } + + @Override + public void onFailure(Exception e) { + reloadSettingsError.set(new AssertionError("Nodes request failed", e)); + latch.countDown(); + } + }); + safeAwait(latch); + if (reloadSettingsError.get() != null) { + throw reloadSettingsError.get(); + } + } +} diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/ScrollHelperIntegTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/ScrollHelperIntegTests.java index 7fc4c1520f9c6..e481cf70b9afe 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/ScrollHelperIntegTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/ScrollHelperIntegTests.java @@ -20,7 +20,6 @@ import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.security.ScrollHelper; @@ -83,22 +82,28 @@ public void testFetchAllByEntityWithBrokenScroll() { String scrollId = randomAlphaOfLength(5); SearchHit[] hits = new SearchHit[] { new SearchHit(1), new SearchHit(2) }; - InternalSearchResponse internalResponse = new InternalSearchResponse( - new SearchHits(hits, new TotalHits(3, TotalHits.Relation.EQUAL_TO), 1), - null, - null, - null, - false, - false, - 1 - ); Answer returnResponse = invocation -> { @SuppressWarnings("unchecked") ActionListener listener = (ActionListener) invocation.getArguments()[1]; ActionListener.respondAndRelease( listener, - new SearchResponse(internalResponse, scrollId, 1, 1, 0, 0, ShardSearchFailure.EMPTY_ARRAY, SearchResponse.Clusters.EMPTY) + new SearchResponse( + new SearchHits(hits, new TotalHits(3, TotalHits.Relation.EQUAL_TO), 1), + null, + null, + false, + false, + null, + 1, + scrollId, + 1, + 1, + 0, + 0, + ShardSearchFailure.EMPTY_ARRAY, + SearchResponse.Clusters.EMPTY + ) ); return null; }; diff --git 
a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/TokenAuthIntegTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/TokenAuthIntegTests.java index 19d29ef251dd1..faa85150dca31 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/TokenAuthIntegTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/TokenAuthIntegTests.java @@ -44,8 +44,10 @@ import java.time.temporal.ChronoUnit; import java.util.ArrayList; import java.util.Collections; +import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; @@ -635,11 +637,11 @@ public void testRefreshingMultipleTimesWithinWindowSucceeds() throws Exception { } } logger.info("received access token [{}] and refresh token [{}]", result.accessToken(), result.getRefreshToken()); - completedLatch.countDown(); } catch (IOException e) { failed.set(true); - completedLatch.countDown(); logger.error("caught exception", e); + } finally { + completedLatch.countDown(); } })); } @@ -655,7 +657,9 @@ public void testRefreshingMultipleTimesWithinWindowSucceeds() throws Exception { assertThat(failed.get(), equalTo(false)); // Assert that we only ever got one token/refresh_token pair synchronized (tokens) { - assertThat((int) tokens.stream().distinct().count(), equalTo(1)); + Set uniqueTokens = new HashSet<>(tokens); + logger.info("Unique tokens received from refreshToken call [{}]", uniqueTokens); + assertThat(uniqueTokens.size(), equalTo(1)); } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java index 51a902d7e12c0..1d849055c70a5 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java @@ -13,6 +13,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.util.SetOnce; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchSecurityException; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.TransportVersion; @@ -21,6 +22,7 @@ import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.support.ActionFilter; import org.elasticsearch.action.support.DestructiveOperations; +import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.bootstrap.BootstrapCheck; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterState; @@ -110,6 +112,7 @@ import org.elasticsearch.xpack.core.security.SecurityExtension; import org.elasticsearch.xpack.core.security.SecurityField; import org.elasticsearch.xpack.core.security.SecuritySettings; +import org.elasticsearch.xpack.core.security.action.ActionTypes; import org.elasticsearch.xpack.core.security.action.ClearSecurityCacheAction; import org.elasticsearch.xpack.core.security.action.DelegatePkiAuthenticationAction; import org.elasticsearch.xpack.core.security.action.apikey.BulkUpdateApiKeyAction; @@ -244,6 +247,7 @@ import org.elasticsearch.xpack.security.action.service.TransportGetServiceAccountCredentialsAction; import 
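The TokenAuthIntegTests fix above is a general latch-hygiene rule: count the latch down in a `finally` block, so a worker that throws can never leave the awaiting thread hanging. The pattern in isolation; the `refresh` runnable stands in for the token-refresh call under test.

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;

class LatchPattern {
    static void runWorkers(int workers, Runnable refresh) throws InterruptedException {
        final CountDownLatch completedLatch = new CountDownLatch(workers);
        final AtomicBoolean failed = new AtomicBoolean();
        for (int i = 0; i < workers; i++) {
            new Thread(() -> {
                try {
                    refresh.run(); // stand-in for the refresh-token call
                } catch (RuntimeException e) {
                    failed.set(true); // record the failure, but still release the latch below
                } finally {
                    completedLatch.countDown(); // runs on success *and* failure
                }
            }).start();
        }
        if (completedLatch.await(30, TimeUnit.SECONDS) == false) {
            throw new AssertionError("workers did not complete");
        }
        if (failed.get()) {
            throw new AssertionError("at least one worker failed");
        }
    }
}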
org.elasticsearch.xpack.security.action.service.TransportGetServiceAccountNodesCredentialsAction; import org.elasticsearch.xpack.security.action.settings.TransportGetSecuritySettingsAction; +import org.elasticsearch.xpack.security.action.settings.TransportReloadRemoteClusterCredentialsAction; import org.elasticsearch.xpack.security.action.settings.TransportUpdateSecuritySettingsAction; import org.elasticsearch.xpack.security.action.token.TransportCreateTokenAction; import org.elasticsearch.xpack.security.action.token.TransportInvalidateTokenAction; @@ -364,7 +368,6 @@ import org.elasticsearch.xpack.security.support.CacheInvalidatorRegistry; import org.elasticsearch.xpack.security.support.ExtensionComponents; import org.elasticsearch.xpack.security.support.SecuritySystemIndices; -import org.elasticsearch.xpack.security.transport.RemoteClusterCredentialsResolver; import org.elasticsearch.xpack.security.transport.SecurityHttpSettings; import org.elasticsearch.xpack.security.transport.SecurityServerTransportInterceptor; import org.elasticsearch.xpack.security.transport.filter.IPFilter; @@ -372,6 +375,7 @@ import java.io.IOException; import java.net.InetSocketAddress; +import java.security.Provider; import java.time.Clock; import java.util.ArrayList; import java.util.Arrays; @@ -386,6 +390,7 @@ import java.util.Objects; import java.util.Set; import java.util.concurrent.Executor; +import java.util.concurrent.TimeUnit; import java.util.function.BiConsumer; import java.util.function.Function; import java.util.function.Predicate; @@ -554,6 +559,7 @@ public class Security extends Plugin private final SetOnce reservedRoleMappingAction = new SetOnce<>(); private final SetOnce workflowService = new SetOnce<>(); private final SetOnce realms = new SetOnce<>(); + private final SetOnce client = new SetOnce<>(); public Security(Settings settings) { this(settings, Collections.emptyList()); @@ -573,25 +579,30 @@ public Security(Settings settings) { runStartupChecks(settings); Automatons.updateConfiguration(settings); } else { - final List remoteClusterCredentialsSettingKeys = RemoteClusterService.REMOTE_CLUSTER_CREDENTIALS.getAllConcreteSettings( - settings - ).map(Setting::getKey).sorted().toList(); - if (false == remoteClusterCredentialsSettingKeys.isEmpty()) { - throw new IllegalArgumentException( - format( - "Found [%s] remote clusters with credentials [%s]. Security [%s] must be enabled to connect to them. " - + "Please either enable security or remove these settings from the keystore.", - remoteClusterCredentialsSettingKeys.size(), - Strings.collectionToCommaDelimitedString(remoteClusterCredentialsSettingKeys), - XPackSettings.SECURITY_ENABLED.getKey() - ) - ); - } + ensureNoRemoteClusterCredentialsOnDisabledSecurity(settings); this.bootstrapChecks.set(Collections.emptyList()); } this.securityExtensions.addAll(extensions); } + private void ensureNoRemoteClusterCredentialsOnDisabledSecurity(Settings settings) { + assert false == enabled; + final List remoteClusterCredentialsSettingKeys = RemoteClusterService.REMOTE_CLUSTER_CREDENTIALS.getAllConcreteSettings( + settings + ).map(Setting::getKey).sorted().toList(); + if (false == remoteClusterCredentialsSettingKeys.isEmpty()) { + throw new IllegalArgumentException( + format( + "Found [%s] remote clusters with credentials [%s]. Security [%s] must be enabled to connect to them. 
" + + "Please either enable security or remove these settings from the keystore.", + remoteClusterCredentialsSettingKeys.size(), + Strings.collectionToCommaDelimitedString(remoteClusterCredentialsSettingKeys), + XPackSettings.SECURITY_ENABLED.getKey() + ) + ); + } + } + private static void runStartupChecks(Settings settings) { validateRealmSettings(settings); if (XPackSettings.FIPS_MODE_ENABLED.get(settings)) { @@ -616,6 +627,14 @@ protected XPackLicenseState getLicenseState() { return XPackPlugin.getSharedLicenseState(); } + protected Client getClient() { + return client.get(); + } + + protected Realms getRealms() { + return realms.get(); + } + @Override public Collection createComponents(PluginServices services) { try { @@ -654,6 +673,8 @@ Collection createComponents( return Collections.singletonList(new SecurityUsageServices(null, null, null, null, null, null)); } + this.client.set(client); + // The settings in `environment` may have additional values over what was provided during construction // See Plugin#additionalSettings() this.settings = environment.settings(); @@ -980,8 +1001,6 @@ Collection createComponents( ipFilter.set(new IPFilter(settings, auditTrailService, clusterService.getClusterSettings(), getLicenseState())); components.add(ipFilter.get()); - final RemoteClusterCredentialsResolver remoteClusterCredentialsResolver = new RemoteClusterCredentialsResolver(settings); - DestructiveOperations destructiveOperations = new DestructiveOperations(settings, clusterService.getClusterSettings()); crossClusterAccessAuthcService.set(new CrossClusterAccessAuthenticationService(clusterService, apiKeyService, authcService.get())); components.add(crossClusterAccessAuthcService.get()); @@ -995,7 +1014,6 @@ Collection createComponents( securityContext.get(), destructiveOperations, crossClusterAccessAuthcService.get(), - remoteClusterCredentialsResolver, getLicenseState() ) ); @@ -1161,6 +1179,7 @@ public static List> getSettings(List securityExten // The following just apply in node mode settingsList.add(XPackSettings.FIPS_MODE_ENABLED); + settingsList.add(XPackSettings.FIPS_REQUIRED_PROVIDERS); SSLService.registerSettings(settingsList); // IP Filter settings @@ -1348,6 +1367,7 @@ public void onIndexModule(IndexModule module) { new ActionHandler<>(SetProfileEnabledAction.INSTANCE, TransportSetProfileEnabledAction.class), new ActionHandler<>(GetSecuritySettingsAction.INSTANCE, TransportGetSecuritySettingsAction.class), new ActionHandler<>(UpdateSecuritySettingsAction.INSTANCE, TransportUpdateSecuritySettingsAction.class), + new ActionHandler<>(ActionTypes.RELOAD_REMOTE_CLUSTER_CREDENTIALS_ACTION, TransportReloadRemoteClusterCredentialsAction.class), usageAction, infoAction ).filter(Objects::nonNull).toList(); @@ -1543,6 +1563,30 @@ static void validateForFips(Settings settings) { } }); + Set foundProviders = new HashSet<>(); + for (Provider provider : java.security.Security.getProviders()) { + foundProviders.add(provider.getName().toLowerCase(Locale.ROOT)); + if (logger.isTraceEnabled()) { + logger.trace("Security Provider: " + provider.getName() + ", Version: " + provider.getVersionStr()); + provider.entrySet().forEach(entry -> { logger.trace("\t" + entry.getKey()); }); + } + } + + final List requiredProviders = XPackSettings.FIPS_REQUIRED_PROVIDERS.get(settings); + logger.info("JVM Security Providers: " + foundProviders); + if (requiredProviders != null && requiredProviders.isEmpty() == false) { + List unsatisfiedProviders = requiredProviders.stream() + .map(s -> s.toLowerCase(Locale.ROOT)) 
+ .filter(element -> foundProviders.contains(element) == false) + .toList(); + + if (unsatisfiedProviders.isEmpty() == false) { + String errorMessage = "Could not find required FIPS security provider: " + unsatisfiedProviders; + logger.error(errorMessage); + validationErrors.add(errorMessage); + } + } + if (validationErrors.isEmpty() == false) { final StringBuilder sb = new StringBuilder(); sb.append("Validation for FIPS 140 mode failed: \n"); @@ -1887,16 +1931,56 @@ public BiConsumer getJoinValidator() { @Override public void reload(Settings settings) throws Exception { if (enabled) { - realms.get().stream().filter(r -> JwtRealmSettings.TYPE.equals(r.realmRef().getType())).forEach(realm -> { - if (realm instanceof JwtRealm jwtRealm) { - jwtRealm.rotateClientSecret( - CLIENT_AUTHENTICATION_SHARED_SECRET.getConcreteSettingForNamespace(realm.realmRef().getName()).get(settings) - ); - } - }); + final List reloadExceptions = new ArrayList<>(); + try { + reloadRemoteClusterCredentials(settings); + } catch (Exception ex) { + reloadExceptions.add(ex); + } + + try { + reloadSharedSecretsForJwtRealms(settings); + } catch (Exception ex) { + reloadExceptions.add(ex); + } + + if (false == reloadExceptions.isEmpty()) { + final var combinedException = new ElasticsearchException( + "secure settings reload failed for one or more security components" + ); + reloadExceptions.forEach(combinedException::addSuppressed); + throw combinedException; + } + } else { + ensureNoRemoteClusterCredentialsOnDisabledSecurity(settings); } } + private void reloadSharedSecretsForJwtRealms(Settings settingsWithKeystore) { + getRealms().stream().filter(r -> JwtRealmSettings.TYPE.equals(r.realmRef().getType())).forEach(realm -> { + if (realm instanceof JwtRealm jwtRealm) { + jwtRealm.rotateClientSecret( + CLIENT_AUTHENTICATION_SHARED_SECRET.getConcreteSettingForNamespace(realm.realmRef().getName()).get(settingsWithKeystore) + ); + } + }); + } + + /** + * This method uses a transport action internally to access classes that are injectable but not part of the plugin contract. + * See {@link TransportReloadRemoteClusterCredentialsAction} for more context. + */ + private void reloadRemoteClusterCredentials(Settings settingsWithKeystore) { + final PlainActionFuture future = new PlainActionFuture<>(); + getClient().execute( + ActionTypes.RELOAD_REMOTE_CLUSTER_CREDENTIALS_ACTION, + new TransportReloadRemoteClusterCredentialsAction.Request(settingsWithKeystore), + future + ); + assert future.isDone() : "expecting local-only action call to return immediately on invocation"; + future.actionGet(0, TimeUnit.NANOSECONDS); + } + static final class ValidateLicenseForFIPS implements BiConsumer { private final boolean inFipsMode; private final LicenseService licenseService; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/settings/TransportReloadRemoteClusterCredentialsAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/settings/TransportReloadRemoteClusterCredentialsAction.java new file mode 100644 index 0000000000000..d6f54e9d3e9e1 --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/settings/TransportReloadRemoteClusterCredentialsAction.java @@ -0,0 +1,77 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
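Note the error-handling shape `reload` adopts above: each reload step runs to completion even if an earlier one failed, and all failures surface together as suppressed exceptions on one combined error. A reduced sketch of that pattern; the step list and exception types here are illustrative, not the plugin's API.

import java.util.ArrayList;
import java.util.List;

class ReloadAggregation {
    static void runAll(List<Runnable> steps) {
        final List<Exception> failures = new ArrayList<>();
        for (Runnable step : steps) {
            try {
                step.run(); // each step runs even if an earlier one failed
            } catch (RuntimeException e) {
                failures.add(e);
            }
        }
        if (failures.isEmpty() == false) {
            final var combined = new RuntimeException("reload failed for one or more components");
            failures.forEach(combined::addSuppressed); // callers see every cause, not just the first
            throw combined;
        }
    }
}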
+ */
+
+package org.elasticsearch.xpack.security.action.settings;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.ActionRequest;
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.TransportAction;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.tasks.Task;
+import org.elasticsearch.transport.RemoteClusterService;
+import org.elasticsearch.transport.TransportService;
+import org.elasticsearch.xpack.core.security.action.ActionTypes;
+import org.elasticsearch.xpack.security.Security;
+
+import java.io.IOException;
+
+/**
+ * This is a local-only action which updates remote cluster credentials for remote cluster connections, from keystore settings reloaded via
+ * a call to {@link org.elasticsearch.rest.action.admin.cluster.RestReloadSecureSettingsAction}.
+ *
+ * It's invoked as part of the {@link Security#reload(Settings)} call.
+ *
+ * This action is largely an implementation detail to work around the fact that Security is a plugin without direct access to many core
+ * classes, including the {@link RemoteClusterService} which is required for a credentials reload. A transport action gives us access to
+ * the {@link RemoteClusterService} which is injectable but not part of the plugin contract.
+ */
+public class TransportReloadRemoteClusterCredentialsAction extends TransportAction<
+    TransportReloadRemoteClusterCredentialsAction.Request,
+    ActionResponse.Empty> {
+
+    private final RemoteClusterService remoteClusterService;
+
+    @Inject
+    public TransportReloadRemoteClusterCredentialsAction(TransportService transportService, ActionFilters actionFilters) {
+        super(ActionTypes.RELOAD_REMOTE_CLUSTER_CREDENTIALS_ACTION.name(), actionFilters, transportService.getTaskManager());
+        this.remoteClusterService = transportService.getRemoteClusterService();
+    }
+
+    @Override
+    protected void doExecute(Task task, Request request, ActionListener<ActionResponse.Empty> listener) {
+        // We avoid stashing and marking context as system to keep the action as minimal as possible (i.e., avoid copying context)
+        remoteClusterService.updateRemoteClusterCredentials(request.getSettings());
+        listener.onResponse(ActionResponse.Empty.INSTANCE);
+    }
+
+    public static class Request extends ActionRequest {
+        private final Settings settings;
+
+        public Request(Settings settings) {
+            this.settings = settings;
+        }
+
+        @Override
+        public ActionRequestValidationException validate() {
+            return null;
+        }
+
+        public Settings getSettings() {
+            return settings;
+        }
+
+        @Override
+        public void writeTo(StreamOutput out) throws IOException {
+            localOnly();
+        }
+    }
+}
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java
index df507bbda4592..98e17ad022483 100644
--- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ApiKeyService.java
@@ -811,7 +811,7 @@ private static boolean isNoop(
         final Authentication authentication,
         final BaseUpdateApiKeyRequest request,
         final Set<RoleDescriptor> userRoleDescriptors
-    ) {
+    ) throws IOException {
         if (apiKeyDoc.version != 
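As the javadoc above explains, the transport action exists only to reach injectable server classes from plugin code. Stripped to its moving parts, a local-only action has the shape sketched below; only the `TransportAction` base, the `@Inject` constructor wiring, and the `localOnly()` serialization guard are taken from the real class above, while the names are hypothetical.

import java.io.IOException;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.TransportAction;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.transport.TransportService;

// Hypothetical skeleton of a local-only action: the request refuses serialization,
// so the action can only ever run on the node that created the request.
public class TransportExampleLocalAction extends TransportAction<TransportExampleLocalAction.Request, ActionResponse.Empty> {

    @Inject
    public TransportExampleLocalAction(TransportService transportService, ActionFilters actionFilters) {
        super("cluster:admin/xpack/example/local", actionFilters, transportService.getTaskManager());
    }

    @Override
    protected void doExecute(Task task, Request request, ActionListener<ActionResponse.Empty> listener) {
        // reach injectable server-side services (wired through the constructor) here
        listener.onResponse(ActionResponse.Empty.INSTANCE);
    }

    public static class Request extends ActionRequest {
        @Override
        public ActionRequestValidationException validate() {
            return null;
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            localOnly(); // fails fast if anything tries to send this over the wire
        }
    }
}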
targetDocVersion.id) { return false; } @@ -832,12 +832,11 @@ private static boolean isNoop( return false; } @SuppressWarnings("unchecked") - final var currentRealmDomain = RealmDomain.fromXContent( - XContentHelper.mapToXContentParser( - XContentParserConfiguration.EMPTY, - (Map) currentCreator.get("realm_domain") - ) - ); + var m = (Map) currentCreator.get("realm_domain"); + final RealmDomain currentRealmDomain; + try (var parser = XContentHelper.mapToXContentParser(XContentParserConfiguration.EMPTY, m)) { + currentRealmDomain = RealmDomain.fromXContent(parser); + } if (sourceRealm.getDomain().equals(currentRealmDomain) == false) { return false; } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java index 18397bed2426d..d49b4d1c09c15 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java @@ -210,7 +210,7 @@ public final class TokenService { static final TransportVersion VERSION_ACCESS_TOKENS_AS_UUIDS = TransportVersions.V_7_2_0; static final TransportVersion VERSION_MULTIPLE_CONCURRENT_REFRESHES = TransportVersions.V_7_2_0; static final TransportVersion VERSION_CLIENT_AUTH_FOR_REFRESH = TransportVersions.V_8_2_0; - static final TransportVersion VERSION_GET_TOKEN_DOC_FOR_REFRESH = TransportVersions.V_8_500_040; + static final TransportVersion VERSION_GET_TOKEN_DOC_FOR_REFRESH = TransportVersions.V_8_500_061; private static final Logger logger = LogManager.getLogger(TokenService.class); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/service/ElasticServiceAccounts.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/service/ElasticServiceAccounts.java index 8942be0bee29c..777fe5f71b0a0 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/service/ElasticServiceAccounts.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/service/ElasticServiceAccounts.java @@ -157,7 +157,7 @@ final class ElasticServiceAccounts { new String[] { "monitor", "manage_own_api_key" }, new RoleDescriptor.IndicesPrivileges[] { RoleDescriptor.IndicesPrivileges.builder() - .indices("logs-*", "metrics-*") + .indices("logs-*", "metrics-*", "traces-*") .privileges("write", "create_index", "auto_configure") .build(), }, null, diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationService.java index 8a0a9c09b7d1a..e4e9bc453ee83 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationService.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationService.java @@ -25,6 +25,7 @@ import org.elasticsearch.action.datastreams.MigrateToDataStreamAction; import org.elasticsearch.action.delete.TransportDeleteAction; import org.elasticsearch.action.index.TransportIndexAction; +import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.support.GroupedActionListener; import org.elasticsearch.action.support.replication.TransportReplicationAction.ConcreteShardRequest; import org.elasticsearch.action.update.TransportUpdateAction; @@ -471,6 
+472,11 @@ private void authorizeAction( } else if (isIndexAction(action)) { final Metadata metadata = clusterService.state().metadata(); final AsyncSupplier resolvedIndicesAsyncSupplier = new CachingAsyncSupplier<>(resolvedIndicesListener -> { + if (request instanceof SearchRequest searchRequest && searchRequest.pointInTimeBuilder() != null) { + var resolvedIndices = indicesAndAliasesResolver.resolvePITIndices(searchRequest); + resolvedIndicesListener.onResponse(resolvedIndices); + return; + } final ResolvedIndices resolvedIndices = IndicesAndAliasesResolver.tryResolveWithoutWildcards(action, request); if (resolvedIndices != null) { resolvedIndicesListener.onResponse(resolvedIndices); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolver.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolver.java index 16258e71e85b8..a4163b6f10fc0 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolver.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolver.java @@ -11,6 +11,8 @@ import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; +import org.elasticsearch.action.search.SearchContextId; +import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.cluster.metadata.AliasMetadata; import org.elasticsearch.cluster.metadata.IndexAbstraction; @@ -176,6 +178,24 @@ static ResolvedIndices resolveIndicesAndAliasesWithoutWildcards(String action, I return new ResolvedIndices(localIndices, List.of()); } + /** + * Returns the resolved indices from the {@link SearchContextId} within the provided {@link SearchRequest}. 
+ */ + ResolvedIndices resolvePITIndices(SearchRequest request) { + assert request.pointInTimeBuilder() != null; + var indices = SearchContextId.decodeIndices(request.pointInTimeBuilder().getEncodedId()); + final ResolvedIndices split; + if (request.allowsRemoteIndices()) { + split = remoteClusterResolver.splitLocalAndRemoteIndexNames(indices); + } else { + split = new ResolvedIndices(Arrays.asList(indices), Collections.emptyList()); + } + if (split.isEmpty()) { + return new ResolvedIndices(List.of(NO_INDEX_PLACEHOLDER), Collections.emptyList()); + } + return split; + } + private static void throwOnUnexpectedWildcards(String action, String[] indices) { final List wildcards = Stream.of(indices).filter(Regex::isSimpleMatchPattern).toList(); assert wildcards.isEmpty() == false : "we already know that there's at least one wildcard in the indices"; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/FileRolesStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/FileRolesStore.java index 03ac7d5e0fa36..70d086cc5a831 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/FileRolesStore.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/FileRolesStore.java @@ -280,8 +280,7 @@ static RoleDescriptor parseRoleDescriptor( String roleName = null; XContentParserConfiguration parserConfig = XContentParserConfiguration.EMPTY.withRegistry(xContentRegistry) .withDeprecationHandler(LoggingDeprecationHandler.INSTANCE); - try { - XContentParser parser = YamlXContent.yamlXContent.createParser(parserConfig, segment); + try (XContentParser parser = YamlXContent.yamlXContent.createParser(parserConfig, segment)) { XContentParser.Token token = parser.nextToken(); if (token == XContentParser.Token.START_OBJECT) { token = parser.nextToken(); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/profile/RestUpdateProfileDataAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/profile/RestUpdateProfileDataAction.java index 6e178f30fe1b3..899d68063cf3b 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/profile/RestUpdateProfileDataAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/profile/RestUpdateProfileDataAction.java @@ -63,7 +63,10 @@ protected RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClien final long ifPrimaryTerm = request.paramAsLong("if_primary_term", -1); final long ifSeqNo = request.paramAsLong("if_seq_no", -1); final RefreshPolicy refreshPolicy = RefreshPolicy.parse(request.param("refresh", "wait_for")); - final Payload payload = PARSER.parse(request.contentParser(), null); + final Payload payload; + try (var parser = request.contentParser()) { + payload = PARSER.parse(parser, null); + } final UpdateProfileDataRequest updateProfileDataRequest = new UpdateProfileDataRequest( uid, diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/settings/RestUpdateSecuritySettingsAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/settings/RestUpdateSecuritySettingsAction.java index 7ee8ea5d41a63..b2e8719b25c24 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/settings/RestUpdateSecuritySettingsAction.java +++ 
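The authorization shortcut relies on the fact that a point-in-time id embeds the concrete indices it was opened against, so no wildcard expansion is needed. A hedged sketch of the decode-and-split step, reusing the calls from `resolvePITIndices` above; the `:`-prefix check merely stands in for the class's private `RemoteClusterResolver`.

import java.util.Arrays;
import java.util.List;

import org.elasticsearch.action.search.SearchContextId;
import org.elasticsearch.action.search.SearchRequest;

class PitIndices {
    // Sketch: recover the local indices a PIT search is scoped to from its encoded id.
    static List<String> localPitIndices(SearchRequest request) {
        assert request.pointInTimeBuilder() != null;
        final String[] indices = SearchContextId.decodeIndices(request.pointInTimeBuilder().getEncodedId());
        return Arrays.stream(indices)
            .filter(name -> name.contains(":") == false) // "alias:index" names belong to remote clusters
            .toList();
    }
}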
b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/rest/action/settings/RestUpdateSecuritySettingsAction.java @@ -36,7 +36,10 @@ public List routes() { @Override protected RestChannelConsumer innerPrepareRequest(RestRequest request, NodeClient client) throws IOException { - UpdateSecuritySettingsAction.Request req = UpdateSecuritySettingsAction.Request.parse(request.contentParser()); + UpdateSecuritySettingsAction.Request req; + try (var parser = request.contentParser()) { + req = UpdateSecuritySettingsAction.Request.parse(parser); + } return restChannel -> client.execute(UpdateSecuritySettingsAction.INSTANCE, req, new RestToXContentListener<>(restChannel)); } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/RemoteClusterCredentialsResolver.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/RemoteClusterCredentialsResolver.java deleted file mode 100644 index 93735a700bf92..0000000000000 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/RemoteClusterCredentialsResolver.java +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -package org.elasticsearch.xpack.security.transport; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.settings.SecureString; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.xpack.security.authc.ApiKeyService; - -import java.util.Map; -import java.util.Optional; - -import static org.elasticsearch.transport.RemoteClusterService.REMOTE_CLUSTER_CREDENTIALS; - -public class RemoteClusterCredentialsResolver { - - private static final Logger logger = LogManager.getLogger(RemoteClusterCredentialsResolver.class); - - private final Map clusterCredentials; - - public RemoteClusterCredentialsResolver(final Settings settings) { - this.clusterCredentials = REMOTE_CLUSTER_CREDENTIALS.getAsMap(settings); - logger.debug( - "Read cluster credentials for remote clusters [{}]", - Strings.collectionToCommaDelimitedString(clusterCredentials.keySet()) - ); - } - - public Optional resolve(final String clusterAlias) { - final SecureString apiKey = clusterCredentials.get(clusterAlias); - if (apiKey == null) { - return Optional.empty(); - } else { - return Optional.of(new RemoteClusterCredentials(clusterAlias, ApiKeyService.withApiKeyPrefix(apiKey.toString()))); - } - } - - record RemoteClusterCredentials(String clusterAlias, String credentials) { - @Override - public String toString() { - return "RemoteClusterCredentials{clusterAlias='" + clusterAlias + "', credentials='::es_redacted::'}"; - } - } -} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptor.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptor.java index 53dd31fe46793..162cabf5297ce 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptor.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptor.java @@ -12,6 +12,7 @@ import 
org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.admin.cluster.state.ClusterStateAction;
 import org.elasticsearch.action.support.DestructiveOperations;
+import org.elasticsearch.common.settings.SecureString;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.ssl.SslConfiguration;
 import org.elasticsearch.common.util.Maps;
@@ -24,6 +25,7 @@
 import org.elasticsearch.tasks.Task;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.RemoteConnectionManager;
+import org.elasticsearch.transport.RemoteConnectionManager.RemoteClusterAliasWithCredentials;
 import org.elasticsearch.transport.SendRequestTransportException;
 import org.elasticsearch.transport.Transport;
 import org.elasticsearch.transport.TransportChannel;
@@ -46,6 +48,7 @@
 import org.elasticsearch.xpack.core.ssl.SSLService;
 import org.elasticsearch.xpack.security.Security;
 import org.elasticsearch.xpack.security.audit.AuditUtil;
+import org.elasticsearch.xpack.security.authc.ApiKeyService;
 import org.elasticsearch.xpack.security.authc.AuthenticationService;
 import org.elasticsearch.xpack.security.authc.CrossClusterAccessAuthenticationService;
 import org.elasticsearch.xpack.security.authc.CrossClusterAccessHeaders;
@@ -63,7 +66,6 @@
 import static org.elasticsearch.transport.RemoteClusterPortSettings.REMOTE_CLUSTER_PROFILE;
 import static org.elasticsearch.transport.RemoteClusterPortSettings.REMOTE_CLUSTER_SERVER_ENABLED;
 import static org.elasticsearch.transport.RemoteClusterPortSettings.TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY;
-import static org.elasticsearch.xpack.security.transport.RemoteClusterCredentialsResolver.RemoteClusterCredentials;
 
 public class SecurityServerTransportInterceptor implements TransportInterceptor {
 
@@ -85,8 +87,7 @@ public class SecurityServerTransportInterceptor implements TransportInterceptor
     private final Settings settings;
     private final SecurityContext securityContext;
     private final CrossClusterAccessAuthenticationService crossClusterAccessAuthcService;
-    private final RemoteClusterCredentialsResolver remoteClusterCredentialsResolver;
-    private final Function<Transport.Connection, Optional<String>> remoteClusterAliasResolver;
+    private final Function<Transport.Connection, Optional<RemoteClusterAliasWithCredentials>> remoteClusterCredentialsResolver;
     private final XPackLicenseState licenseState;
 
     public SecurityServerTransportInterceptor(
@@ -98,7 +99,6 @@ public SecurityServerTransportInterceptor(
         SecurityContext securityContext,
         DestructiveOperations destructiveOperations,
         CrossClusterAccessAuthenticationService crossClusterAccessAuthcService,
-        RemoteClusterCredentialsResolver remoteClusterCredentialsResolver,
         XPackLicenseState licenseState
     ) {
         this(
@@ -110,9 +110,8 @@ public SecurityServerTransportInterceptor(
             securityContext,
             destructiveOperations,
             crossClusterAccessAuthcService,
-            remoteClusterCredentialsResolver,
             licenseState,
-            RemoteConnectionManager::resolveRemoteClusterAlias
+            RemoteConnectionManager::resolveRemoteClusterAliasWithCredentials
         );
     }
 
@@ -125,10 +124,9 @@ public SecurityServerTransportInterceptor(
         SecurityContext securityContext,
         DestructiveOperations destructiveOperations,
         CrossClusterAccessAuthenticationService crossClusterAccessAuthcService,
-        RemoteClusterCredentialsResolver remoteClusterCredentialsResolver,
         XPackLicenseState licenseState,
         // Inject for simplified testing
-        Function<Transport.Connection, Optional<String>> remoteClusterAliasResolver
+        Function<Transport.Connection, Optional<RemoteClusterAliasWithCredentials>> remoteClusterCredentialsResolver
     ) {
         this.settings = settings;
         this.threadPool = threadPool;
@@ -139,7 +137,6 @@ public SecurityServerTransportInterceptor(
this.crossClusterAccessAuthcService = crossClusterAccessAuthcService; this.licenseState = licenseState; this.remoteClusterCredentialsResolver = remoteClusterCredentialsResolver; - this.remoteClusterAliasResolver = remoteClusterAliasResolver; this.profileFilters = initializeProfileFilters(destructiveOperations); } @@ -159,7 +156,8 @@ public void sendRequest( TransportResponseHandler handler ) { assertNoCrossClusterAccessHeadersInContext(); - final Optional remoteClusterAlias = remoteClusterAliasResolver.apply(connection); + final Optional remoteClusterAlias = remoteClusterCredentialsResolver.apply(connection) + .map(RemoteClusterAliasWithCredentials::clusterAlias); if (PreAuthorizationUtils.shouldRemoveParentAuthorizationFromThreadContext(remoteClusterAlias, action, securityContext)) { securityContext.executeAfterRemovingParentAuthorization(original -> { sendRequestInner( @@ -278,22 +276,23 @@ public void sendRequest( * Returns cluster credentials if the connection is remote, and cluster credentials are set up for the target cluster. */ private Optional getRemoteClusterCredentials(Transport.Connection connection) { - final Optional optionalRemoteClusterAlias = remoteClusterAliasResolver.apply(connection); - if (optionalRemoteClusterAlias.isEmpty()) { + final Optional remoteClusterAliasWithCredentials = remoteClusterCredentialsResolver + .apply(connection); + if (remoteClusterAliasWithCredentials.isEmpty()) { logger.trace("Connection is not remote"); return Optional.empty(); } - final String remoteClusterAlias = optionalRemoteClusterAlias.get(); - final Optional remoteClusterCredentials = remoteClusterCredentialsResolver.resolve( - remoteClusterAlias - ); - if (remoteClusterCredentials.isEmpty()) { + final String remoteClusterAlias = remoteClusterAliasWithCredentials.get().clusterAlias(); + final SecureString remoteClusterCredentials = remoteClusterAliasWithCredentials.get().credentials(); + if (remoteClusterCredentials == null) { logger.trace("No cluster credentials are configured for remote cluster [{}]", remoteClusterAlias); return Optional.empty(); } - return remoteClusterCredentials; + return Optional.of( + new RemoteClusterCredentials(remoteClusterAlias, ApiKeyService.withApiKeyPrefix(remoteClusterCredentials.toString())) + ); } private void sendWithCrossClusterAccessHeaders( @@ -442,7 +441,7 @@ private void sendWithUser( throw new IllegalStateException("there should always be a user when sending a message for action [" + action + "]"); } - assert securityContext.getParentAuthorization() == null || remoteClusterAliasResolver.apply(connection).isPresent() == false + assert securityContext.getParentAuthorization() == null || remoteClusterCredentialsResolver.apply(connection).isEmpty() : "parent authorization header should not be set for remote cluster requests"; try { @@ -663,4 +662,12 @@ public void onFailure(Exception e) { } } } + + record RemoteClusterCredentials(String clusterAlias, String credentials) { + + @Override + public String toString() { + return "RemoteClusterCredentials{clusterAlias='" + clusterAlias + "', credentials='::es_redacted::'}"; + } + } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/LocalStateSecurity.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/LocalStateSecurity.java index d44e7c27d760e..a2aa04e0f56c3 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/LocalStateSecurity.java +++ 
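The refactor above folds alias resolution and credential lookup into one resolver, which makes "not a remote connection" and "remote but credential-less" two distinct empty results. A condensed sketch of that decision, with the resolver passed in as a function exactly as the constructor above injects it for testing; the wrapper class name is illustrative.

import java.util.Optional;
import java.util.function.Function;

import org.elasticsearch.common.settings.SecureString;
import org.elasticsearch.transport.RemoteConnectionManager.RemoteClusterAliasWithCredentials;
import org.elasticsearch.transport.Transport;
import org.elasticsearch.xpack.security.authc.ApiKeyService;

class CredentialsLookup {
    // Sketch: one resolver call, two distinct "no credentials" outcomes.
    static Optional<String> credentialsFor(
        Transport.Connection connection,
        Function<Transport.Connection, Optional<RemoteClusterAliasWithCredentials>> resolver
    ) {
        final Optional<RemoteClusterAliasWithCredentials> resolved = resolver.apply(connection);
        if (resolved.isEmpty()) {
            return Optional.empty(); // not a remote cluster connection
        }
        final SecureString credentials = resolved.get().credentials();
        if (credentials == null) {
            return Optional.empty(); // remote, but no credentials configured for this alias
        }
        return Optional.of(ApiKeyService.withApiKeyPrefix(credentials.toString()));
    }
}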
b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/LocalStateSecurity.java @@ -16,6 +16,7 @@ import org.elasticsearch.license.LicenseService; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.ReloadablePlugin; import org.elasticsearch.protocol.xpack.XPackInfoRequest; import org.elasticsearch.protocol.xpack.XPackInfoResponse; import org.elasticsearch.protocol.xpack.XPackUsageRequest; @@ -36,7 +37,7 @@ import java.util.Collections; import java.util.List; -public class LocalStateSecurity extends LocalStateCompositeXPackPlugin { +public class LocalStateSecurity extends LocalStateCompositeXPackPlugin implements ReloadablePlugin { public static class SecurityTransportXPackUsageAction extends TransportXPackUsageAction { @Inject @@ -130,4 +131,15 @@ protected Class> public List plugins() { return plugins; } + + @Override + public void reload(Settings settings) throws Exception { + plugins.stream().filter(p -> p instanceof ReloadablePlugin).forEach(p -> { + try { + ((ReloadablePlugin) p).reload(settings); + } catch (Exception e) { + throw new RuntimeException(e); + } + }); + } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java index 6773da137ac96..18929c70cbe7d 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java @@ -9,10 +9,13 @@ import org.apache.logging.log4j.Level; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchSecurityException; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionModule; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; @@ -72,6 +75,7 @@ import org.elasticsearch.xpack.core.security.SecurityContext; import org.elasticsearch.xpack.core.security.SecurityExtension; import org.elasticsearch.xpack.core.security.SecurityField; +import org.elasticsearch.xpack.core.security.action.ActionTypes; import org.elasticsearch.xpack.core.security.authc.Authentication; import org.elasticsearch.xpack.core.security.authc.AuthenticationTestHelper; import org.elasticsearch.xpack.core.security.authc.Realm; @@ -116,6 +120,7 @@ import java.util.function.Function; import java.util.function.Predicate; import java.util.stream.Collectors; +import java.util.stream.Stream; import static java.util.Collections.emptyMap; import static org.elasticsearch.xpack.core.security.authc.RealmSettings.getFullSettingKey; @@ -133,7 +138,10 @@ import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; public class SecurityTests extends ESTestCase { @@ -574,6 +582,32 @@ public void testValidateForFipsInvalidPasswordHashingAlgorithm() { assertThat(iae.getMessage(), 
containsString("Only PBKDF2 is allowed for stored credential hashing in a FIPS 140 JVM.")); } + public void testValidateForFipsRequiredProvider() { + final Settings settings = Settings.builder() + .put(XPackSettings.FIPS_MODE_ENABLED.getKey(), true) + .putList(XPackSettings.FIPS_REQUIRED_PROVIDERS.getKey(), List.of("BCFIPS")) + .build(); + if (inFipsJvm()) { + Security.validateForFips(settings); + // no exceptions since gradle has wired in the bouncy castle FIPS provider + } else { + final IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> Security.validateForFips(settings)); + assertThat(iae.getMessage(), containsString("Could not find required FIPS security provider: [bcfips]")); + } + + final Settings settings2 = Settings.builder() + .put(XPackSettings.FIPS_MODE_ENABLED.getKey(), true) + .putList(XPackSettings.FIPS_REQUIRED_PROVIDERS.getKey(), List.of("junk0", "BCFIPS", "junk1", "junk2")) + .build(); + if (inFipsJvm()) { + final IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> Security.validateForFips(settings2)); + assertThat(iae.getMessage(), containsString("Could not find required FIPS security provider: [junk0, junk1, junk2]")); + } else { + final IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> Security.validateForFips(settings2)); + assertThat(iae.getMessage(), containsString("Could not find required FIPS security provider: [junk0, bcfips, junk1, junk2]")); + } + } + public void testValidateForFipsMultipleValidationErrors() { final Settings settings = Settings.builder() .put(XPackSettings.FIPS_MODE_ENABLED.getKey(), true) @@ -877,6 +911,23 @@ public void testSecurityMustBeEnableToConnectRemoteClusterWithCredentials() { + "Please either enable security or remove these settings from the keystore." ) ); + + // Security off, remote cluster with credentials on reload call + final MockSecureSettings secureSettings5 = new MockSecureSettings(); + secureSettings5.setString("cluster.remote.my1.credentials", randomAlphaOfLength(20)); + secureSettings5.setString("cluster.remote.my2.credentials", randomAlphaOfLength(20)); + final Settings.Builder builder5 = Settings.builder().setSecureSettings(secureSettings5); + // Use builder with security disabled to construct valid Security instance + final var security = new Security(builder2.build()); + final IllegalArgumentException e5 = expectThrows(IllegalArgumentException.class, () -> security.reload(builder5.build())); + assertThat( + e5.getMessage(), + containsString( + "Found [2] remote clusters with credentials [cluster.remote.my1.credentials,cluster.remote.my2.credentials]. " + + "Security [xpack.security.enabled] must be enabled to connect to them. " + + "Please either enable security or remove these settings from the keystore." 
+ ) + ); } public void testLoadExtensions() throws Exception { @@ -905,6 +956,98 @@ public List loadExtensions(Class extensionPointType) { assertThat(registry, instanceOf(DummyOperatorOnlyRegistry.class)); } + public void testReload() throws Exception { + final Settings settings = Settings.builder().put("xpack.security.enabled", true).put("path.home", createTempDir()).build(); + + final PlainActionFuture value = new PlainActionFuture<>(); + final Client mockedClient = mock(Client.class); + + final Realms mockedRealms = mock(Realms.class); + when(mockedRealms.stream()).thenReturn(Stream.of()); + + doAnswer((inv) -> { + @SuppressWarnings("unchecked") + ActionListener listener = (ActionListener) inv.getArguments()[2]; + listener.onResponse(ActionResponse.Empty.INSTANCE); + return null; + }).when(mockedClient).execute(eq(ActionTypes.RELOAD_REMOTE_CLUSTER_CREDENTIALS_ACTION), any(), any()); + + security = new Security(settings, Collections.emptyList()) { + @Override + protected Client getClient() { + return mockedClient; + } + + @Override + protected Realms getRealms() { + return mockedRealms; + } + }; + + final Settings inputSettings = Settings.EMPTY; + security.reload(inputSettings); + + verify(mockedClient).execute(eq(ActionTypes.RELOAD_REMOTE_CLUSTER_CREDENTIALS_ACTION), any(), any()); + verify(mockedRealms).stream(); + } + + public void testReloadWithFailures() { + final Settings settings = Settings.builder().put("xpack.security.enabled", true).put("path.home", createTempDir()).build(); + + final boolean failRemoteClusterCredentialsReload = randomBoolean(); + final Client mockedClient = mock(Client.class); + if (failRemoteClusterCredentialsReload) { + doAnswer((inv) -> { + @SuppressWarnings("unchecked") + ActionListener listener = (ActionListener) inv.getArguments()[2]; + listener.onFailure(new RuntimeException("failed remote cluster credentials reload")); + return null; + }).when(mockedClient).execute(eq(ActionTypes.RELOAD_REMOTE_CLUSTER_CREDENTIALS_ACTION), any(), any()); + } else { + doAnswer((inv) -> { + @SuppressWarnings("unchecked") + ActionListener listener = (ActionListener) inv.getArguments()[2]; + listener.onResponse(ActionResponse.Empty.INSTANCE); + return null; + }).when(mockedClient).execute(eq(ActionTypes.RELOAD_REMOTE_CLUSTER_CREDENTIALS_ACTION), any(), any()); + } + + final Realms mockedRealms = mock(Realms.class); + final boolean failRealmsReload = (false == failRemoteClusterCredentialsReload) || randomBoolean(); + if (failRealmsReload) { + when(mockedRealms.stream()).thenThrow(new RuntimeException("failed jwt realms reload")); + } else { + when(mockedRealms.stream()).thenReturn(Stream.of()); + } + security = new Security(settings, Collections.emptyList()) { + @Override + protected Client getClient() { + return mockedClient; + } + + @Override + protected Realms getRealms() { + return mockedRealms; + } + }; + + final Settings inputSettings = Settings.EMPTY; + final var exception = expectThrows(ElasticsearchException.class, () -> security.reload(inputSettings)); + + assertThat(exception.getMessage(), containsString("secure settings reload failed for one or more security component")); + if (failRemoteClusterCredentialsReload) { + assertThat(exception.getSuppressed()[0].getMessage(), containsString("failed remote cluster credentials reload")); + if (failRealmsReload) { + assertThat(exception.getSuppressed()[1].getMessage(), containsString("failed jwt realms reload")); + } + } else { + assertThat(exception.getSuppressed()[0].getMessage(), containsString("failed jwt realms 
reload")); + } + // Verify both called despite failure + verify(mockedClient).execute(eq(ActionTypes.RELOAD_REMOTE_CLUSTER_CREDENTIALS_ACTION), any(), any()); + verify(mockedRealms).stream(); + } + public void testLoadNoExtensions() throws Exception { Settings settings = Settings.builder() .put("xpack.security.enabled", true) diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionActionTests.java index 60bc0f4d04f0b..c68ebd0a4d48d 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionActionTests.java @@ -28,7 +28,6 @@ import org.elasticsearch.action.search.ClearScrollResponse; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.search.SearchResponseSections; import org.elasticsearch.action.search.SearchScrollRequest; import org.elasticsearch.action.search.TransportClearScrollAction; import org.elasticsearch.action.search.TransportSearchAction; @@ -201,15 +200,13 @@ protected void ActionListener.respondAndRelease( listener, (Response) new SearchResponse( - new SearchResponseSections( - new SearchHits(hits, new TotalHits(hits.length, TotalHits.Relation.EQUAL_TO), 0f), - null, - null, - false, - false, - null, - 1 - ), + new SearchHits(hits, new TotalHits(hits.length, TotalHits.Relation.EQUAL_TO), 0f), + null, + null, + false, + false, + null, + 1, "_scrollId1", 1, 1, @@ -225,15 +222,13 @@ protected void ActionListener.respondAndRelease( listener, (Response) new SearchResponse( - new SearchResponseSections( - new SearchHits(hits, new TotalHits(hits.length, TotalHits.Relation.EQUAL_TO), 0f), - null, - null, - false, - false, - null, - 1 - ), + new SearchHits(hits, new TotalHits(hits.length, TotalHits.Relation.EQUAL_TO), 0f), + null, + null, + false, + false, + null, + 1, "_scrollId1", 1, 1, diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java index 517c29a8820d9..3f12c7d630dd3 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java @@ -60,7 +60,6 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.test.ClusterServiceUtils; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.MockLogAppender; @@ -411,25 +410,31 @@ public void testInvalidateApiKeysWillSetInvalidatedFlagAndRecordTimestamp() { builder.map(buildApiKeySourceDoc("some_hash".toCharArray())); searchHit.sourceRef(BytesReference.bytes(builder)); } - final var internalSearchResponse = new InternalSearchResponse( - new SearchHits( - new SearchHit[] { searchHit }, - new TotalHits(1, TotalHits.Relation.EQUAL_TO), - randomFloat(), + ActionListener.respondAndRelease( + listener, + new SearchResponse( + new SearchHits( + new SearchHit[] { searchHit 
}, + new TotalHits(1, TotalHits.Relation.EQUAL_TO), + randomFloat(), + null, + null, + null + ), + null, + null, + false, + null, null, + 0, + randomAlphaOfLengthBetween(3, 8), + 1, + 1, + 0, + 10, null, null - ), - null, - null, - null, - false, - null, - 0 - ); - ActionListener.respondAndRelease( - listener, - new SearchResponse(internalSearchResponse, randomAlphaOfLengthBetween(3, 8), 1, 1, 0, 10, null, null) + ) ); return null; }).when(client).search(any(SearchRequest.class), anyActionListener()); @@ -753,22 +758,20 @@ public void testCrossClusterApiKeyUsageStats() { ActionListener.respondAndRelease( listener, new SearchResponse( - new InternalSearchResponse( - new SearchHits( - searchHits.toArray(SearchHit[]::new), - new TotalHits(searchHits.size(), TotalHits.Relation.EQUAL_TO), - randomFloat(), - null, - null, - null - ), - null, - null, + new SearchHits( + searchHits.toArray(SearchHit[]::new), + new TotalHits(searchHits.size(), TotalHits.Relation.EQUAL_TO), + randomFloat(), null, - false, null, - 0 + null ), + null, + null, + false, + null, + null, + 0, randomAlphaOfLengthBetween(3, 8), 1, 1, @@ -2243,13 +2246,11 @@ public void testMaybeBuildUpdatedDocument() throws IOException { assertEquals(realm.getType(), updatedApiKeyDoc.creator.get("realm_type")); if (realm.getDomain() != null) { @SuppressWarnings("unchecked") - final var actualRealmDomain = RealmDomain.fromXContent( - XContentHelper.mapToXContentParser( - XContentParserConfiguration.EMPTY, - (Map) updatedApiKeyDoc.creator.get("realm_domain") - ) - ); - assertEquals(realm.getDomain(), actualRealmDomain); + var m = (Map) updatedApiKeyDoc.creator.get("realm_domain"); + try (var p = XContentHelper.mapToXContentParser(XContentParserConfiguration.EMPTY, m)) { + final var actualRealmDomain = RealmDomain.fromXContent(p); + assertEquals(realm.getDomain(), actualRealmDomain); + } } else { assertFalse(updatedApiKeyDoc.creator.containsKey("realm_domain")); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/service/IndexServiceAccountTokenStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/service/IndexServiceAccountTokenStoreTests.java index 8d5d89b4c5054..3a9fee4288bf2 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/service/IndexServiceAccountTokenStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/service/IndexServiceAccountTokenStoreTests.java @@ -42,7 +42,6 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.XContentTestUtils; import org.elasticsearch.threadpool.ThreadPool; @@ -270,19 +269,24 @@ public void testFindTokensFor() { ) ) .toArray(SearchHit[]::new); - final InternalSearchResponse internalSearchResponse; - internalSearchResponse = new InternalSearchResponse( - new SearchHits(hits, new TotalHits(nhits, TotalHits.Relation.EQUAL_TO), randomFloat(), null, null, null), - null, - null, - null, - false, - null, - 0 - ); ActionListener.respondAndRelease( l, - new SearchResponse(internalSearchResponse, randomAlphaOfLengthBetween(3, 8), 1, 1, 0, 10, null, null) + new SearchResponse( + new SearchHits(hits, new TotalHits(nhits, TotalHits.Relation.EQUAL_TO), randomFloat(), null, null, null), + null, + null, + false, + null, + null, + 0, + 
randomAlphaOfLengthBetween(3, 8), + 1, + 1, + 0, + 10, + null, + null + ) ); } else if (r instanceof ClearScrollRequest) { l.onResponse(new ClearScrollResponse(true, 1)); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java index 3b52f86c00ba8..169275ccc3ee3 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java @@ -27,7 +27,6 @@ import org.elasticsearch.script.mustache.MustacheScriptEngine; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.ToXContent; @@ -355,25 +354,31 @@ private void doAnswerWithSearchResult(Client client, ExpressionRoleMapping mappi mapping.toXContent(builder, ToXContent.EMPTY_PARAMS); searchHit.sourceRef(BytesReference.bytes(builder)); } - final var internalSearchResponse = new InternalSearchResponse( - new SearchHits( - new SearchHit[] { searchHit }, - new TotalHits(1, TotalHits.Relation.EQUAL_TO), - randomFloat(), + ActionListener.respondAndRelease( + listener, + new SearchResponse( + new SearchHits( + new SearchHit[] { searchHit }, + new TotalHits(1, TotalHits.Relation.EQUAL_TO), + randomFloat(), + null, + null, + null + ), + null, + null, + false, + null, null, + 0, + randomAlphaOfLengthBetween(3, 8), + 1, + 1, + 0, + 10, null, null - ), - null, - null, - null, - false, - null, - 0 - ); - ActionListener.respondAndRelease( - listener, - new SearchResponse(internalSearchResponse, randomAlphaOfLengthBetween(3, 8), 1, 1, 0, 10, null, null) + ) ); return null; }).when(client).search(any(SearchRequest.class), anyActionListener()); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java index e1ee174c4c29a..7f76e31f4f8c5 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.security.authz; import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.CompositeIndicesRequest; import org.elasticsearch.action.DocWriteRequest; @@ -35,8 +36,8 @@ import org.elasticsearch.action.admin.indices.settings.get.GetSettingsRequest; import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsAction; import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest; -import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresAction; import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresRequest; +import org.elasticsearch.action.admin.indices.shards.TransportIndicesShardStoresAction; import org.elasticsearch.action.admin.indices.stats.IndicesStatsAction; import 
org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest; import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateAction; @@ -61,6 +62,7 @@ import org.elasticsearch.action.search.MultiSearchRequest; import org.elasticsearch.action.search.OpenPointInTimeRequest; import org.elasticsearch.action.search.ParsedScrollId; +import org.elasticsearch.action.search.SearchContextId; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchScrollRequest; import org.elasticsearch.action.search.SearchTransportService; @@ -102,6 +104,7 @@ import org.elasticsearch.core.Releasable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; +import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.bulk.stats.BulkOperationListener; @@ -111,7 +114,12 @@ import org.elasticsearch.license.MockLicenseState; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.script.ScriptService; +import org.elasticsearch.search.SearchPhaseResult; +import org.elasticsearch.search.SearchShardTarget; +import org.elasticsearch.search.builder.PointInTimeBuilder; +import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.internal.AliasFilter; +import org.elasticsearch.search.internal.ShardSearchContextId; import org.elasticsearch.search.internal.ShardSearchRequest; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; @@ -1233,6 +1241,72 @@ public void testSearchAgainstIndex() { verifyNoMoreInteractions(auditTrail); } + public void testSearchPITAgainstIndex() { + RoleDescriptor role = new RoleDescriptor( + "search_index", + null, + new IndicesPrivileges[] { IndicesPrivileges.builder().indices("index-*").privileges("read").build() }, + null + ); + roleMap.put(role.getName(), role); + final Authentication authentication = createAuthentication(new User("test search user", role.getName())); + + final String requestId = AuditUtil.getOrGenerateRequestId(threadContext); + final String indexName = "index-" + randomAlphaOfLengthBetween(1, 5); + + final ClusterState clusterState = mockMetadataWithIndex(indexName); + final IndexMetadata indexMetadata = clusterState.metadata().index(indexName); + + PointInTimeBuilder pit = new PointInTimeBuilder(createEncodedPIT(indexMetadata.getIndex())); + SearchRequest searchRequest = new SearchRequest().source(new SearchSourceBuilder().pointInTimeBuilder(pit)) + .allowPartialSearchResults(false); + final ShardSearchRequest shardRequest = new ShardSearchRequest( + new OriginalIndices(new String[] { indexName }, searchRequest.indicesOptions()), + searchRequest, + new ShardId(indexMetadata.getIndex(), 0), + 0, + 1, + AliasFilter.EMPTY, + 1.0f, + System.currentTimeMillis(), + null + ); + this.setFakeOriginatingAction = false; + authorize(authentication, TransportSearchAction.TYPE.name(), searchRequest, true, () -> { + verify(rolesStore).getRoles(Mockito.same(authentication), Mockito.any()); + IndicesAccessControl iac = threadContext.getTransient(AuthorizationServiceField.INDICES_PERMISSIONS_KEY); + // Successful search action authorization should set a parent authorization header. 
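+ // (This header is what pre-authorizes the shard-level query action issued below as a direct child of the search; as the assertions further down show, it is not carried over to grandchild actions.)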
+ assertThat(securityContext.getParentAuthorization().action(), equalTo(TransportSearchAction.TYPE.name())); + // Within the action handler, execute a child action (the query phase of search) + authorize(authentication, SearchTransportService.QUERY_ACTION_NAME, shardRequest, false, () -> { + // This child action triggers a second interaction with the role store (which is cached) + verify(rolesStore, times(2)).getRoles(Mockito.same(authentication), Mockito.any()); + // But it does not create a new IndicesAccessControl + assertThat(threadContext.getTransient(AuthorizationServiceField.INDICES_PERMISSIONS_KEY), sameInstance(iac)); + // The parent authorization header should only be present for direct child actions + // and not be carried over for a child of a child actions. + // Meaning, only query phase action should be pre-authorized in this case and potential sub-actions should not. + assertThat(securityContext.getParentAuthorization(), nullValue()); + }); + }); + assertThat(searchRequest.indices().length, equalTo(0)); + verify(auditTrail).accessGranted( + eq(requestId), + eq(authentication), + eq(TransportSearchAction.TYPE.name()), + eq(searchRequest), + authzInfoRoles(new String[] { role.getName() }) + ); + verify(auditTrail).accessGranted( + eq(requestId), + eq(authentication), + eq(SearchTransportService.QUERY_ACTION_NAME), + eq(shardRequest), + authzInfoRoles(new String[] { role.getName() }) + ); + verifyNoMoreInteractions(auditTrail); + } + public void testScrollRelatedRequestsAllowed() { RoleDescriptor role = new RoleDescriptor( "a_all", @@ -2049,7 +2123,7 @@ public void testGrantAllRestrictedUserCannotExecuteOperationAgainstSecurityIndic ); requests.add( new Tuple<>( - IndicesShardStoresAction.NAME, + TransportIndicesShardStoresAction.TYPE.name(), new IndicesShardStoresRequest().indices(randomFrom(SECURITY_MAIN_ALIAS, INTERNAL_SECURITY_MAIN_INDEX_7)) ) ); @@ -2132,7 +2206,9 @@ public void testMonitoringOperationsAgainstSecurityIndexRequireAllowRestricted() requests.add(new Tuple<>(RecoveryAction.NAME, new RecoveryRequest().indices(SECURITY_MAIN_ALIAS))); requests.add(new Tuple<>(IndicesSegmentsAction.NAME, new IndicesSegmentsRequest().indices(SECURITY_MAIN_ALIAS))); requests.add(new Tuple<>(GetSettingsAction.NAME, new GetSettingsRequest().indices(SECURITY_MAIN_ALIAS))); - requests.add(new Tuple<>(IndicesShardStoresAction.NAME, new IndicesShardStoresRequest().indices(SECURITY_MAIN_ALIAS))); + requests.add( + new Tuple<>(TransportIndicesShardStoresAction.TYPE.name(), new IndicesShardStoresRequest().indices(SECURITY_MAIN_ALIAS)) + ); for (final Tuple requestTuple : requests) { final String action = requestTuple.v1(); @@ -3577,6 +3653,26 @@ static AuthorizationInfo authzInfoRoles(String[] expectedRoles) { return ArgumentMatchers.argThat(new RBACAuthorizationInfoRoleMatcher(expectedRoles)); } + private static class TestSearchPhaseResult extends SearchPhaseResult { + final DiscoveryNode node; + + TestSearchPhaseResult(ShardSearchContextId contextId, DiscoveryNode node) { + this.contextId = contextId; + this.node = node; + } + } + + private static String createEncodedPIT(Index index) { + DiscoveryNode node1 = DiscoveryNodeUtils.create("node_1"); + TestSearchPhaseResult testSearchPhaseResult1 = new TestSearchPhaseResult(new ShardSearchContextId("a", 1), node1); + testSearchPhaseResult1.setSearchShardTarget( + new SearchShardTarget("node_1", new ShardId(index.getName(), index.getUUID(), 0), null) + ); + List results = new ArrayList<>(); + results.add(testSearchPhaseResult1); + return 
SearchContextId.encode(results, Collections.emptyMap(), TransportVersion.current()); + } + private static class RBACAuthorizationInfoRoleMatcher implements ArgumentMatcher { private final String[] wanted; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreTests.java index d229124419cb2..afe5f32f70d28 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreTests.java @@ -22,7 +22,6 @@ import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.search.SearchResponseSections; import org.elasticsearch.action.search.SearchScrollRequest; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.WriteRequest; @@ -822,15 +821,13 @@ private SearchHit[] buildHits(List sourcePrivile private static SearchResponse buildSearchResponse(SearchHit[] hits) { return new SearchResponse( - new SearchResponseSections( - new SearchHits(hits, new TotalHits(hits.length, TotalHits.Relation.EQUAL_TO), 0f), - null, - null, - false, - false, - null, - 1 - ), + new SearchHits(hits, new TotalHits(hits.length, TotalHits.Relation.EQUAL_TO), 0f), + null, + null, + false, + false, + null, + 1, "_scrollId1", 1, 1, diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/SecurityRestFilterTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/SecurityRestFilterTests.java index 69884cd1e6dbd..d3b46f5847636 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/SecurityRestFilterTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/rest/SecurityRestFilterTests.java @@ -230,13 +230,17 @@ public Set getFilteredFields() { assertEquals(restRequest, handlerRequest.get()); assertEquals(restRequest.content(), handlerRequest.get().content()); - Map original = XContentType.JSON.xContent() - .createParser( - NamedXContentRegistry.EMPTY, - DeprecationHandler.THROW_UNSUPPORTED_OPERATION, - handlerRequest.get().content().streamInput() - ) - .map(); + Map original; + try ( + var parser = XContentType.JSON.xContent() + .createParser( + NamedXContentRegistry.EMPTY, + DeprecationHandler.THROW_UNSUPPORTED_OPERATION, + handlerRequest.get().content().streamInput() + ) + ) { + original = parser.map(); + } assertEquals(2, original.size()); assertEquals(SecuritySettingsSourceField.TEST_PASSWORD, original.get("password")); assertEquals("bar", original.get("foo")); @@ -244,13 +248,17 @@ public Set getFilteredFields() { assertNotEquals(restRequest, auditTrailRequest.get()); assertNotEquals(restRequest.content(), auditTrailRequest.get().content()); - Map map = XContentType.JSON.xContent() - .createParser( - NamedXContentRegistry.EMPTY, - DeprecationHandler.THROW_UNSUPPORTED_OPERATION, - auditTrailRequest.get().content().streamInput() - ) - .map(); + Map map; + try ( + var parser = XContentType.JSON.xContent() + .createParser( + NamedXContentRegistry.EMPTY, + DeprecationHandler.THROW_UNSUPPORTED_OPERATION, + auditTrailRequest.get().content().streamInput() + ) + ) { + map = parser.map(); + } 
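// The try-with-resources block added above guarantees the JSON parser (and the request-content stream it wraps) is closed as soon as the map has been read, instead of being left open by the old fluent createParser(...).map() chain.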
assertEquals(1, map.size()); assertEquals("bar", map.get("foo")); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/RemoteClusterCredentialsResolverTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/RemoteClusterCredentialsResolverTests.java deleted file mode 100644 index debb50384e217..0000000000000 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/RemoteClusterCredentialsResolverTests.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ -package org.elasticsearch.xpack.security.transport; - -import org.elasticsearch.common.settings.MockSecureSettings; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xpack.security.authc.ApiKeyService; - -import java.util.Optional; - -import static org.elasticsearch.xpack.security.transport.RemoteClusterCredentialsResolver.RemoteClusterCredentials; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.is; - -public class RemoteClusterCredentialsResolverTests extends ESTestCase { - - public void testResolveRemoteClusterCredentials() { - final String clusterNameA = "clusterA"; - final String clusterDoesNotExist = randomAlphaOfLength(10); - final Settings.Builder builder = Settings.builder(); - - final String secret = randomAlphaOfLength(20); - final MockSecureSettings secureSettings = new MockSecureSettings(); - secureSettings.setString("cluster.remote." + clusterNameA + ".credentials", secret); - final Settings settings = builder.setSecureSettings(secureSettings).build(); - RemoteClusterCredentialsResolver remoteClusterAuthorizationResolver = new RemoteClusterCredentialsResolver(settings); - final Optional remoteClusterCredentials = remoteClusterAuthorizationResolver.resolve(clusterNameA); - assertThat(remoteClusterCredentials.isPresent(), is(true)); - assertThat(remoteClusterCredentials.get().clusterAlias(), equalTo(clusterNameA)); - assertThat(remoteClusterCredentials.get().credentials(), equalTo(ApiKeyService.withApiKeyPrefix(secret))); - assertThat(remoteClusterAuthorizationResolver.resolve(clusterDoesNotExist), is(Optional.empty())); - } -} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptorTests.java index 57e48581d159c..46b0fac78ad8e 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptorTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptorTests.java @@ -18,6 +18,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.ssl.SslClientAuthenticationMode; import org.elasticsearch.common.ssl.SslConfiguration; @@ -33,6 +34,7 @@ import org.elasticsearch.threadpool.TestThreadPool; import 
org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.RemoteClusterPortSettings; +import org.elasticsearch.transport.RemoteConnectionManager.RemoteClusterAliasWithCredentials; import org.elasticsearch.transport.SendRequestTransportException; import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.Transport.Connection; @@ -77,6 +79,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Consumer; +import java.util.function.Function; import static org.elasticsearch.cluster.metadata.DataStreamLifecycle.DATA_STREAM_LIFECYCLE_ORIGIN; import static org.elasticsearch.test.ActionListenerUtils.anyActionListener; @@ -87,7 +90,6 @@ import static org.elasticsearch.xpack.core.security.authc.CrossClusterAccessSubjectInfo.CROSS_CLUSTER_ACCESS_SUBJECT_INFO_HEADER_KEY; import static org.elasticsearch.xpack.core.security.authz.RoleDescriptorTests.randomUniquelyNamedRoleDescriptors; import static org.elasticsearch.xpack.security.authc.CrossClusterAccessHeaders.CROSS_CLUSTER_ACCESS_CREDENTIALS_HEADER_KEY; -import static org.elasticsearch.xpack.security.transport.RemoteClusterCredentialsResolver.RemoteClusterCredentials; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.equalTo; @@ -153,7 +155,6 @@ public void testSendAsync() throws Exception { new ClusterSettings(Settings.EMPTY, Collections.singleton(DestructiveOperations.REQUIRES_NAME_SETTING)) ), mock(CrossClusterAccessAuthenticationService.class), - new RemoteClusterCredentialsResolver(settings), mockLicenseState ); ClusterServiceUtils.setState(clusterService, clusterService.state()); // force state update to trigger listener @@ -205,7 +206,6 @@ public void testSendAsyncSwitchToSystem() throws Exception { new ClusterSettings(Settings.EMPTY, Collections.singleton(DestructiveOperations.REQUIRES_NAME_SETTING)) ), mock(CrossClusterAccessAuthenticationService.class), - new RemoteClusterCredentialsResolver(settings), mockLicenseState ); ClusterServiceUtils.setState(clusterService, clusterService.state()); // force state update to trigger listener @@ -250,7 +250,6 @@ public void testSendWithoutUser() throws Exception { new ClusterSettings(Settings.EMPTY, Collections.singleton(DestructiveOperations.REQUIRES_NAME_SETTING)) ), mock(CrossClusterAccessAuthenticationService.class), - new RemoteClusterCredentialsResolver(settings), mockLicenseState ) { @Override @@ -313,7 +312,6 @@ public void testSendToNewerVersionSetsCorrectVersion() throws Exception { new ClusterSettings(Settings.EMPTY, Collections.singleton(DestructiveOperations.REQUIRES_NAME_SETTING)) ), mock(CrossClusterAccessAuthenticationService.class), - new RemoteClusterCredentialsResolver(settings), mockLicenseState ); ClusterServiceUtils.setState(clusterService, clusterService.state()); // force state update to trigger listener @@ -382,7 +380,6 @@ public void testSendToOlderVersionSetsCorrectVersion() throws Exception { new ClusterSettings(Settings.EMPTY, Collections.singleton(DestructiveOperations.REQUIRES_NAME_SETTING)) ), mock(CrossClusterAccessAuthenticationService.class), - new RemoteClusterCredentialsResolver(settings), mockLicenseState ); ClusterServiceUtils.setState(clusterService, clusterService.state()); // force state update to trigger listener @@ -449,7 +446,6 @@ public void testSetUserBasedOnActionOrigin() { new ClusterSettings(Settings.EMPTY, 
Collections.singleton(DestructiveOperations.REQUIRES_NAME_SETTING)) ), mock(CrossClusterAccessAuthenticationService.class), - new RemoteClusterCredentialsResolver(settings), mockLicenseState ); @@ -604,7 +600,6 @@ public void testSendWithCrossClusterAccessHeadersWithUnsupportedLicense() throws AuthenticationTestHelper.builder().build().writeToContext(threadContext); final String remoteClusterAlias = randomAlphaOfLengthBetween(5, 10); - final RemoteClusterCredentialsResolver remoteClusterCredentialsResolver = mockRemoteClusterCredentialsResolver(remoteClusterAlias); final SecurityServerTransportInterceptor interceptor = new SecurityServerTransportInterceptor( settings, @@ -618,9 +613,8 @@ public void testSendWithCrossClusterAccessHeadersWithUnsupportedLicense() throws new ClusterSettings(Settings.EMPTY, Collections.singleton(DestructiveOperations.REQUIRES_NAME_SETTING)) ), mock(CrossClusterAccessAuthenticationService.class), - remoteClusterCredentialsResolver, unsupportedLicenseState, - ignored -> Optional.of(remoteClusterAlias) + mockRemoteClusterCredentialsResolver(remoteClusterAlias) ); final AsyncSender sender = interceptor.interceptSender(mock(AsyncSender.class, ignored -> { @@ -661,18 +655,16 @@ public TransportResponse read(StreamInput in) { actualException.get().getCause().getMessage(), equalTo("current license is non-compliant for [" + Security.ADVANCED_REMOTE_CLUSTER_SECURITY_FEATURE.getName() + "]") ); - verify(remoteClusterCredentialsResolver, times(1)).resolve(eq(remoteClusterAlias)); assertThat(securityContext.getThreadContext().getHeader(CROSS_CLUSTER_ACCESS_SUBJECT_INFO_HEADER_KEY), nullValue()); assertThat(securityContext.getThreadContext().getHeader(CROSS_CLUSTER_ACCESS_CREDENTIALS_HEADER_KEY), nullValue()); } - private RemoteClusterCredentialsResolver mockRemoteClusterCredentialsResolver(String remoteClusterAlias) { - final RemoteClusterCredentialsResolver remoteClusterCredentialsResolver = mock(RemoteClusterCredentialsResolver.class); - final String remoteClusterCredential = ApiKeyService.withApiKeyPrefix(randomAlphaOfLengthBetween(10, 42)); - when(remoteClusterCredentialsResolver.resolve(any())).thenReturn( - Optional.of(new RemoteClusterCredentials(remoteClusterAlias, remoteClusterCredential)) + private Function> mockRemoteClusterCredentialsResolver( + String remoteClusterAlias + ) { + return connection -> Optional.of( + new RemoteClusterAliasWithCredentials(remoteClusterAlias, new SecureString(randomAlphaOfLengthBetween(10, 42).toCharArray())) ); - return remoteClusterCredentialsResolver; } public void testSendWithCrossClusterAccessHeadersForSystemUserRegularAction() throws Exception { @@ -736,12 +728,9 @@ private void doTestSendWithCrossClusterAccessHeaders( ) throws IOException { authentication.writeToContext(threadContext); final String expectedRequestId = AuditUtil.getOrGenerateRequestId(threadContext); - final RemoteClusterCredentialsResolver remoteClusterCredentialsResolver = mock(RemoteClusterCredentialsResolver.class); final String remoteClusterAlias = randomAlphaOfLengthBetween(5, 10); - final String remoteClusterCredential = ApiKeyService.withApiKeyPrefix(randomAlphaOfLengthBetween(10, 42)); - when(remoteClusterCredentialsResolver.resolve(any())).thenReturn( - Optional.of(new RemoteClusterCredentials(remoteClusterAlias, remoteClusterCredential)) - ); + final String encodedApiKey = randomAlphaOfLengthBetween(10, 42); + final String remoteClusterCredential = ApiKeyService.withApiKeyPrefix(encodedApiKey); final AuthorizationService authzService = 
mock(AuthorizationService.class); // We capture the listener so that we can complete the full flow, by calling onResponse further down @SuppressWarnings("unchecked") @@ -760,9 +749,8 @@ private void doTestSendWithCrossClusterAccessHeaders( new ClusterSettings(Settings.EMPTY, Collections.singleton(DestructiveOperations.REQUIRES_NAME_SETTING)) ), mock(CrossClusterAccessAuthenticationService.class), - remoteClusterCredentialsResolver, mockLicenseState, - ignored -> Optional.of(remoteClusterAlias) + ignored -> Optional.of(new RemoteClusterAliasWithCredentials(remoteClusterAlias, new SecureString(encodedApiKey.toCharArray()))) ); final AtomicBoolean calledWrappedSender = new AtomicBoolean(false); @@ -861,7 +849,6 @@ public TransportResponse read(StreamInput in) { } assertThat(sentCredential.get(), equalTo(remoteClusterCredential)); verify(securityContext, never()).executeAsInternalUser(any(), any(), anyConsumer()); - verify(remoteClusterCredentialsResolver, times(1)).resolve(eq(remoteClusterAlias)); assertThat(securityContext.getThreadContext().getHeader(CROSS_CLUSTER_ACCESS_SUBJECT_INFO_HEADER_KEY), nullValue()); assertThat(securityContext.getThreadContext().getHeader(CROSS_CLUSTER_ACCESS_CREDENTIALS_HEADER_KEY), nullValue()); assertThat(AuditUtil.extractRequestId(securityContext.getThreadContext()), equalTo(expectedRequestId)); @@ -874,15 +861,9 @@ public void testSendWithUserIfCrossClusterAccessHeadersConditionNotMet() throws if (false == (notRemoteConnection || noCredential)) { noCredential = true; } + final boolean finalNoCredential = noCredential; final String remoteClusterAlias = randomAlphaOfLengthBetween(5, 10); - final RemoteClusterCredentialsResolver remoteClusterCredentialsResolver = mock(RemoteClusterCredentialsResolver.class); - when(remoteClusterCredentialsResolver.resolve(any())).thenReturn( - noCredential - ? Optional.empty() - : Optional.of( - new RemoteClusterCredentials(remoteClusterAlias, ApiKeyService.withApiKeyPrefix(randomAlphaOfLengthBetween(10, 42))) - ) - ); + final String encodedApiKey = randomAlphaOfLengthBetween(10, 42); final AuthenticationTestHelper.AuthenticationTestBuilder builder = AuthenticationTestHelper.builder(); final Authentication authentication = randomFrom( builder.apiKey().build(), @@ -904,9 +885,12 @@ public void testSendWithUserIfCrossClusterAccessHeadersConditionNotMet() throws new ClusterSettings(Settings.EMPTY, Collections.singleton(DestructiveOperations.REQUIRES_NAME_SETTING)) ), mock(CrossClusterAccessAuthenticationService.class), - remoteClusterCredentialsResolver, mockLicenseState, - ignored -> notRemoteConnection ? Optional.empty() : Optional.of(remoteClusterAlias) + ignored -> notRemoteConnection + ? Optional.empty() + : (finalNoCredential + ? 
Optional.of(new RemoteClusterAliasWithCredentials(remoteClusterAlias, null)) + : Optional.of(new RemoteClusterAliasWithCredentials(remoteClusterAlias, new SecureString(encodedApiKey.toCharArray())))) ); final AtomicBoolean calledWrappedSender = new AtomicBoolean(false); @@ -944,12 +928,9 @@ public void testSendWithCrossClusterAccessHeadersThrowsOnOldConnection() throws .realm() .build(); authentication.writeToContext(threadContext); - final RemoteClusterCredentialsResolver remoteClusterCredentialsResolver = mock(RemoteClusterCredentialsResolver.class); final String remoteClusterAlias = randomAlphaOfLengthBetween(5, 10); - final String remoteClusterCredential = ApiKeyService.withApiKeyPrefix(randomAlphaOfLengthBetween(10, 42)); - when(remoteClusterCredentialsResolver.resolve(any())).thenReturn( - Optional.of(new RemoteClusterCredentials(remoteClusterAlias, remoteClusterCredential)) - ); + final String encodedApiKey = randomAlphaOfLengthBetween(10, 42); + final String remoteClusterCredential = ApiKeyService.withApiKeyPrefix(encodedApiKey); final SecurityServerTransportInterceptor interceptor = new SecurityServerTransportInterceptor( settings, @@ -963,9 +944,8 @@ public void testSendWithCrossClusterAccessHeadersThrowsOnOldConnection() throws new ClusterSettings(Settings.EMPTY, Collections.singleton(DestructiveOperations.REQUIRES_NAME_SETTING)) ), mock(CrossClusterAccessAuthenticationService.class), - remoteClusterCredentialsResolver, mockLicenseState, - ignored -> Optional.of(remoteClusterAlias) + ignored -> Optional.of(new RemoteClusterAliasWithCredentials(remoteClusterAlias, new SecureString(encodedApiKey.toCharArray()))) ); final AsyncSender sender = interceptor.interceptSender(new AsyncSender() { @@ -1029,7 +1009,6 @@ public TransportResponse read(StreamInput in) { + "] does not support receiving them" ) ); - verify(remoteClusterCredentialsResolver, times(1)).resolve(eq(remoteClusterAlias)); assertThat(securityContext.getThreadContext().getHeader(CROSS_CLUSTER_ACCESS_SUBJECT_INFO_HEADER_KEY), nullValue()); assertThat(securityContext.getThreadContext().getHeader(CROSS_CLUSTER_ACCESS_CREDENTIALS_HEADER_KEY), nullValue()); } @@ -1040,12 +1019,9 @@ public void testSendRemoteRequestFailsIfUserHasNoRemoteIndicesPrivileges() throw .realm() .build(); authentication.writeToContext(threadContext); - final RemoteClusterCredentialsResolver remoteClusterCredentialsResolver = mock(RemoteClusterCredentialsResolver.class); final String remoteClusterAlias = randomAlphaOfLengthBetween(5, 10); - final String remoteClusterCredential = ApiKeyService.withApiKeyPrefix(randomAlphaOfLengthBetween(10, 42)); - when(remoteClusterCredentialsResolver.resolve(any())).thenReturn( - Optional.of(new RemoteClusterCredentials(remoteClusterAlias, remoteClusterCredential)) - ); + final String encodedApiKey = randomAlphaOfLengthBetween(10, 42); + final String remoteClusterCredential = ApiKeyService.withApiKeyPrefix(encodedApiKey); final AuthorizationService authzService = mock(AuthorizationService.class); doAnswer(invocation -> { @@ -1067,9 +1043,8 @@ public void testSendRemoteRequestFailsIfUserHasNoRemoteIndicesPrivileges() throw new ClusterSettings(Settings.EMPTY, Collections.singleton(DestructiveOperations.REQUIRES_NAME_SETTING)) ), mock(CrossClusterAccessAuthenticationService.class), - remoteClusterCredentialsResolver, mockLicenseState, - ignored -> Optional.of(remoteClusterAlias) + ignored -> Optional.of(new RemoteClusterAliasWithCredentials(remoteClusterAlias, new SecureString(encodedApiKey.toCharArray()))) ); final 
AsyncSender sender = interceptor.interceptSender(new AsyncSender() { @@ -1171,7 +1146,6 @@ public void testProfileFiltersCreatedDifferentlyForDifferentTransportAndRemoteCl new ClusterSettings(Settings.EMPTY, Collections.singleton(DestructiveOperations.REQUIRES_NAME_SETTING)) ), mock(CrossClusterAccessAuthenticationService.class), - new RemoteClusterCredentialsResolver(settings), mockLicenseState ); @@ -1225,7 +1199,6 @@ public void testNoProfileFilterForRemoteClusterWhenTheFeatureIsDisabled() { new ClusterSettings(Settings.EMPTY, Collections.singleton(DestructiveOperations.REQUIRES_NAME_SETTING)) ), mock(CrossClusterAccessAuthenticationService.class), - new RemoteClusterCredentialsResolver(settings), mockLicenseState ); diff --git a/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/RestGetShutdownStatusAction.java b/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/RestGetShutdownStatusAction.java index 5d10f1a3d517e..babe2174b0952 100644 --- a/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/RestGetShutdownStatusAction.java +++ b/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/RestGetShutdownStatusAction.java @@ -14,7 +14,7 @@ import org.elasticsearch.rest.Scope; import org.elasticsearch.rest.ServerlessScope; import org.elasticsearch.rest.action.RestCancellableNodeClient; -import org.elasticsearch.rest.action.RestChunkedToXContentListener; +import org.elasticsearch.rest.action.RestRefCountedChunkedToXContentListener; import java.util.List; @@ -40,7 +40,7 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli return channel -> new RestCancellableNodeClient(client, request.getHttpChannel()).execute( GetShutdownStatusAction.INSTANCE, new GetShutdownStatusAction.Request(nodeIds), - new RestChunkedToXContentListener<>(channel) + new RestRefCountedChunkedToXContentListener<>(channel) ); } } diff --git a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/history/SnapshotLifecycleTemplateRegistry.java b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/history/SnapshotLifecycleTemplateRegistry.java index a72283682b258..31c624df67813 100644 --- a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/history/SnapshotLifecycleTemplateRegistry.java +++ b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/history/SnapshotLifecycleTemplateRegistry.java @@ -42,10 +42,11 @@ public class SnapshotLifecycleTemplateRegistry extends IndexTemplateRegistry { // version 4:converted data stream // version 5: add `allow_auto_create` setting // version 6: manage by data stream lifecycle - public static final int INDEX_TEMPLATE_VERSION = 6; + // version 7: version the index template name so we can upgrade existing deployments + public static final int INDEX_TEMPLATE_VERSION = 7; public static final String SLM_TEMPLATE_VERSION_VARIABLE = "xpack.slm.template.version"; - public static final String SLM_TEMPLATE_NAME = ".slm-history"; + public static final String SLM_TEMPLATE_NAME = ".slm-history-" + INDEX_TEMPLATE_VERSION; public static final String SLM_POLICY_NAME = "slm-history-ilm-policy"; diff --git a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/history/SnapshotLifecycleTemplateRegistryTests.java b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/history/SnapshotLifecycleTemplateRegistryTests.java index 813d239ba9099..3601b3c010739 100644 --- 
a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/history/SnapshotLifecycleTemplateRegistryTests.java +++ b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/history/SnapshotLifecycleTemplateRegistryTests.java @@ -64,6 +64,7 @@ import static org.elasticsearch.xpack.slm.history.SnapshotLifecycleTemplateRegistry.SLM_POLICY_NAME; import static org.elasticsearch.xpack.slm.history.SnapshotLifecycleTemplateRegistry.SLM_TEMPLATE_NAME; import static org.hamcrest.Matchers.anEmptyMap; +import static org.hamcrest.Matchers.endsWith; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.hasSize; @@ -317,6 +318,10 @@ public void testValidate() { ); } + public void testTemplateNameIsVersioned() { + assertThat(SLM_TEMPLATE_NAME, endsWith("-" + INDEX_TEMPLATE_VERSION)); + } + // ------------- /** diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/GeoShapeWithDocValuesFieldTypeTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/GeoShapeWithDocValuesFieldTypeTests.java index 8364ad3d4c027..7c5fa5053222b 100644 --- a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/GeoShapeWithDocValuesFieldTypeTests.java +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/GeoShapeWithDocValuesFieldTypeTests.java @@ -25,16 +25,18 @@ import org.elasticsearch.index.mapper.MapperBuilderContext; import org.elasticsearch.xpack.vectortile.SpatialGeometryFormatterExtension; import org.elasticsearch.xpack.vectortile.feature.FeatureFactory; -import org.hamcrest.Matchers; import java.io.IOException; import java.nio.ByteOrder; import java.util.List; import java.util.Map; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; + public class GeoShapeWithDocValuesFieldTypeTests extends FieldTypeTestCase { - public void testFetchSourceValue() throws IOException { + public void testFetchSourceValue() throws Exception { final GeoFormatterFactory geoFormatterFactory = new GeoFormatterFactory<>( new SpatialGeometryFormatterExtension().getGeometryFormatterFactories() ); @@ -53,26 +55,43 @@ public void testFetchSourceValue() throws IOException { String wktLineString = "LINESTRING (42.0 27.1, 30.0 50.0)"; String wktPoint = "POINT (14.0 15.0)"; String wktMalformed = "POINT foo"; + byte[] wkbLine = WellKnownBinary.toWKB( + WellKnownText.fromWKT(StandardValidator.NOOP, false, wktLineString), + ByteOrder.LITTLE_ENDIAN + ); + byte[] wkbPoint = WellKnownBinary.toWKB(WellKnownText.fromWKT(StandardValidator.NOOP, false, wktPoint), ByteOrder.LITTLE_ENDIAN); // Test a single shape in geojson format. Object sourceValue = jsonLineString; assertEquals(List.of(jsonLineString), fetchSourceValue(mapper, sourceValue, null)); assertEquals(List.of(wktLineString), fetchSourceValue(mapper, sourceValue, "wkt")); + List wkb = fetchSourceValue(mapper, sourceValue, "wkb"); + assertThat(wkb.size(), equalTo(1)); + assertThat(wkb.get(0), equalTo(wkbLine)); // Test a malformed single shape in geojson format sourceValue = jsonMalformed; assertEquals(List.of(), fetchSourceValue(mapper, sourceValue, null)); assertEquals(List.of(), fetchSourceValue(mapper, sourceValue, "wkt")); + assertEquals(List.of(), fetchSourceValue(mapper, sourceValue, "wkb")); // Test a list of shapes in geojson format. 
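// (the wkb assertions added below check that each element also round-trips to the little-endian WKB bytes computed above from the expected WKT)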
sourceValue = List.of(jsonLineString, jsonPoint); assertEquals(List.of(jsonLineString, jsonPoint), fetchSourceValue(mapper, sourceValue, null)); assertEquals(List.of(wktLineString, wktPoint), fetchSourceValue(mapper, sourceValue, "wkt")); + wkb = fetchSourceValue(mapper, sourceValue, "wkb"); + assertThat(wkb.size(), equalTo(2)); + assertThat(wkb.get(0), equalTo(wkbLine)); + assertThat(wkb.get(1), equalTo(wkbPoint)); // Test a list of shapes including one malformed in geojson format sourceValue = List.of(jsonLineString, jsonMalformed, jsonPoint); assertEquals(List.of(jsonLineString, jsonPoint), fetchSourceValue(mapper, sourceValue, null)); assertEquals(List.of(wktLineString, wktPoint), fetchSourceValue(mapper, sourceValue, "wkt")); + wkb = fetchSourceValue(mapper, sourceValue, "wkb"); + assertThat(wkb.size(), equalTo(2)); + assertThat(wkb.get(0), equalTo(wkbLine)); + assertThat(wkb.get(1), equalTo(wkbPoint)); // Test a single shape in wkt format. sourceValue = wktLineString; @@ -109,26 +128,31 @@ public void testFetchStoredValue() throws IOException { geoFormatterFactory ).setStored(true).build(MapperBuilderContext.root(randomBoolean(), false)).fieldType(); - ByteOrder byteOrder = randomBoolean() ? ByteOrder.BIG_ENDIAN : ByteOrder.LITTLE_ENDIAN; - Map jsonLineString = Map.of("type", "LineString", "coordinates", List.of(List.of(42.0, 27.1), List.of(30.0, 50.0))); Map jsonPoint = Map.of("type", "Point", "coordinates", List.of(14.0, 15.0)); String wktLineString = "LINESTRING (42.0 27.1, 30.0 50.0)"; String wktPoint = "POINT (14.0 15.0)"; BytesRef wkbLineString = new BytesRef( - WellKnownBinary.toWKB(new Line(new double[] { 42.0, 30.0 }, new double[] { 27.1, 50.0 }), byteOrder) + WellKnownBinary.toWKB(new Line(new double[] { 42.0, 30.0 }, new double[] { 27.1, 50.0 }), ByteOrder.LITTLE_ENDIAN) ); - BytesRef wkbPoint = new BytesRef(WellKnownBinary.toWKB(new Point(14.0, 15.0), byteOrder)); + BytesRef wkbPoint = new BytesRef(WellKnownBinary.toWKB(new Point(14.0, 15.0), ByteOrder.LITTLE_ENDIAN)); // Test a single shape in wkb format. List storedValues = List.of(wkbLineString); assertEquals(List.of(jsonLineString), fetchStoredValue(mapper, storedValues, null)); assertEquals(List.of(wktLineString), fetchStoredValue(mapper, storedValues, "wkt")); + List wkb = fetchStoredValue(mapper, storedValues, "wkb"); + assertThat(wkb.size(), equalTo(1)); + assertThat(wkb.get(0), equalTo(wkbLineString.bytes)); // Test a list of shapes in wkb format. storedValues = List.of(wkbLineString, wkbPoint); assertEquals(List.of(jsonLineString, jsonPoint), fetchStoredValue(mapper, storedValues, null)); assertEquals(List.of(wktLineString, wktPoint), fetchStoredValue(mapper, storedValues, "wkt")); + wkb = fetchStoredValue(mapper, storedValues, "wkb"); + assertThat(wkb.size(), equalTo(2)); + assertThat(wkb.get(0), equalTo(wkbLineString.bytes)); + assertThat(wkb.get(1), equalTo(wkbPoint.bytes)); } public void testFetchVectorTile() throws IOException { @@ -180,9 +204,9 @@ private void fetchVectorTile(Geometry geometry) throws IOException { // happen that the geometry is out of range (close to the poles). 
features = List.of(); } - assertThat(features.size(), Matchers.equalTo(sourceValue.size())); + assertThat(features.size(), equalTo(sourceValue.size())); for (int i = 0; i < features.size(); i++) { - assertThat(sourceValue.get(i), Matchers.equalTo(features.get(i))); + assertThat(sourceValue.get(i), equalTo(features.get(i))); } } @@ -308,10 +332,10 @@ private void assertFetchSourceMVT(Object sourceValue, String mvtEquivalentAsWKT) final int extent = randomIntBetween(256, 4096); List mvtExpected = fetchSourceValue(mapper, mvtEquivalentAsWKT, "mvt(0/0/0@" + extent + ")"); List mvt = fetchSourceValue(mapper, sourceValue, "mvt(0/0/0@" + extent + ")"); - assertThat(mvt.size(), Matchers.equalTo(1)); - assertThat(mvt.size(), Matchers.equalTo(mvtExpected.size())); - assertThat(mvtExpected.get(0), Matchers.instanceOf(byte[].class)); - assertThat(mvt.get(0), Matchers.instanceOf(byte[].class)); - assertThat((byte[]) mvt.get(0), Matchers.equalTo((byte[]) mvtExpected.get(0))); + assertThat(mvt.size(), equalTo(1)); + assertThat(mvt.size(), equalTo(mvtExpected.size())); + assertThat(mvtExpected.get(0), instanceOf(byte[].class)); + assertThat(mvt.get(0), instanceOf(byte[].class)); + assertThat((byte[]) mvt.get(0), equalTo((byte[]) mvtExpected.get(0))); } } diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/PointFieldTypeTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/PointFieldTypeTests.java index ed902b0f8cfe1..6524860e9438c 100644 --- a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/PointFieldTypeTests.java +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/PointFieldTypeTests.java @@ -7,14 +7,19 @@ package org.elasticsearch.xpack.spatial.index.mapper; +import org.elasticsearch.geometry.Point; +import org.elasticsearch.geometry.utils.WellKnownBinary; import org.elasticsearch.index.mapper.FieldTypeTestCase; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperBuilderContext; import java.io.IOException; +import java.nio.ByteOrder; import java.util.List; import java.util.Map; +import static org.hamcrest.Matchers.equalTo; + public class PointFieldTypeTests extends FieldTypeTestCase { public void testFetchSourceValue() throws IOException { @@ -24,26 +29,39 @@ public void testFetchSourceValue() throws IOException { String wktPoint = "POINT (42.0 27.1)"; Map otherJsonPoint = Map.of("type", "Point", "coordinates", List.of(30.0, 50.0)); String otherWktPoint = "POINT (30.0 50.0)"; + byte[] wkbPoint = WellKnownBinary.toWKB(new Point(42.0, 27.1), ByteOrder.LITTLE_ENDIAN); + byte[] otherWkbPoint = WellKnownBinary.toWKB(new Point(30.0, 50.0), ByteOrder.LITTLE_ENDIAN); // Test a single point in [x, y] array format. Object sourceValue = List.of(42.0, 27.1); assertEquals(List.of(jsonPoint), fetchSourceValue(mapper, sourceValue, null)); assertEquals(List.of(wktPoint), fetchSourceValue(mapper, sourceValue, "wkt")); + List wkb = fetchSourceValue(mapper, sourceValue, "wkb"); + assertThat(wkb.size(), equalTo(1)); + assertThat(wkb.get(0), equalTo(wkbPoint)); // Test a single point in "x, y" string format. 
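// (the string form parses to the same point as the [x, y] array form above, so the expected little-endian WKB bytes are identical)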
sourceValue = "42.0,27.1"; assertEquals(List.of(jsonPoint), fetchSourceValue(mapper, sourceValue, null)); assertEquals(List.of(wktPoint), fetchSourceValue(mapper, sourceValue, "wkt")); + wkb = fetchSourceValue(mapper, sourceValue, "wkb"); + assertThat(wkb.size(), equalTo(1)); + assertThat(wkb.get(0), equalTo(wkbPoint)); // Test a malformed single point sourceValue = "foo"; assertEquals(List.of(), fetchSourceValue(mapper, sourceValue, null)); assertEquals(List.of(), fetchSourceValue(mapper, sourceValue, "wkt")); + assertEquals(List.of(), fetchSourceValue(mapper, sourceValue, "wkb")); // Test a list of points in [x, y] array format. sourceValue = List.of(List.of(42.0, 27.1), List.of(30.0, 50.0)); assertEquals(List.of(jsonPoint, otherJsonPoint), fetchSourceValue(mapper, sourceValue, null)); assertEquals(List.of(wktPoint, otherWktPoint), fetchSourceValue(mapper, sourceValue, "wkt")); + wkb = fetchSourceValue(mapper, sourceValue, "wkb"); + assertThat(wkb.size(), equalTo(2)); + assertThat(wkb.get(0), equalTo(wkbPoint)); + assertThat(wkb.get(1), equalTo(otherWkbPoint)); // Test a single point in well-known text format. sourceValue = "POINT (42.0 27.1)"; diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/ShapeFieldTypeTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/ShapeFieldTypeTests.java index 1050c9acef11a..c7d87a6c6e8f5 100644 --- a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/ShapeFieldTypeTests.java +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/mapper/ShapeFieldTypeTests.java @@ -7,18 +7,23 @@ package org.elasticsearch.xpack.spatial.index.mapper; +import org.elasticsearch.geometry.utils.StandardValidator; +import org.elasticsearch.geometry.utils.WellKnownBinary; +import org.elasticsearch.geometry.utils.WellKnownText; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.mapper.FieldTypeTestCase; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperBuilderContext; -import java.io.IOException; +import java.nio.ByteOrder; import java.util.List; import java.util.Map; +import static org.hamcrest.Matchers.equalTo; + public class ShapeFieldTypeTests extends FieldTypeTestCase { - public void testFetchSourceValue() throws IOException { + public void testFetchSourceValue() throws Exception { MappedFieldType mapper = new ShapeFieldMapper.Builder("field", IndexVersion.current(), false, true).build( MapperBuilderContext.root(false, false) ).fieldType(); @@ -29,26 +34,43 @@ public void testFetchSourceValue() throws IOException { String wktLineString = "LINESTRING (42.0 27.1, 30.0 50.0)"; String wktPoint = "POINT (14.3 15.0)"; String wktMalformed = "POINT foo"; + byte[] wkbLine = WellKnownBinary.toWKB( + WellKnownText.fromWKT(StandardValidator.NOOP, false, wktLineString), + ByteOrder.LITTLE_ENDIAN + ); + byte[] wkbPoint = WellKnownBinary.toWKB(WellKnownText.fromWKT(StandardValidator.NOOP, false, wktPoint), ByteOrder.LITTLE_ENDIAN); // Test a single shape in geojson format. 
Object sourceValue = jsonLineString; assertEquals(List.of(jsonLineString), fetchSourceValue(mapper, sourceValue, null)); assertEquals(List.of(wktLineString), fetchSourceValue(mapper, sourceValue, "wkt")); + List wkb = fetchSourceValue(mapper, sourceValue, "wkb"); + assertThat(wkb.size(), equalTo(1)); + assertThat(wkb.get(0), equalTo(wkbLine)); // Test a malformed single shape in geojson format sourceValue = jsonMalformed; assertEquals(List.of(), fetchSourceValue(mapper, sourceValue, null)); assertEquals(List.of(), fetchSourceValue(mapper, sourceValue, "wkt")); + assertEquals(List.of(), fetchSourceValue(mapper, sourceValue, "wkb")); // Test a list of shapes in geojson format. sourceValue = List.of(jsonLineString, jsonPoint); assertEquals(List.of(jsonLineString, jsonPoint), fetchSourceValue(mapper, sourceValue, null)); assertEquals(List.of(wktLineString, wktPoint), fetchSourceValue(mapper, sourceValue, "wkt")); + wkb = fetchSourceValue(mapper, sourceValue, "wkb"); + assertThat(wkb.size(), equalTo(2)); + assertThat(wkb.get(0), equalTo(wkbLine)); + assertThat(wkb.get(1), equalTo(wkbPoint)); // Test a list of shapes including one malformed in geojson format sourceValue = List.of(jsonLineString, jsonMalformed, jsonPoint); assertEquals(List.of(jsonLineString, jsonPoint), fetchSourceValue(mapper, sourceValue, null)); assertEquals(List.of(wktLineString, wktPoint), fetchSourceValue(mapper, sourceValue, "wkt")); + wkb = fetchSourceValue(mapper, sourceValue, "wkb"); + assertThat(wkb.size(), equalTo(2)); + assertThat(wkb.get(0), equalTo(wkbLine)); + assertThat(wkb.get(1), equalTo(wkbPoint)); // Test a single shape in wkt format. sourceValue = wktLineString; diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/metrics/CartesianShapeCentroidAggregatorTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/metrics/CartesianShapeCentroidAggregatorTests.java index d8f00edd7873d..5cd28233a776f 100644 --- a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/metrics/CartesianShapeCentroidAggregatorTests.java +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/search/aggregations/metrics/CartesianShapeCentroidAggregatorTests.java @@ -162,8 +162,9 @@ public void testSingleValuedField() throws Exception { w.addDocument(document); if (targetShapeType.compareTo(calculator.getDimensionalShapeType()) == 0) { double weight = calculator.sumWeight(); - compensatedSumLat.add(weight * calculator.getY()); - compensatedSumLon.add(weight * calculator.getX()); + // compute the centroid of centroids in float space + compensatedSumLat.add(weight * (float) calculator.getY()); + compensatedSumLon.add(weight * (float) calculator.getX()); compensatedSumWeight.add(weight); } } diff --git a/x-pack/plugin/sql/sql-action/src/test/java/org/elasticsearch/xpack/sql/action/SqlTestUtils.java b/x-pack/plugin/sql/sql-action/src/test/java/org/elasticsearch/xpack/sql/action/SqlTestUtils.java index bafdbeed8f1a4..42f42e2a26c03 100644 --- a/x-pack/plugin/sql/sql-action/src/test/java/org/elasticsearch/xpack/sql/action/SqlTestUtils.java +++ b/x-pack/plugin/sql/sql-action/src/test/java/org/elasticsearch/xpack/sql/action/SqlTestUtils.java @@ -103,10 +103,13 @@ static XContentBuilder toXContentBuilder(XContentBuilder builder, CheckedConsume objectGenerator.accept(generator); generator.close(); // System.out.println(out.toString(StandardCharsets.UTF_8)); - XContentParser parser = builder.contentType() - 
.xContent() - .createParser(XContentParserConfiguration.EMPTY, new ByteArrayInputStream(out.toByteArray())); - builder.copyCurrentStructure(parser); + try ( + XContentParser parser = builder.contentType() + .xContent() + .createParser(XContentParserConfiguration.EMPTY, new ByteArrayInputStream(out.toByteArray())) + ) { + builder.copyCurrentStructure(parser); + } builder.flush(); ByteArrayOutputStream stream = (ByteArrayOutputStream) builder.getOutputStream(); assertEquals("serialized objects differ", out.toString(StandardCharsets.UTF_8), stream.toString(StandardCharsets.UTF_8)); diff --git a/x-pack/plugin/sql/src/internalClusterTest/java/org/elasticsearch/xpack/sql/action/SqlActionIT.java b/x-pack/plugin/sql/src/internalClusterTest/java/org/elasticsearch/xpack/sql/action/SqlActionIT.java index 3fc152f7ab4c5..2599a9213ea80 100644 --- a/x-pack/plugin/sql/src/internalClusterTest/java/org/elasticsearch/xpack/sql/action/SqlActionIT.java +++ b/x-pack/plugin/sql/src/internalClusterTest/java/org/elasticsearch/xpack/sql/action/SqlActionIT.java @@ -14,7 +14,6 @@ import org.elasticsearch.xpack.sql.proto.Mode; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertRequestBuilderThrows; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; @@ -63,6 +62,6 @@ public void testSqlActionOutdatedVersion() { SqlQueryRequestBuilder request = new SqlQueryRequestBuilder(client()).query("SELECT true") .mode(randomFrom(Mode.CLI, Mode.JDBC)) .version("1.2.3"); - assertRequestBuilderThrows(request, org.elasticsearch.action.ActionRequestValidationException.class); + expectThrows(org.elasticsearch.action.ActionRequestValidationException.class, request); } } diff --git a/x-pack/plugin/sql/src/internalClusterTest/java/org/elasticsearch/xpack/sql/action/SqlLicenseIT.java b/x-pack/plugin/sql/src/internalClusterTest/java/org/elasticsearch/xpack/sql/action/SqlLicenseIT.java index c16b0554d8738..a24d5ada5746e 100644 --- a/x-pack/plugin/sql/src/internalClusterTest/java/org/elasticsearch/xpack/sql/action/SqlLicenseIT.java +++ b/x-pack/plugin/sql/src/internalClusterTest/java/org/elasticsearch/xpack/sql/action/SqlLicenseIT.java @@ -111,7 +111,7 @@ public void testSqlQueryActionLicense() throws Exception { ElasticsearchSecurityException e = expectThrows( ElasticsearchSecurityException.class, - () -> new SqlQueryRequestBuilder(client()).query("SELECT * FROM test").get() + new SqlQueryRequestBuilder(client()).query("SELECT * FROM test") ); assertThat(e.getMessage(), equalTo("current license is non-compliant for [sql]")); enableSqlLicensing(); @@ -126,7 +126,7 @@ public void testSqlQueryActionJdbcModeLicense() throws Exception { ElasticsearchSecurityException e = expectThrows( ElasticsearchSecurityException.class, - () -> new SqlQueryRequestBuilder(client()).query("SELECT * FROM test").mode("jdbc").get() + new SqlQueryRequestBuilder(client()).query("SELECT * FROM test").mode("jdbc") ); assertThat(e.getMessage(), equalTo("current license is non-compliant for [jdbc]")); enableJdbcLicensing(); @@ -141,7 +141,7 @@ public void testSqlTranslateActionLicense() throws Exception { ElasticsearchSecurityException e = expectThrows( ElasticsearchSecurityException.class, - () -> new SqlTranslateRequestBuilder(client()).query("SELECT * FROM test").get() + new SqlTranslateRequestBuilder(client()).query("SELECT * FROM test") ); assertThat(e.getMessage(), equalTo("current license is non-compliant for [sql]")); 
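// (same cleanup as the earlier expectThrows calls: the request builder is passed directly and executed by the assertion helper, replacing the explicit () -> builder.get() lambda)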
enableSqlLicensing(); diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/SqlServerException.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/SqlServerException.java deleted file mode 100644 index e3a85b03e7a00..0000000000000 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/SqlServerException.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ -package org.elasticsearch.xpack.sql; - -import org.elasticsearch.xpack.ql.QlServerException; - -public abstract class SqlServerException extends QlServerException { - - protected SqlServerException(String message, Object... args) { - super(message, args); - } - - protected SqlServerException(String message, Throwable cause, boolean enableSuppression, boolean writableStackTrace) { - super(message, cause, enableSuppression, writableStackTrace); - } - - protected SqlServerException(String message, Throwable cause) { - super(message, cause); - } - - protected SqlServerException(Throwable cause, String message, Object... args) { - super(cause, message, args); - } - - protected SqlServerException(Throwable cause) { - super(cause); - } -} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/Querier.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/Querier.java index a0da67f3006a3..936f4aa23cd57 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/Querier.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/Querier.java @@ -161,6 +161,7 @@ private void searchWithPointInTime(SearchRequest search, ActionListener { String pitId = openPointInTimeResponse.getPointInTimeId(); + search.indicesOptions(SearchRequest.DEFAULT_INDICES_OPTIONS); search.indices(Strings.EMPTY_ARRAY); search.source().pointInTimeBuilder(new PointInTimeBuilder(pitId)); ActionListener closePitOnErrorListener = wrap(searchResponse -> { @@ -201,13 +202,14 @@ public static SearchRequest prepareRequest(SearchSourceBuilder source, SqlConfig source.timeout(cfg.requestTimeout()); SearchRequest searchRequest = new SearchRequest(INTRODUCING_UNSIGNED_LONG); - searchRequest.indices(indices); + if (source.pointInTimeBuilder() == null) { + searchRequest.indices(indices); + searchRequest.indicesOptions( + includeFrozen ? IndexResolver.FIELD_CAPS_FROZEN_INDICES_OPTIONS : IndexResolver.FIELD_CAPS_INDICES_OPTIONS + ); + } searchRequest.source(source); searchRequest.allowPartialSearchResults(cfg.allowPartialSearchResults()); - searchRequest.indicesOptions( - includeFrozen ? IndexResolver.FIELD_CAPS_FROZEN_INDICES_OPTIONS : IndexResolver.FIELD_CAPS_INDICES_OPTIONS - ); - return searchRequest; } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SchemaDelegatingRowSet.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SchemaDelegatingRowSet.java deleted file mode 100644 index f7097b7bebfae..0000000000000 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SchemaDelegatingRowSet.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. 
Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -package org.elasticsearch.xpack.sql.execution.search; - -import org.elasticsearch.xpack.ql.type.Schema; -import org.elasticsearch.xpack.sql.session.RowSet; -import org.elasticsearch.xpack.sql.session.SchemaRowSet; - -class SchemaDelegatingRowSet implements SchemaRowSet { - - private final Schema schema; - private final RowSet delegate; - - SchemaDelegatingRowSet(Schema schema, RowSet delegate) { - this.schema = schema; - this.delegate = delegate; - } - - @Override - public Schema schema() { - return schema; - } - - @Override - public boolean hasCurrentRow() { - return delegate.hasCurrentRow(); - } - - @Override - public boolean advanceRow() { - return delegate.advanceRow(); - } - - @Override - public int size() { - return delegate.size(); - } - - @Override - public void reset() { - delegate.reset(); - } - - @Override - public Object column(int index) { - return delegate.column(index); - } -} diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/api_key/10_basic.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/api_key/10_basic.yml index b575ddccb449a..09f08d59049ec 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/api_key/10_basic.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/api_key/10_basic.yml @@ -191,6 +191,7 @@ teardown: Authorization: "Basic YXBpX2tleV91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" # api_key_user security.get_api_key: owner: true + active_only: true - length: { "api_keys" : 1 } - match: { "api_keys.0.username": "api_key_user" } - match: { "api_keys.0.invalidated": false } diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/counted_keyword/10_basic.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/counted_keyword/10_basic.yml index d0f7c7636582f..e5ad63fa31153 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/counted_keyword/10_basic.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/counted_keyword/10_basic.yml @@ -29,6 +29,16 @@ setup: - do: indices.refresh: { } +--- +"Counted keyword is searchable by default": + - do: + field_caps: + index: test-events + fields: [ events ] + + - match: { fields.events.counted_keyword.searchable: true } + - match: { fields.events.counted_keyword.aggregatable: true } + --- "Counted Terms agg": @@ -49,3 +59,22 @@ setup: - match: { aggregations.event_terms.buckets.2.key: "c" } - match: { aggregations.event_terms.buckets.2.doc_count: 2 } - length: { aggregations.event_terms.buckets: 3 } + +--- +# Use a probability of 1.0 to ensure a consistent bucket count. 
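+# (with any probability below 1.0, random_sampler would skip a random subset of documents and the bucket doc counts would vary from run to run)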
+"Sampled Counted Terms agg": + - do: + search: + index: test-events + body: + size: 0 + aggs: + sample: + random_sampler: + probability: 1.0 + aggs: + event_terms: + counted_terms: + field: events + + - length: { aggregations.sample.event_terms.buckets: 3 } diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/counted_keyword/20_no_index.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/counted_keyword/20_no_index.yml new file mode 100644 index 0000000000000..1fe48207b5586 --- /dev/null +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/counted_keyword/20_no_index.yml @@ -0,0 +1,54 @@ +setup: + + - skip: + version: " - 8.12.99" + reason: "index option on counted_keyword was added in 8.13" + + - do: + indices.create: + index: test-events-no-index + body: + mappings: + properties: + events: + type: counted_keyword + index: false + + - do: + index: + index: test-events-no-index + id: "1" + body: { "events": [ "a", "a", "b" ] } + + + - do: + indices.refresh: { } + +--- +"Counted keyword with index false is not searchable": + - do: + field_caps: + index: test-events-no-index + fields: [ events ] + + - match: { fields.events.counted_keyword.searchable: false } + - match: { fields.events.counted_keyword.aggregatable: true } + +--- +"Counted Terms agg only relies on doc values": +# although the field is not indexed, the counted_terms agg should still work + - do: + search: + index: test-events-no-index + body: + size: 0 + aggs: + event_terms: + counted_terms: + field: events + + - match: { aggregations.event_terms.buckets.0.key: "a" } + - match: { aggregations.event_terms.buckets.0.doc_count: 2 } + - match: { aggregations.event_terms.buckets.1.key: "b" } + - match: { aggregations.event_terms.buckets.1.doc_count: 1 } + - length: { aggregations.event_terms.buckets: 2 } diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/100_bug_fix.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/100_bug_fix.yml index e768a6b348959..6cbc9a225588b 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/100_bug_fix.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/100_bug_fix.yml @@ -1,10 +1,8 @@ --- "Coalesce and to_ip functions": - skip: - version: all - reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/102871" - # version: " - 8.11.99" - # reason: "fixes in 8.12 or later" + version: " - 8.11.99" + reason: "fixes in 8.12 or later" features: warnings - do: bulk: @@ -126,3 +124,71 @@ - match: { values.2.0: null } - match: { values.2.1: null } - match: { values.2.2: index1 } + + +--- +"null MappedFieldType on single value detection #103141": + - skip: + version: " - 8.12.99" + reason: "fixes in 8.13 or later" + - do: + indices.create: + index: npe_single_value_1 + body: + mappings: + properties: + field1: + type: long + - do: + indices.create: + index: npe_single_value_2 + body: + mappings: + properties: + field2: + type: long + - do: + indices.create: + index: npe_single_value_3 + body: + mappings: + properties: + field3: + type: long + + - do: + bulk: + refresh: true + body: + - { "index": { "_index": "npe_single_value_1" } } + - { "field1": 10 } + - { "index": { "_index": "npe_single_value_2" } } + - { "field2": 20 } + - { "index": { "_index": "npe_single_value_3" } } + - { "field3": 30 } + - do: + esql.query: + body: + query: 'from npe_single_value* | stats x = avg(field1) | limit 10' + - match: { columns.0.name: x } + - match: { columns.0.type: double } 
+ - length: { values: 1 } + - match: { values.0.0: 10.0 } + + - do: + esql.query: + body: + query: 'from npe_single_value* | stats x = avg(field2) | limit 10' + - match: { columns.0.name: x } + - match: { columns.0.type: double } + - length: { values: 1 } + - match: { values.0.0: 20.0 } + + - do: + esql.query: + body: + query: 'from npe_single_value* | stats x = avg(field3) | limit 10' + - match: { columns.0.name: x } + - match: { columns.0.type: double } + - length: { values: 1 } + - match: { values.0.0: 30.0 } diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/40_unsupported_types.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/40_unsupported_types.yml index 181cf52b66c7c..83aa31ed5eb32 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/40_unsupported_types.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/esql/40_unsupported_types.yml @@ -394,7 +394,7 @@ unsupported with sort: --- spatial types unsupported in 8.11: - skip: - version: " - 8.10.99, 8.12.0 - " + version: " - " reason: "Elasticsearch 8.11 did not support any spatial types" - do: diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/change_point_agg.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/change_point_agg.yml index 3dc4fb54dbc13..a54d26057ee5b 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/change_point_agg.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/change_point_agg.yml @@ -48,11 +48,11 @@ setup: {"index":{}} {"cost":250,"time":1587501273000,"kind":"changed"} {"index":{}} - {"cost":580,"time":1587501283000,"kind":"changed"} + {"cost":380,"time":1587501283000,"kind":"changed"} {"index":{}} - {"cost":600,"time":1587501293000,"kind":"changed"} + {"cost":450,"time":1587501293000,"kind":"changed"} {"index":{}} - {"cost":600,"time":1587501303000,"kind":"changed"} + {"cost":550,"time":1587501303000,"kind":"changed"} {"index":{}} {"cost":600,"time":1587501313000,"kind":"changed"} {"index":{}} diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/validate.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/validate.yml index 10de6e2c22d9e..1df34a64f860a 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/validate.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/validate.yml @@ -2,7 +2,7 @@ "Test valid job config": - do: ml.validate: - body: > + body: > { "analysis_config": { "bucket_span": "1h", @@ -21,7 +21,7 @@ - do: catch: /.data_description. 
failed to parse field .format./ ml.validate: - body: > + body: > { "analysis_config": { "bucket_span": "1h", @@ -38,7 +38,7 @@ "Test valid job config with job ID": - do: ml.validate: - body: > + body: > { "job_id": "validate-job-config-with-job-id", "analysis_config": { @@ -58,7 +58,7 @@ - do: catch: /Invalid job_id; '_' can contain lowercase alphanumeric \(a-z and 0-9\), hyphens or underscores; must start and end with alphanumeric/ ml.validate: - body: > + body: > { "job_id": "_", "analysis_config": { @@ -78,7 +78,7 @@ - do: catch: /illegal_argument_exception/ ml.validate: - body: > + body: > { "model_snapshot_id": "wont-create-with-this-setting", "analysis_config" : { @@ -92,7 +92,7 @@ - do: catch: /The job is configured with fields \[model_snapshot_id\] that are illegal to set at job creation/ ml.validate: - body: > + body: > { "model_snapshot_id": "wont-create-with-this-setting", "analysis_config" : { @@ -109,7 +109,7 @@ - do: catch: /illegal_argument_exception.*Duplicate detectors are not allowed/ ml.validate: - body: > + body: > { "analysis_config": { "bucket_span": "1h", @@ -126,7 +126,7 @@ - do: catch: /illegal_argument_exception.*Duplicate detectors are not allowed/ ml.validate: - body: > + body: > { "analysis_config": { "bucket_span": "1h", diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/profiling/10_basic.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/profiling/10_basic.yml new file mode 100644 index 0000000000000..8bc863e6fca9f --- /dev/null +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/profiling/10_basic.yml @@ -0,0 +1,177 @@ +--- +setup: + - skip: + version: " - 8.12.99" + reason: "Universal Profiling test infrastructure is available in 8.12+" + + - do: + cluster.put_settings: + body: + persistent: + xpack.profiling.templates.enabled: true + + - do: + profiling.status: + wait_for_resources_created: true + + - do: + bulk: + refresh: true + body: + - {"create": {"_index": "profiling-events-all"}} + - {"Stacktrace.count": [1], "profiling.project.id": ["100"], "os.kernel": ["9.9.9-0"], "tags": ["environment:qa", "region:eu-west-1"], "host.ip": ["192.168.1.2"], "@timestamp": ["1700504427"], "container.name": ["instance-0000000010"], "ecs.version": ["1.12.0"], "Stacktrace.id": ["S07KmaoGhvNte78xwwRbZQ"], "agent.version": ["head-be593ef3-1688111067"], "host.name": ["ip-192-168-1-2"], "host.id": ["8457605156473051743"], "process.thread.name": ["497295213074376"]} + - {"create": {"_index": "profiling-stacktraces", "_id": "S07KmaoGhvNte78xwwRbZQ"}} + - {"Stacktrace": {"frame": {"ids": 
"634wiWh6F21tPpXr0Zz3mgAAAAAAEfFi8NlMClggx8jaziUTJXlmWAAAAAAAAIYIZSkKN3zNxr0HYuO2pqe5hQAAAAAAwcBwZSkKN3zNxr0HYuO2pqe5hQAAAAAA5ECvZSkKN3zNxr0HYuO2pqe5hQAAAAAA4_9_ZSkKN3zNxr0HYuO2pqe5hQAAAAAAj7b5ZSkKN3zNxr0HYuO2pqe5hQAAAAAAgwXKZSkKN3zNxr0HYuO2pqe5hQAAAAAAgu3UAAAAAAAAV4sAAAAAAAAAHezOBBlhpr8qZgY89pr05YIxi0DTL7hyTAAAAAAAAAALzZZ6VCjFYAFVAKtY0XlyPwAAAAAAAAAFySPx-89oJ6TfXYn-uir7mQAAAAAAAABch4dwrMYlRFRjyfsvjXt4tgAAAAAAAAAg3V-8FLy1GH8nVRceMDeaiwAAAAAAAAABnVB2vvQdnm3M5BpEt6xnFAAAAAAAAAAV4j8yS0qsC_6XfTfMdPp5KQAAAAAAAAAQ9oBnE4xnAvOiOv1q-LbApgAAAAAAAAAEwRQstrBYZ0ShmJnYV-ADrQAAAAAAAAFLAFikCbtP_Dm7iUthjnlnEgAAAAAAAAEq56q5trA0bAF1B-Um6L_rqwAAAAAAAAAGgi_774C-EJhuJfyXXhzVgwAAAAAAAABEgvYbo0YBmE65VwrpTWYalQAAAAAAAAB2tMqbgEmfZJ47YRogSA-gKgAAAAAAAADlCQUIxcdtvT35ZznMVnzc_AAAAAAAAACXN4c5sJszjyVzcx3AmWN8pwAAAAAAAADS_GFFImAT2VE6Ar5VgmaN7QAAAAAAAAHywnSBrxGSumHiAQQABJeNtQAAAAAAAAAkPK6VPfk6aJqBe-5Qji8O5gAAAAAAAAAFEIxfgHbDbI5dElFzd3Ha-QAAAAAAAAAZFq10nEfKWtXEt510UwEUUAAAAAAAAAB7V_QMdmt4RxKxn4ZNgdvkJwAAAAAAAAAReNITicG0MvFr9HQHk70FLAAAAAAAAAAI9j0yGbd8eQNwdRhHZ159OQAAAAAAAAA9vzzPIR5tUnMkJ7d_ITdQRgAAAAAAAAAC6YIeLAztuVSewvuGh8XKXgAAAAAAAAAFIQvpHpp20NHD-0mZNf95oAAAAAAAAABp0vAOoRRxsQcS4vDapC3-mwAAAAAAAAANqnvWBP24iZLcQ-Wi76ZDxQAAAAAAAAAI3X9PCd1tVPhzrMiwigfodgAAAAAAAAAAZSkKN3zNxr0HYuO2pqe5hQAAAAAA52Uf8NlMClggx8jaziUTJXlmWAAAAAAAAQEslHp5_WAgpLy2alrUVab6HAAAAAAAwACLlHp5_WAgpLy2alrUVab6HAAAAAAAAEIGlHp5_WAgpLy2alrUVab6HAAAAAAAFFQelHp5_WAgpLy2alrUVab6HAAAAAAAFErelHp5_WAgpLy2alrUVab6HAAAAAAAFBtp", "types": "CAMfBQIDBQQ"}}, "ecs": {"version": "1.12.0"}} + - {"create": {"_index": "profiling-stackframes", "_id": "8NlMClggx8jaziUTJXlmWAAAAAAAAIYI"}} + - {"ecs.version": "1.12.0", "Stackframe.file.name": ["/build/glibc-sMfBJT/glibc-2.31/nptl/pthread_create.c"], "Stackframe.function.name": ["start_thread"], "Stackframe.line.number": [477]} + - { "create": { "_index": "profiling-stackframes", "_id": "AAAAAAAAV4sAAAAAAAAAHezOBBlhpr8q" } } + - { "ecs.version": "1.12.0", "Stackframe.file.name": [ "" ], "Stackframe.function.name": [ "StubRoutines (1)" ], "Stackframe.line.number": [ 0 ], "Stackframe.function.offset": [ 0 ] } + - { "create": { "_index": "profiling-stackframes", "_id": "ZgY89pr05YIxi0DTL7hyTAAAAAAAAAAL" } } + - { "ecs.version": "1.12.0", "Stackframe.file.name": [ "Thread.java" ], "Stackframe.function.name": [ "void java.lang.Thread.run()" ], "Stackframe.line.number": [ 833 ], "Stackframe.function.offset": [ 1 ] } + - {"create": {"_index": "profiling-stackframes", "_id": "zZZ6VCjFYAFVAKtY0XlyPwAAAAAAAAAF"}} + - {"ecs.version": "1.12.0", "Stackframe.file.name": ["ThreadPoolExecutor.java"], "Stackframe.function.name": ["void java.util.concurrent.ThreadPoolExecutor$Worker.run()"], "Stackframe.line.number": [635], "Stackframe.function.offset": [0]} + - {"create": {"_index": "profiling-stackframes", "_id": "ySPx-89oJ6TfXYn-uir7mQAAAAAAAABc"}} + - {"ecs.version": "1.12.0", "Stackframe.file.name": ["ThreadPoolExecutor.java"], "Stackframe.function.name": ["void java.util.concurrent.ThreadPoolExecutor.runWorker(java.util.concurrent.ThreadPoolExecutor$Worker)"], "Stackframe.line.number": [1136], "Stackframe.function.offset": [20]} + - {"create": {"_index": "profiling-stackframes", "_id": "h4dwrMYlRFRjyfsvjXt4tgAAAAAAAAAg"}} + - {"ecs.version": "1.12.0", "Stackframe.file.name": ["PrioritizedEsThreadPoolExecutor.java"], "Stackframe.function.name": ["void org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.run()"], "Stackframe.line.number": [225], "Stackframe.function.offset": [6]} + - {"create": 
{"_index": "profiling-stackframes", "_id": "3V-8FLy1GH8nVRceMDeaiwAAAAAAAAAB"}} + - {"ecs.version": "1.12.0", "Stackframe.file.name": ["PrioritizedEsThreadPoolExecutor.java"], "Stackframe.function.name": ["void org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor$TieBreakingPrioritizedRunnable.runAndClean(java.lang.Runnable)"], "Stackframe.line.number": [262], "Stackframe.function.offset": [0]} + - {"create": {"_index": "profiling-stackframes", "_id": "nVB2vvQdnm3M5BpEt6xnFAAAAAAAAAAV"}} + - {"ecs.version": "1.12.0", "Stackframe.file.name": ["ThreadContext.java"], "Stackframe.function.name": ["void org.elasticsearch.common.util.concurrent.ThreadContext$ContextPreservingRunnable.run()"], "Stackframe.line.number": [718], "Stackframe.function.offset": [2]} + - {"create": {"_index": "profiling-stackframes", "_id": "4j8yS0qsC_6XfTfMdPp5KQAAAAAAAAAQ"}} + - {"ecs.version": "1.12.0", "Stackframe.file.name": ["ClusterApplierService.java"], "Stackframe.function.name": ["void org.elasticsearch.cluster.service.ClusterApplierService$UpdateTask.run()"], "Stackframe.line.number": [154], "Stackframe.function.offset": [0]} + - {"create": {"_index": "profiling-stackframes", "_id": "9oBnE4xnAvOiOv1q-LbApgAAAAAAAAAE"}} + - {"ecs.version": "1.12.0", "Stackframe.file.name": ["ClusterApplierService.java"], "Stackframe.function.name": ["void org.elasticsearch.cluster.service.ClusterApplierService.access$000(org.elasticsearch.cluster.service.ClusterApplierService, java.lang.String, java.util.function.Function, org.elasticsearch.action.ActionListener)"], "Stackframe.line.number": [56], "Stackframe.function.offset": [0]} + - {"create": {"_index": "profiling-stackframes", "_id": "wRQstrBYZ0ShmJnYV-ADrQAAAAAAAAFL"}} + - {"ecs.version": "1.12.0", "Stackframe.file.name": ["ClusterApplierService.java"], "Stackframe.function.name": ["void org.elasticsearch.cluster.service.ClusterApplierService.runTask(java.lang.String, java.util.function.Function, org.elasticsearch.action.ActionListener)"], "Stackframe.line.number": [428], "Stackframe.function.offset": [44]} + - {"create": {"_index": "profiling-stackframes", "_id": "AFikCbtP_Dm7iUthjnlnEgAAAAAAAAEq"}} + - {"ecs.version": "1.12.0", "Stackframe.file.name": ["ClusterApplierService.java"], "Stackframe.function.name": ["void org.elasticsearch.cluster.service.ClusterApplierService.applyChanges(org.elasticsearch.cluster.ClusterState, org.elasticsearch.cluster.ClusterState, java.lang.String, org.elasticsearch.cluster.service.ClusterApplierRecordingService$Recorder)"], "Stackframe.line.number": [503], "Stackframe.function.offset": [25]} + - {"create": {"_index": "profiling-stackframes", "_id": "56q5trA0bAF1B-Um6L_rqwAAAAAAAAAG"}} + - {"ecs.version": "1.12.0", "Stackframe.file.name": ["ClusterApplierService.java"], "Stackframe.function.name": ["void org.elasticsearch.cluster.service.ClusterApplierService.callClusterStateAppliers(org.elasticsearch.cluster.ClusterChangedEvent, org.elasticsearch.cluster.service.ClusterApplierRecordingService$Recorder)"], "Stackframe.line.number": [539], "Stackframe.function.offset": [0]} + - {"create": {"_index": "profiling-stackframes", "_id": "gi_774C-EJhuJfyXXhzVgwAAAAAAAABE"}} + - {"ecs.version": "1.12.0", "Stackframe.file.name": ["ClusterApplierService.java"], "Stackframe.function.name": ["void org.elasticsearch.cluster.service.ClusterApplierService.callClusterStateAppliers(org.elasticsearch.cluster.ClusterChangedEvent, org.elasticsearch.cluster.service.ClusterApplierRecordingService$Recorder, java.util.Collection)"], 
"Stackframe.line.number": [553], "Stackframe.function.offset": [4]} + - {"create": {"_index": "profiling-stackframes", "_id": "gvYbo0YBmE65VwrpTWYalQAAAAAAAAB2"}} + - {"ecs.version": "1.12.0", "Stackframe.file.name": ["IndicesClusterStateService.java"], "Stackframe.function.name": ["void org.elasticsearch.indices.cluster.IndicesClusterStateService.applyClusterState(org.elasticsearch.cluster.ClusterChangedEvent)"], "Stackframe.line.number": [231], "Stackframe.function.offset": [31]} + - {"create": {"_index": "profiling-stackframes", "_id": "tMqbgEmfZJ47YRogSA-gKgAAAAAAAADl"}} + - {"ecs.version": "1.12.0", "Stackframe.file.name": ["IndicesClusterStateService.java"], "Stackframe.function.name": ["void org.elasticsearch.indices.cluster.IndicesClusterStateService.createOrUpdateShards(org.elasticsearch.cluster.ClusterState)"], "Stackframe.line.number": [556], "Stackframe.function.offset": [18]} + - {"create": {"_index": "profiling-stackframes", "_id": "CQUIxcdtvT35ZznMVnzc_AAAAAAAAACX"}} + - {"ecs.version": "1.12.0", "Stackframe.file.name": ["IndicesClusterStateService.java"], "Stackframe.function.name": ["void org.elasticsearch.indices.cluster.IndicesClusterStateService.updateShard(org.elasticsearch.cluster.node.DiscoveryNodes, org.elasticsearch.cluster.routing.ShardRouting, org.elasticsearch.indices.cluster.IndicesClusterStateService$Shard, org.elasticsearch.cluster.routing.RoutingTable, org.elasticsearch.cluster.ClusterState)"], "Stackframe.line.number": [614], "Stackframe.function.offset": [14]} + - {"create": {"_index": "profiling-stackframes", "_id": "N4c5sJszjyVzcx3AmWN8pwAAAAAAAADS"}} + - {"ecs.version": "1.12.0", "Stackframe.file.name": ["IndexShard.java"], "Stackframe.function.name": ["void org.elasticsearch.index.shard.IndexShard.updateShardState(org.elasticsearch.cluster.routing.ShardRouting, long, java.util.function.BiConsumer, long, java.util.Set, org.elasticsearch.cluster.routing.IndexShardRoutingTable)"], "Stackframe.line.number": [535], "Stackframe.function.offset": [24]} + - {"create": {"_index": "profiling-stackframes", "_id": "_GFFImAT2VE6Ar5VgmaN7QAAAAAAAAHy"}} + - {"ecs.version": "1.12.0", "Stackframe.file.name": ["ReplicationTracker.java"], "Stackframe.function.name": ["void org.elasticsearch.index.seqno.ReplicationTracker.updateFromMaster(long, java.util.Set, org.elasticsearch.cluster.routing.IndexShardRoutingTable)"], "Stackframe.line.number": [1198], "Stackframe.function.offset": [47]} + - {"create": {"_index": "profiling-stackframes", "_id": "wnSBrxGSumHiAQQABJeNtQAAAAAAAAAk"}} + - {"ecs.version": "1.12.0", "Stackframe.file.name": ["ReplicationTracker.java"], "Stackframe.function.name": ["void org.elasticsearch.index.seqno.ReplicationTracker.updateReplicationGroupAndNotify()"], "Stackframe.line.number": [994], "Stackframe.function.offset": [3]} + - {"create": {"_index": "profiling-stackframes", "_id": "PK6VPfk6aJqBe-5Qji8O5gAAAAAAAAAF"}} + - {"ecs.version": "1.12.0", "Stackframe.file.name": ["PendingReplicationActions.java"], "Stackframe.function.name": ["void org.elasticsearch.action.support.replication.PendingReplicationActions.accept(java.lang.Object)"], "Stackframe.line.number": [25], "Stackframe.function.offset": [0]} + - {"create": {"_index": "profiling-stackframes", "_id": "EIxfgHbDbI5dElFzd3Ha-QAAAAAAAAAZ"}} + - {"ecs.version": "1.12.0", "Stackframe.file.name": ["PendingReplicationActions.java"], "Stackframe.function.name": ["void org.elasticsearch.action.support.replication.PendingReplicationActions.accept(org.elasticsearch.index.shard.ReplicationGroup)"], 
"Stackframe.line.number": [71], "Stackframe.function.offset": [3]} + - {"create": {"_index": "profiling-stackframes", "_id": "Fq10nEfKWtXEt510UwEUUAAAAAAAAAB7"}} + - {"ecs.version": "1.12.0", "Stackframe.file.name": ["PendingReplicationActions.java"], "Stackframe.function.name": ["void org.elasticsearch.action.support.replication.PendingReplicationActions.acceptNewTrackedAllocationIds(java.util.Set)"], "Stackframe.line.number": [95], "Stackframe.function.offset": [10]} + - {"create": {"_index": "profiling-stackframes", "_id": "V_QMdmt4RxKxn4ZNgdvkJwAAAAAAAAAR"}} + - {"ecs.version": "1.12.0", "Stackframe.file.name": ["PendingReplicationActions.java"], "Stackframe.function.name": ["void org.elasticsearch.action.support.replication.PendingReplicationActions.cancelActions(java.util.ArrayList, java.lang.String)"], "Stackframe.line.number": [108], "Stackframe.function.offset": [1]} + - {"create": {"_index": "profiling-stackframes", "_id": "eNITicG0MvFr9HQHk70FLAAAAAAAAAAI"}} + - {"ecs.version": "1.12.0", "Stackframe.file.name": ["EsThreadPoolExecutor.java"], "Stackframe.function.name": ["void org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor.execute(java.lang.Runnable)"], "Stackframe.line.number": [95], "Stackframe.function.offset": [2]} + - {"create": {"_index": "profiling-stackframes", "_id": "9j0yGbd8eQNwdRhHZ159OQAAAAAAAAA9"}} + - {"ecs.version": "1.12.0", "Stackframe.file.name": ["ThreadPoolExecutor.java"], "Stackframe.function.name": ["void java.util.concurrent.ThreadPoolExecutor.execute(java.lang.Runnable)"], "Stackframe.line.number": [1357], "Stackframe.function.offset": [28]} + - {"create": {"_index": "profiling-stackframes", "_id": "vzzPIR5tUnMkJ7d_ITdQRgAAAAAAAAAC"}} + - {"ecs.version": "1.12.0", "Stackframe.file.name": ["EsExecutors.java"], "Stackframe.function.name": ["boolean org.elasticsearch.common.util.concurrent.EsExecutors$ExecutorScalingQueue.offer(java.lang.Object)"], "Stackframe.line.number": [363], "Stackframe.function.offset": [0]} + - {"create": {"_index": "profiling-stackframes", "_id": "6YIeLAztuVSewvuGh8XKXgAAAAAAAAAF"}} + - {"ecs.version": "1.12.0", "Stackframe.file.name": ["LinkedTransferQueue.java"], "Stackframe.function.name": ["boolean java.util.concurrent.LinkedTransferQueue.tryTransfer(java.lang.Object)"], "Stackframe.line.number": [1241], "Stackframe.function.offset": [0]} + - {"create": {"_index": "profiling-stackframes", "_id": "IQvpHpp20NHD-0mZNf95oAAAAAAAAABp"}} + - {"ecs.version": "1.12.0", "Stackframe.file.name": ["LinkedTransferQueue.java"], "Stackframe.function.name": ["java.lang.Object java.util.concurrent.LinkedTransferQueue.xfer(java.lang.Object, boolean, int, long)"], "Stackframe.line.number": [605], "Stackframe.function.offset": [10]} + - { "create": { "_index": "profiling-stackframes", "_id": "0vAOoRRxsQcS4vDapC3-mwAAAAAAAAAN" } } + - { "ecs.version": "1.12.0", "Stackframe.file.name": [ "LinkedTransferQueue.java" ], "Stackframe.function.name": [ "boolean java.util.concurrent.LinkedTransferQueue$Node.tryMatch(java.lang.Object, java.lang.Object)" ], "Stackframe.line.number": [ 448 ], "Stackframe.function.offset": [ 1 ] } + - {"create": {"_index": "profiling-stackframes", "_id": "qnvWBP24iZLcQ-Wi76ZDxQAAAAAAAAAI"}} + - {"ecs.version": "1.12.0", "Stackframe.file.name": ["LockSupport.java"], "Stackframe.function.name": ["void java.util.concurrent.locks.LockSupport.unpark(java.lang.Thread)"], "Stackframe.line.number": [177], "Stackframe.function.offset": [1]} + - {"create": {"_index": "profiling-stackframes", "_id": 
"3X9PCd1tVPhzrMiwigfodgAAAAAAAAAA"}} + - {"ecs.version": "1.12.0", "Stackframe.file.name": ["Unsafe.java"], "Stackframe.function.name": ["void jdk.internal.misc.Unsafe.unpark(java.lang.Object)"], "Stackframe.line.number": [0], "Stackframe.function.offset": [0]} + - {"create": {"_index": "profiling-stackframes", "_id": "lHp5_WAgpLy2alrUVab6HAAAAAAAwACL"}} + - {"ecs.version": "1.12.0", "Stackframe.function.name": ["entry_SYSCALL_64_after_hwframe"]} + - {"create": {"_index": "profiling-stackframes", "_id": "lHp5_WAgpLy2alrUVab6HAAAAAAAAEIG"}} + - {"ecs.version": "1.12.0", "Stackframe.function.name": ["do_syscall_64"]} + - {"create": {"_index": "profiling-stackframes", "_id": "lHp5_WAgpLy2alrUVab6HAAAAAAAFFQe"}} + - {"ecs.version": "1.12.0", "Stackframe.function.name": ["__x64_sys_futex"]} + - {"create": {"_index": "profiling-stackframes", "_id": "lHp5_WAgpLy2alrUVab6HAAAAAAAFEre"}} + - {"ecs.version": "1.12.0", "Stackframe.function.name": ["do_futex"]} + - {"create": {"_index": "profiling-stackframes", "_id": "lHp5_WAgpLy2alrUVab6HAAAAAAAFBtp"}} + - {"ecs.version": "1.12.0", "Stackframe.function.name": ["futex_wake"]} + - { "create": { "_index": "profiling-executables", "_id": "634wiWh6F21tPpXr0Zz3mg" } } + - { "@timestamp": "1698019200", "Executable": { "build": { "id": "9fdb74e7b217d06c93172a8243f8547f947ee6d1" }, "file": { "name": "libc-2.31.so" } }, "Symbolization": { "next_time": "4851892087" }, "ecs": { "version": "1.12.0" } } + - {"create": {"_index": "profiling-executables", "_id": "8NlMClggx8jaziUTJXlmWA"}} + - {"@timestamp": "1698019200", "Executable": {"build": {"id": "f0983025f0e0f327a6da752ff4ffa675e0be393f"}, "file": {"name": "libpthread-2.31.so"}}, "Symbolization": {"next_time": "4851892090"}, "ecs": {"version": "1.12.0"}} + - {"create": {"_index": "profiling-executables", "_id": "lHp5_WAgpLy2alrUVab6HA"}} + - {"@timestamp": "1698624000", "Executable": {"build": {"id": "c5f89ea1c68710d2a493bb604c343a92c4f8ddeb"}, "file": {"name": "vmlinux"}}, "Symbolization": {"next_time": "4852491791"}, "ecs": {"version": "1.12.0"}} + - {"create": {"_index": "profiling-hosts", "_id": "eLH27YsBj2lLi3tJYlvr"}} + - {"profiling.project.id": 100, "host.id": "8457605156473051743", "@timestamp": 1700504426, "ecs.version": "1.12.0", "profiling.agent.build_timestamp": 1688111067, "profiling.instance.private_ipv4s": ["192.168.1.2"], "ec2.instance_life_cycle": "on-demand", "profiling.agent.config.map_scale_factor": 0, "ec2.instance_type": "i3.2xlarge", "profiling.host.ip": "192.168.1.2", "profiling.agent.config.bpf_log_level": 0, "profiling.host.sysctl.net.core.bpf_jit_enable": 1, "profiling.agent.config.file": "/etc/prodfiler/prodfiler.conf", "ec2.local_ipv4": "192.168.1.2", "profiling.agent.config.no_kernel_version_check": false, "profiling.host.machine": "x86_64", "profiling.host.tags": ["cloud_provider:aws", "cloud_environment:qa", "cloud_region:eu-west-1"], "profiling.agent.config.probabilistic_threshold": 100, "profiling.agent.config.disable_tls": false, "profiling.agent.config.tracers": "all", "profiling.agent.start_time": 1700090045589, "profiling.agent.config.max_elements_per_interval": 800, "ec2.placement.region": "eu-west-1", "profiling.agent.config.present_cpu_cores": 8, "profiling.host.kernel_version": "9.9.9-0-aws", "profiling.agent.config.bpf_log_size": 65536, "profiling.agent.config.known_traces_entries": 65536, "profiling.host.sysctl.kernel.unprivileged_bpf_disabled": 1, "profiling.agent.config.verbose": false, "profiling.agent.config.probabilistic_interval": "1m0s", 
"ec2.placement.availability_zone_id": "euw1-az1", "ec2.security_groups": "", "ec2.local_hostname": "ip-192-168-1-2.eu-west-1.compute.internal", "ec2.placement.availability_zone": "eu-west-1c", "profiling.agent.config.upload_symbols": false, "profiling.host.sysctl.kernel.bpf_stats_enabled": 0, "profiling.host.name": "ip-192-168-1-2", "ec2.mac": "00:11:22:33:44:55", "profiling.host.kernel_proc_version": "Linux version 9.9.9-0-aws", "profiling.agent.config.cache_directory": "/var/cache/optimyze/", "profiling.agent.version": "v8.12.0", "ec2.hostname": "ip-192-168-1-2.eu-west-1.compute.internal", "profiling.agent.config.elastic_mode": false, "ec2.ami_id": "ami-aaaaaaaaaaa", "ec2.instance_id": "i-0b999999999999999" } + +--- +"Test Status": + - do: + profiling.status: {} + + - match: {profiling.enabled: true} + - match: {resource_management.enabled: true} + - match: {resources.created: true} + - match: {resources.pre_8_9_1_data: false} + - match: {resources.has_data: true} + +--- +"Test get stacktraces": + - do: + profiling.stacktraces: + body: > + { + "sample_size": 20000, + "requested_duration": 86400, + "query": { + "bool": { + "filter": [ + { + "range": { + "@timestamp": { + "gte": "2023-11-20", + "lt": "2023-11-21", + "format": "yyyy-MM-dd" + } + } + } + ] + } + } + } + - match: { stack_traces.S07KmaoGhvNte78xwwRbZQ.count: 1} + +--- +"Test flamegraph": + - do: + profiling.flamegraph: + body: > + { + "sample_size": 20000, + "requested_duration": 86400, + "query": { + "bool": { + "filter": [ + { + "range": { + "@timestamp": { + "gte": "2023-11-20", + "lt": "2023-11-21", + "format": "yyyy-MM-dd" + } + } + } + ] + } + } + } + - match: { Size: 47} + +--- +teardown: + - do: + cluster.put_settings: + body: + persistent: + xpack.profiling.templates.enabled: false diff --git a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/NdJsonTextStructureFinder.java b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/NdJsonTextStructureFinder.java index 2b8f3c678e9d5..e43b2cfdc96d3 100644 --- a/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/NdJsonTextStructureFinder.java +++ b/x-pack/plugin/text-structure/src/main/java/org/elasticsearch/xpack/textstructure/structurefinder/NdJsonTextStructureFinder.java @@ -44,9 +44,10 @@ static NdJsonTextStructureFinder makeNdJsonTextStructureFinder( List sampleMessages = Arrays.asList(sample.split("\n")); for (String sampleMessage : sampleMessages) { - XContentParser parser = jsonXContent.createParser(XContentParserConfiguration.EMPTY, sampleMessage); - sampleRecords.add(parser.mapOrdered()); - timeoutChecker.check("NDJSON parsing"); + try (XContentParser parser = jsonXContent.createParser(XContentParserConfiguration.EMPTY, sampleMessage)) { + sampleRecords.add(parser.mapOrdered()); + timeoutChecker.check("NDJSON parsing"); + } } TextStructure.Builder structureBuilder = new TextStructure.Builder(TextStructure.Format.NDJSON).setCharset(charsetName) diff --git a/x-pack/plugin/transform/qa/multi-cluster-tests-with-security/build.gradle b/x-pack/plugin/transform/qa/multi-cluster-tests-with-security/build.gradle index ef34db62e5e03..8f129789d46b7 100644 --- a/x-pack/plugin/transform/qa/multi-cluster-tests-with-security/build.gradle +++ b/x-pack/plugin/transform/qa/multi-cluster-tests-with-security/build.gradle @@ -2,6 +2,7 @@ import org.elasticsearch.gradle.internal.test.RestIntegTestTask import org.elasticsearch.gradle.Version import 
org.elasticsearch.gradle.VersionProperties import static org.elasticsearch.gradle.PropertyNormalization.IGNORE_VALUE +import org.elasticsearch.gradle.internal.info.BuildParams apply plugin: 'elasticsearch.internal-testclusters' apply plugin: 'elasticsearch.standalone-rest-test' @@ -53,16 +54,29 @@ testClusters.register('mixed-cluster') { tasks.register('remote-cluster', RestIntegTestTask) { mustRunAfter("precommit") systemProperty 'tests.rest.suite', 'remote_cluster' + maybeDisableForFips(it) } tasks.register('mixed-cluster', RestIntegTestTask) { dependsOn 'remote-cluster' useCluster remoteCluster systemProperty 'tests.rest.suite', 'multi_cluster' + maybeDisableForFips(it) } tasks.register("integTest") { dependsOn 'mixed-cluster' + maybeDisableForFips(it) } tasks.named("check").configure { dependsOn("integTest") } + +//TODO: remove with version 8.14. A new FIPS setting was added in 8.13. Since FIPS configures all test clusters and this specific integTest uses +// the previous minor version, that setting is not available when running in FIPS until 8.14. +def maybeDisableForFips(task) { + if (BuildParams.inFipsJvm) { + if(Version.fromString(project.version).before(Version.fromString('8.14.0'))) { + task.enabled = false + } + } +} diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/DefaultTransformExtension.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/DefaultTransformExtension.java index 9cccbade339dc..ea9260f555905 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/DefaultTransformExtension.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/DefaultTransformExtension.java @@ -9,9 +9,12 @@ import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.TimeValue; public class DefaultTransformExtension implements TransformExtension { + private static final TimeValue MIN_FREQUENCY = TimeValue.timeValueSeconds(1); + @Override public boolean includeNodeInfo() { return true; @@ -33,4 +36,9 @@ public Settings getTransformDestinationIndexSettings() { .put(IndexMetadata.SETTING_AUTO_EXPAND_REPLICAS, "0-1") .build(); } + + @Override + public TimeValue getMinFrequency() { + return MIN_FREQUENCY; + } } diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/Transform.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/Transform.java index 61cc0e2c072ad..98c95c5a9803a 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/Transform.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/Transform.java @@ -243,7 +243,12 @@ public Collection<?> createComponents(PluginServices services) { configManager, auditor ); - TransformScheduler scheduler = new TransformScheduler(clock, services.threadPool(), settings); + TransformScheduler scheduler = new TransformScheduler( + clock, + services.threadPool(), + settings, + getTransformExtension().getMinFrequency() + ); scheduler.start(); transformServices.set(new TransformServices(configManager, checkpointService, auditor, scheduler)); diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/TransformExtension.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/TransformExtension.java index c919f4dd4c550..4794f3c86f259 100644 ---
a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/TransformExtension.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/TransformExtension.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.transform; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.TimeValue; public interface TransformExtension { @@ -20,4 +21,10 @@ public interface TransformExtension { * source settings. */ Settings getTransformDestinationIndexSettings(); + + // TODO(jkuipers): remove this default implementation after the ServerlessTransformPlugin + // in the elasticsearch-serverless project is updated. + default TimeValue getMinFrequency() { + return TimeValue.timeValueSeconds(1); + } } diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexer.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexer.java index 29be02b87cbdf..1b8d14c6cdc2f 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexer.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexer.java @@ -519,16 +519,23 @@ private void injectPointInTimeIfNeeded( void doSearch(Tuple namedSearchRequest, ActionListener listener) { String name = namedSearchRequest.v1(); - SearchRequest searchRequest = namedSearchRequest.v2(); + SearchRequest originalRequest = namedSearchRequest.v2(); // We want to treat a request to search 0 indices as a request to do nothing, not a request to search all indices - if (searchRequest.indices().length == 0) { - logger.debug("[{}] Search request [{}] optimized to noop; searchRequest [{}]", getJobId(), name, searchRequest); + if (originalRequest.indices().length == 0) { + logger.debug("[{}] Search request [{}] optimized to noop; searchRequest [{}]", getJobId(), name, originalRequest); listener.onResponse(null); return; } - logger.trace("searchRequest: [{}]", searchRequest); - PointInTimeBuilder pit = searchRequest.pointInTimeBuilder(); + final SearchRequest searchRequest; + PointInTimeBuilder pit = originalRequest.pointInTimeBuilder(); + if (pit != null) { + // remove the indices from the request, they will be derived from the provided pit + searchRequest = new SearchRequest(originalRequest).indices(new String[0]).indicesOptions(SearchRequest.DEFAULT_INDICES_OPTIONS); + } else { + searchRequest = originalRequest; + } + logger.trace("searchRequest: [{}]", searchRequest); ClientHelper.executeWithHeadersAsync( transformConfig.getHeaders(), @@ -555,13 +562,13 @@ void doSearch(Tuple namedSearchRequest, ActionListener namedSearchRequest, ActionListener new TransformScheduledTask( task.getTransformId(), - task.getFrequency(), + getFrequency(task.getFrequency()), task.getLastTriggeredTimeMillis(), failureCount, task.getListener() @@ -245,7 +249,7 @@ public void scheduleNow(String transformId) { transformId, task -> new TransformScheduledTask( task.getTransformId(), - task.getFrequency(), + getFrequency(task.getFrequency()), task.getLastTriggeredTimeMillis(), task.getFailureCount(), currentTimeMillis, // we schedule this task at current clock time so that it is processed ASAP @@ -273,4 +277,11 @@ public void deregisterTransform(String transformId) { List getTransformScheduledTasks() { return scheduledTasks.listScheduledTasks(); } + + private TimeValue getFrequency(TimeValue frequency) { + if (frequency == null) { + frequency = 
Transform.DEFAULT_TRANSFORM_FREQUENCY; + } + return frequency.compareTo(minFrequency) >= 0 ? frequency : minFrequency; + } } diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/checkpoint/TimeBasedCheckpointProviderTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/checkpoint/TimeBasedCheckpointProviderTests.java index bed646b9ddeb2..0a1179e4224aa 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/checkpoint/TimeBasedCheckpointProviderTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/checkpoint/TimeBasedCheckpointProviderTests.java @@ -13,7 +13,6 @@ import org.elasticsearch.action.LatchedActionListener; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.search.SearchResponseSections; import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.client.internal.Client; @@ -339,15 +338,13 @@ public SingleGroupSource get() { private static SearchResponse newSearchResponse(long totalHits) { return new SearchResponse( - new SearchResponseSections( - new SearchHits(SearchHits.EMPTY, new TotalHits(totalHits, TotalHits.Relation.EQUAL_TO), 0), - null, - null, - false, - false, - null, - 0 - ), + new SearchHits(SearchHits.EMPTY, new TotalHits(totalHits, TotalHits.Relation.EQUAL_TO), 0), + null, + null, + false, + false, + null, + 0, null, 1, 1, diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexerTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexerTests.java index 06de37af346d2..b1c9edc0fab0a 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexerTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/ClientTransformIndexerTests.java @@ -23,6 +23,7 @@ import org.elasticsearch.action.support.ActionTestUtils; import org.elasticsearch.client.internal.ParentTaskAssigningClient; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.search.SearchContextMissingException; @@ -30,7 +31,6 @@ import org.elasticsearch.search.SearchHits; import org.elasticsearch.search.builder.PointInTimeBuilder; import org.elasticsearch.search.builder.SearchSourceBuilder; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.internal.ShardSearchContextId; import org.elasticsearch.search.profile.SearchProfileResults; import org.elasticsearch.search.suggest.Suggest; @@ -59,7 +59,6 @@ import java.time.Clock; import java.time.Instant; -import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.concurrent.CountDownLatch; @@ -135,7 +134,7 @@ public void testPitInjection() throws InterruptedException { mock(IndexBasedTransformConfigManager.class), mock(TransformCheckpointService.class), mock(TransformAuditor.class), - new TransformScheduler(Clock.systemUTC(), mock(ThreadPool.class), Settings.EMPTY) + new TransformScheduler(Clock.systemUTC(), mock(ThreadPool.class), Settings.EMPTY, TimeValue.ZERO) ), mock(CheckpointProvider.class), new 
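The getFrequency helper just above closes the loop on the new minimum-frequency plumbing: DefaultTransformExtension supplies a one-second floor, Transform.createComponents passes it into the scheduler, and every scheduled task's frequency is clamped against it (the test files below pass TimeValue.ZERO to opt out). The clamp itself is a null-default followed by a compareTo, as in this stand-alone sketch (FrequencyFloorSketch is invented; the 60-second default is a stand-in for Transform.DEFAULT_TRANSFORM_FREQUENCY):

import org.elasticsearch.core.TimeValue;

public class FrequencyFloorSketch {

    // Stand-in for Transform.DEFAULT_TRANSFORM_FREQUENCY.
    private static final TimeValue DEFAULT_FREQUENCY = TimeValue.timeValueSeconds(60);

    // Mirrors the scheduler's getFrequency: default a missing frequency,
    // then floor it at the extension-provided minimum.
    static TimeValue effectiveFrequency(TimeValue configured, TimeValue minFrequency) {
        TimeValue frequency = configured == null ? DEFAULT_FREQUENCY : configured;
        return frequency.compareTo(minFrequency) >= 0 ? frequency : minFrequency;
    }

    public static void main(String[] args) {
        // A 100ms transform frequency is floored to the 1s minimum.
        System.out.println(effectiveFrequency(TimeValue.timeValueMillis(100), TimeValue.timeValueSeconds(1)));
    }
}
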
AtomicReference<>(IndexerState.STOPPED), @@ -229,7 +228,7 @@ public void testPitInjectionIfPitNotSupported() throws InterruptedException { mock(IndexBasedTransformConfigManager.class), mock(TransformCheckpointService.class), mock(TransformAuditor.class), - new TransformScheduler(Clock.systemUTC(), mock(ThreadPool.class), Settings.EMPTY) + new TransformScheduler(Clock.systemUTC(), mock(ThreadPool.class), Settings.EMPTY, TimeValue.ZERO) ), mock(CheckpointProvider.class), new AtomicReference<>(IndexerState.STOPPED), @@ -306,7 +305,7 @@ public void testDisablePit() throws InterruptedException { mock(IndexBasedTransformConfigManager.class), mock(TransformCheckpointService.class), mock(TransformAuditor.class), - new TransformScheduler(Clock.systemUTC(), mock(ThreadPool.class), Settings.EMPTY) + new TransformScheduler(Clock.systemUTC(), mock(ThreadPool.class), Settings.EMPTY, TimeValue.ZERO) ), mock(CheckpointProvider.class), new AtomicReference<>(IndexerState.STOPPED), @@ -370,7 +369,7 @@ public void testDisablePitWhenThereIsRemoteIndexInSource() throws InterruptedExc mock(IndexBasedTransformConfigManager.class), mock(TransformCheckpointService.class), mock(TransformAuditor.class), - new TransformScheduler(Clock.systemUTC(), mock(ThreadPool.class), Settings.EMPTY) + new TransformScheduler(Clock.systemUTC(), mock(ThreadPool.class), Settings.EMPTY, TimeValue.ZERO) ), mock(CheckpointProvider.class), new AtomicReference<>(IndexerState.STOPPED), @@ -420,8 +419,9 @@ public void testHandlePitIndexNotFound() throws InterruptedException { try (var threadPool = createThreadPool()) { final var client = new PitMockClient(threadPool, true); ClientTransformIndexer indexer = createTestIndexer(new ParentTaskAssigningClient(client, new TaskId("dummy-node:123456"))); - SearchRequest searchRequest = new SearchRequest("deleted-index"); - searchRequest.source().pointInTimeBuilder(new PointInTimeBuilder("the_pit_id")); + SearchRequest searchRequest = new SearchRequest("deleted-index").source( + new SearchSourceBuilder().pointInTimeBuilder(new PointInTimeBuilder("the_pit_id_on_deleted_index")) + ); Tuple namedSearchRequest = new Tuple<>("test-handle-pit-index-not-found", searchRequest); this.assertAsync(listener -> indexer.doSearch(namedSearchRequest, listener), response -> { // if the pit got deleted, we know it retried @@ -433,8 +433,9 @@ public void testHandlePitIndexNotFound() throws InterruptedException { try (var threadPool = createThreadPool()) { final var client = new PitMockClient(threadPool, true); ClientTransformIndexer indexer = createTestIndexer(new ParentTaskAssigningClient(client, new TaskId("dummy-node:123456"))); - SearchRequest searchRequest = new SearchRequest("essential-deleted-index"); - searchRequest.source().pointInTimeBuilder(new PointInTimeBuilder("the_pit_id")); + SearchRequest searchRequest = new SearchRequest("essential-deleted-index").source( + new SearchSourceBuilder().pointInTimeBuilder(new PointInTimeBuilder("the_pit_id_essential-deleted-index")) + ); Tuple namedSearchRequest = new Tuple<>("test-handle-pit-index-not-found", searchRequest); indexer.doSearch(namedSearchRequest, ActionListener.wrap(r -> fail("expected a failure, got response"), e -> { assertTrue(e instanceof IndexNotFoundException); @@ -521,14 +522,16 @@ protected void listener.onResponse((Response) response); return; } else if (request instanceof SearchRequest searchRequest) { - // if pit is used and deleted-index is given throw index not found - if (searchRequest.pointInTimeBuilder() != null && 
Arrays.binarySearch(searchRequest.indices(), "deleted-index") >= 0) { + if (searchRequest.pointInTimeBuilder() != null + && searchRequest.pointInTimeBuilder().getEncodedId().equals("the_pit_id_on_deleted_index")) { listener.onFailure(new IndexNotFoundException("deleted-index")); return; } - if (Arrays.binarySearch(searchRequest.indices(), "essential-deleted-index") >= 0) { + if ((searchRequest.pointInTimeBuilder() != null + && searchRequest.pointInTimeBuilder().getEncodedId().equals("the_pit_id_essential-deleted-index")) + || (searchRequest.indices().length > 0 && searchRequest.indices()[0].equals("essential-deleted-index"))) { listener.onFailure(new IndexNotFoundException("essential-deleted-index")); return; } @@ -539,16 +542,14 @@ protected void listener.onFailure(new SearchContextMissingException(new ShardSearchContextId("sc_missing", 42))); } else { SearchResponse response = new SearchResponse( - new InternalSearchResponse( - new SearchHits(new SearchHit[] { new SearchHit(1) }, new TotalHits(1L, TotalHits.Relation.EQUAL_TO), 1.0f), - // Simulate completely null aggs - null, - new Suggest(Collections.emptyList()), - new SearchProfileResults(Collections.emptyMap()), - false, - false, - 1 - ), + new SearchHits(new SearchHit[] { new SearchHit(1) }, new TotalHits(1L, TotalHits.Relation.EQUAL_TO), 1.0f), + // Simulate completely null aggs + null, + new Suggest(Collections.emptyList()), + false, + false, + new SearchProfileResults(Collections.emptyMap()), + 1, null, 1, 1, @@ -564,7 +565,6 @@ protected void } return; } - super.doExecute(action, request, listener); } } @@ -599,7 +599,7 @@ private ClientTransformIndexer createTestIndexer(ParentTaskAssigningClient clien mock(IndexBasedTransformConfigManager.class), mock(TransformCheckpointService.class), mock(TransformAuditor.class), - new TransformScheduler(Clock.systemUTC(), mock(ThreadPool.class), Settings.EMPTY) + new TransformScheduler(Clock.systemUTC(), mock(ThreadPool.class), Settings.EMPTY, TimeValue.ZERO) ), mock(CheckpointProvider.class), new AtomicReference<>(IndexerState.STOPPED), diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerFailureHandlingTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerFailureHandlingTests.java index d3be18a193415..5c6539d0a5045 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerFailureHandlingTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerFailureHandlingTests.java @@ -30,7 +30,6 @@ import org.elasticsearch.script.ScriptException; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.profile.SearchProfileResults; import org.elasticsearch.search.suggest.Suggest; import org.elasticsearch.test.ESTestCase; @@ -129,7 +128,7 @@ static class MockedTransformIndexer extends ClientTransformIndexer { transformsConfigManager, mock(TransformCheckpointService.class), auditor, - new TransformScheduler(Clock.systemUTC(), threadPool, Settings.EMPTY) + new TransformScheduler(Clock.systemUTC(), threadPool, Settings.EMPTY, TimeValue.ZERO) ), checkpointProvider, initialState, @@ -224,16 +223,14 @@ protected void onAbort() { void doGetInitialProgress(SearchRequest request, ActionListener responseListener) { responseListener.onResponse( new 
SearchResponse( - new InternalSearchResponse( - new SearchHits(new SearchHit[0], new TotalHits(0L, TotalHits.Relation.EQUAL_TO), 0.0f), - // Simulate completely null aggs - null, - new Suggest(Collections.emptyList()), - new SearchProfileResults(Collections.emptyMap()), - false, - false, - 1 - ), + new SearchHits(new SearchHit[0], new TotalHits(0L, TotalHits.Relation.EQUAL_TO), 0.0f), + // Simulate completely null aggs + null, + new Suggest(Collections.emptyList()), + false, + false, + new SearchProfileResults(Collections.emptyMap()), + 1, "", 1, 1, @@ -375,16 +372,14 @@ public void testDoProcessAggNullCheck() { null ); SearchResponse searchResponse = new SearchResponse( - new InternalSearchResponse( - new SearchHits(new SearchHit[0], new TotalHits(0L, TotalHits.Relation.EQUAL_TO), 0.0f), - // Simulate completely null aggs - null, - new Suggest(Collections.emptyList()), - new SearchProfileResults(Collections.emptyMap()), - false, - false, - 1 - ), + new SearchHits(new SearchHit[0], new TotalHits(0L, TotalHits.Relation.EQUAL_TO), 0.0f), + // Simulate completely null aggs + null, + new Suggest(Collections.emptyList()), + false, + false, + new SearchProfileResults(Collections.emptyMap()), + 1, "", 1, 1, @@ -513,16 +508,14 @@ public void testRetentionPolicyDeleteByQueryThrowsIrrecoverable() throws Excepti ); final SearchResponse searchResponse = new SearchResponse( - new InternalSearchResponse( - new SearchHits(new SearchHit[] { new SearchHit(1) }, new TotalHits(1L, TotalHits.Relation.EQUAL_TO), 1.0f), - // Simulate completely null aggs - null, - new Suggest(Collections.emptyList()), - new SearchProfileResults(Collections.emptyMap()), - false, - false, - 1 - ), + new SearchHits(new SearchHit[] { new SearchHit(1) }, new TotalHits(1L, TotalHits.Relation.EQUAL_TO), 1.0f), + // Simulate completely null aggs + null, + new Suggest(Collections.emptyList()), + false, + false, + new SearchProfileResults(Collections.emptyMap()), + 1, "", 1, 1, @@ -605,16 +598,14 @@ public void testRetentionPolicyDeleteByQueryThrowsTemporaryProblem() throws Exce ); final SearchResponse searchResponse = new SearchResponse( - new InternalSearchResponse( - new SearchHits(new SearchHit[] { new SearchHit(1) }, new TotalHits(1L, TotalHits.Relation.EQUAL_TO), 1.0f), - // Simulate completely null aggs - null, - new Suggest(Collections.emptyList()), - new SearchProfileResults(Collections.emptyMap()), - false, - false, - 1 - ), + new SearchHits(new SearchHit[] { new SearchHit(1) }, new TotalHits(1L, TotalHits.Relation.EQUAL_TO), 1.0f), + // Simulate completely null aggs + null, + new Suggest(Collections.emptyList()), + false, + false, + new SearchProfileResults(Collections.emptyMap()), + 1, "", 1, 1, @@ -700,16 +691,14 @@ public void testFailureCounterIsResetOnSuccess() throws Exception { ); final SearchResponse searchResponse = new SearchResponse( - new InternalSearchResponse( - new SearchHits(new SearchHit[] { new SearchHit(1) }, new TotalHits(1L, TotalHits.Relation.EQUAL_TO), 1.0f), - // Simulate completely null aggs - null, - new Suggest(Collections.emptyList()), - new SearchProfileResults(Collections.emptyMap()), - false, - false, - 1 - ), + new SearchHits(new SearchHit[] { new SearchHit(1) }, new TotalHits(1L, TotalHits.Relation.EQUAL_TO), 1.0f), + // Simulate completely null aggs + null, + new Suggest(Collections.emptyList()), + false, + false, + new SearchProfileResults(Collections.emptyMap()), + 1, "", 1, 1, diff --git 
a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerFailureOnStatePersistenceTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerFailureOnStatePersistenceTests.java index 55ae653c39629..750e535c4330f 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerFailureOnStatePersistenceTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerFailureOnStatePersistenceTests.java @@ -12,6 +12,7 @@ import org.elasticsearch.action.LatchedActionListener; import org.elasticsearch.client.internal.ParentTaskAssigningClient; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; import org.elasticsearch.index.engine.VersionConflictEngineException; import org.elasticsearch.index.shard.ShardId; @@ -217,7 +218,7 @@ public void fail(Throwable exception, String failureMessage, ActionListener(IndexerState.STOPPED), @@ -299,7 +300,7 @@ public void fail(Throwable exception, String failureMessage, ActionListener(IndexerState.STOPPED), @@ -430,7 +431,7 @@ public void fail(Throwable exception, String failureMessage, ActionListener(IndexerState.STOPPED), diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerStateTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerStateTests.java index 638a66fa3fb0d..9e72a92da5bee 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerStateTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerStateTests.java @@ -26,7 +26,6 @@ import org.elasticsearch.index.reindex.DeleteByQueryRequest; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.profile.SearchProfileResults; import org.elasticsearch.search.suggest.Suggest; import org.elasticsearch.test.ESTestCase; @@ -79,16 +78,14 @@ public class TransformIndexerStateTests extends ESTestCase { private static final SearchResponse ONE_HIT_SEARCH_RESPONSE = new SearchResponse( - new InternalSearchResponse( - new SearchHits(new SearchHit[] { new SearchHit(1) }, new TotalHits(1L, TotalHits.Relation.EQUAL_TO), 1.0f), - // Simulate completely null aggs - null, - new Suggest(Collections.emptyList()), - new SearchProfileResults(Collections.emptyMap()), - false, - false, - 1 - ), + new SearchHits(new SearchHit[] { new SearchHit(1) }, new TotalHits(1L, TotalHits.Relation.EQUAL_TO), 1.0f), + // Simulate completely null aggs + null, + new Suggest(Collections.emptyList()), + false, + false, + new SearchProfileResults(Collections.emptyMap()), + 1, "", 1, 1, @@ -805,7 +802,7 @@ private MockedTransformIndexer createMockIndexer( transformConfigManager, mock(TransformCheckpointService.class), transformAuditor, - new TransformScheduler(Clock.systemUTC(), threadPool, Settings.EMPTY) + new TransformScheduler(Clock.systemUTC(), threadPool, Settings.EMPTY, TimeValue.ZERO) ); MockedTransformIndexer indexer = new MockedTransformIndexer( @@ -839,7 +836,7 @@ private MockedTransformIndexerForStatePersistenceTesting createMockIndexerForSta transformConfigManager, mock(TransformCheckpointService.class), 
transformAuditor, - new TransformScheduler(Clock.systemUTC(), threadPool, Settings.EMPTY) + new TransformScheduler(Clock.systemUTC(), threadPool, Settings.EMPTY, TimeValue.ZERO) ); MockedTransformIndexerForStatePersistenceTesting indexer = new MockedTransformIndexerForStatePersistenceTesting( diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerTests.java index a7d588641da75..94a4440f7c659 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformIndexerTests.java @@ -26,7 +26,6 @@ import org.elasticsearch.index.reindex.DeleteByQueryRequest; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.search.profile.SearchProfileResults; import org.elasticsearch.search.suggest.Suggest; import org.elasticsearch.test.ESTestCase; @@ -75,16 +74,14 @@ public class TransformIndexerTests extends ESTestCase { private static final SearchResponse ONE_HIT_SEARCH_RESPONSE = new SearchResponse( - new InternalSearchResponse( - new SearchHits(new SearchHit[] { new SearchHit(1) }, new TotalHits(1L, TotalHits.Relation.EQUAL_TO), 1.0f), - // Simulate completely null aggs - null, - new Suggest(Collections.emptyList()), - new SearchProfileResults(Collections.emptyMap()), - false, - false, - 1 - ), + new SearchHits(new SearchHit[] { new SearchHit(1) }, new TotalHits(1L, TotalHits.Relation.EQUAL_TO), 1.0f), + // Simulate completely null aggs + null, + new Suggest(Collections.emptyList()), + false, + false, + new SearchProfileResults(Collections.emptyMap()), + 1, "", 1, 1, @@ -455,7 +452,7 @@ private MockedTransformIndexer createMockIndexer( transformConfigManager, mock(TransformCheckpointService.class), transformAuditor, - new TransformScheduler(Clock.systemUTC(), threadPool, Settings.EMPTY) + new TransformScheduler(Clock.systemUTC(), threadPool, Settings.EMPTY, TimeValue.ZERO) ); MockedTransformIndexer indexer = new MockedTransformIndexer( diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformPersistentTasksExecutorTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformPersistentTasksExecutorTests.java index b1582970d4e07..69d81c85a62d3 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformPersistentTasksExecutorTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformPersistentTasksExecutorTests.java @@ -24,6 +24,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.shard.ShardId; @@ -444,7 +445,7 @@ private TransformPersistentTasksExecutor buildTaskExecutor() { transformsConfigManager, transformCheckpointService, mockAuditor, - new TransformScheduler(Clock.systemUTC(), threadPool, Settings.EMPTY) + new TransformScheduler(Clock.systemUTC(), threadPool, Settings.EMPTY, TimeValue.ZERO) ); 
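The recurring test-file change in the hunks above and below is mechanical: InternalSearchResponse and SearchResponseSections are gone, so tests hand all sections straight to the flattened SearchResponse constructor, with the profile results and the reduce-phase count moving after the two boolean flags. For orientation, the one-hit fixture these tests build looks like this when written out in full (the argument comments are mine; assumes the Elasticsearch server test classpath):

import org.apache.lucene.search.TotalHits;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.ShardSearchFailure;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.SearchHits;
import org.elasticsearch.search.profile.SearchProfileResults;
import org.elasticsearch.search.suggest.Suggest;

import java.util.Collections;

public class SearchResponseSketch {
    static SearchResponse oneHitResponse() {
        return new SearchResponse(
            new SearchHits(new SearchHit[] { new SearchHit(1) }, new TotalHits(1L, TotalHits.Relation.EQUAL_TO), 1.0f),
            null,                                             // aggregations (simulate completely null aggs)
            new Suggest(Collections.emptyList()),             // suggest
            false,                                            // timed out
            false,                                            // terminated early
            new SearchProfileResults(Collections.emptyMap()), // profile results
            1,                                                // reduce phases
            "",                                               // scroll id
            1,                                                // total shards
            1,                                                // successful shards
            0,                                                // skipped shards
            0,                                                // took, in millis
            ShardSearchFailure.EMPTY_ARRAY,
            null                                              // clusters
        );
    }
}
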
ClusterSettings cSettings = new ClusterSettings(Settings.EMPTY, Collections.singleton(Transform.NUM_FAILURE_RETRIES_SETTING)); diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformTaskTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformTaskTests.java index cda258c6daa81..12af48faf8e38 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformTaskTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformTaskTests.java @@ -112,7 +112,7 @@ public void testStopOnFailedTaskWithStoppedIndexer() { transformsConfigManager, transformsCheckpointService, auditor, - new TransformScheduler(clock, threadPool, Settings.EMPTY) + new TransformScheduler(clock, threadPool, Settings.EMPTY, TimeValue.ZERO) ); TransformState transformState = new TransformState( @@ -134,7 +134,7 @@ public void testStopOnFailedTaskWithStoppedIndexer() { TaskId.EMPTY_TASK_ID, createTransformTaskParams(transformConfig.getId()), transformState, - new TransformScheduler(clock, threadPool, Settings.EMPTY), + new TransformScheduler(clock, threadPool, Settings.EMPTY, TimeValue.ZERO), auditor, threadPool, Collections.emptyMap() @@ -212,7 +212,7 @@ public void testStopOnFailedTaskWithoutIndexer() { TaskId.EMPTY_TASK_ID, createTransformTaskParams(transformConfig.getId()), transformState, - new TransformScheduler(Clock.systemUTC(), threadPool, Settings.EMPTY), + new TransformScheduler(Clock.systemUTC(), threadPool, Settings.EMPTY, TimeValue.ZERO), auditor, threadPool, Collections.emptyMap() @@ -431,7 +431,7 @@ public void testApplyNewAuthState() { TaskId.EMPTY_TASK_ID, createTransformTaskParams(transformConfig.getId()), transformState, - new TransformScheduler(Clock.systemUTC(), threadPool, Settings.EMPTY), + new TransformScheduler(Clock.systemUTC(), threadPool, Settings.EMPTY, TimeValue.ZERO), auditor, threadPool, Collections.emptyMap() diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/CompositeBucketsChangeCollectorTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/CompositeBucketsChangeCollectorTests.java index 9b8cf9745c558..708cb3d93cbed 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/CompositeBucketsChangeCollectorTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/CompositeBucketsChangeCollectorTests.java @@ -8,7 +8,6 @@ package org.elasticsearch.xpack.transform.transforms.pivot; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.search.SearchResponseSections; import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.TermsQueryBuilder; @@ -112,8 +111,22 @@ public void testTermsFieldCollector() throws IOException { }); Aggregations aggs = new Aggregations(Collections.singletonList(composite)); - SearchResponseSections sections = new SearchResponseSections(null, aggs, null, false, null, null, 1); - SearchResponse response = new SearchResponse(sections, null, 1, 1, 0, 0, ShardSearchFailure.EMPTY_ARRAY, null); + SearchResponse response = new SearchResponse( + null, + aggs, + null, + false, + null, + null, + 1, + null, + 1, + 1, + 0, + 0, + ShardSearchFailure.EMPTY_ARRAY, + null + ); try { 
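// The SearchResponse above is built from positional arguments; read against
// the other call sites in this diff, the values line up as (names inferred,
// a sketch rather than the declared signature): hits = null,
// aggregations = aggs, suggest = null, timedOut = false,
// terminatedEarly = null, profileResults = null, numReducePhases = 1,
// scrollId = null, totalShards = 1, successfulShards = 1, skippedShards = 0,
// tookInMillis = 0, shardFailures = ShardSearchFailure.EMPTY_ARRAY,
// clusters = null.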
collector.processSearchResponse(response); diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/DateHistogramFieldCollectorTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/DateHistogramFieldCollectorTests.java index d43b4bd672a07..dab6d8518d28f 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/DateHistogramFieldCollectorTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/DateHistogramFieldCollectorTests.java @@ -8,7 +8,6 @@ package org.elasticsearch.xpack.transform.transforms.pivot; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.search.SearchResponseSections; import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.RangeQueryBuilder; @@ -171,16 +170,22 @@ private static QueryBuilder buildFilterQuery(ChangeCollector collector) { } private static SearchResponse buildSearchResponse(SingleValue minTimestamp, SingleValue maxTimestamp) { - SearchResponseSections sections = new SearchResponseSections( + return new SearchResponse( null, new Aggregations(Arrays.asList(minTimestamp, maxTimestamp)), null, false, null, null, - 1 + 1, + null, + 1, + 1, + 0, + 0, + ShardSearchFailure.EMPTY_ARRAY, + null ); - return new SearchResponse(sections, null, 1, 1, 0, 0, ShardSearchFailure.EMPTY_ARRAY, null); } } diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/PivotTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/PivotTests.java index 37bee4a4eb999..66e7efe764732 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/PivotTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/PivotTests.java @@ -14,7 +14,6 @@ import org.elasticsearch.action.ActionType; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.search.SearchResponseSections; import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.aggregations.AggregationsPlugin; import org.elasticsearch.client.internal.Client; @@ -327,9 +326,7 @@ public void testPreviewForCompositeAggregation() throws Exception { } private static SearchResponse searchResponseFromAggs(Aggregations aggs) { - SearchResponseSections sections = new SearchResponseSections(null, aggs, null, false, null, null, 1); - SearchResponse searchResponse = new SearchResponse(sections, null, 10, 5, 0, 0, new ShardSearchFailure[0], null); - return searchResponse; + return new SearchResponse(null, aggs, null, false, null, null, 1, null, 10, 5, 0, 0, new ShardSearchFailure[0], null); } private class MyMockClient extends NoOpClient { @@ -359,17 +356,14 @@ protected void } } - final SearchResponseSections sections = new SearchResponseSections( + final SearchResponse response = new SearchResponse( new SearchHits(new SearchHit[0], new TotalHits(0L, TotalHits.Relation.EQUAL_TO), 0), null, null, false, null, null, - 1 - ); - final SearchResponse response = new SearchResponse( - sections, + 1, null, 10, searchFailures.size() > 0 ? 
0 : 5, diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/scheduling/TransformScheduledTaskQueueTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/scheduling/TransformScheduledTaskQueueTests.java index 4748189745f1b..5030d42f9c17c 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/scheduling/TransformScheduledTaskQueueTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/scheduling/TransformScheduledTaskQueueTests.java @@ -9,6 +9,7 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.xpack.transform.Transform; import org.elasticsearch.xpack.transform.transforms.scheduling.TransformScheduler.Event; import org.hamcrest.Matchers; import org.junit.After; @@ -197,7 +198,7 @@ public void testUpdatePriority() { private static TransformScheduledTask createTask(String transformId, long nextScheduledTimeMillis) { return new TransformScheduledTask( transformId, - null, + Transform.DEFAULT_SCHEDULER_FREQUENCY, null, 0, nextScheduledTimeMillis, diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/scheduling/TransformScheduledTaskTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/scheduling/TransformScheduledTaskTests.java index fd8a1de429c14..5d2efdd23a0af 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/scheduling/TransformScheduledTaskTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/scheduling/TransformScheduledTaskTests.java @@ -32,11 +32,6 @@ public void testBasics() { assertThat(task.getListener(), is(equalTo(LISTENER))); } - public void testDefaultFrequency() { - TransformScheduledTask task = new TransformScheduledTask(TRANSFORM_ID, null, LAST_TRIGGERED_TIME_MILLIS, 0, 0, LISTENER); - assertThat(task.getFrequency(), is(equalTo(DEFAULT_FREQUENCY))); - } - public void testNextScheduledTimeMillis() { { TransformScheduledTask task = new TransformScheduledTask(TRANSFORM_ID, FREQUENCY, LAST_TRIGGERED_TIME_MILLIS, 0, 123, LISTENER); diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/scheduling/TransformSchedulerTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/scheduling/TransformSchedulerTests.java index 7125b4074bc4a..8d3220a5b4de3 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/scheduling/TransformSchedulerTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/scheduling/TransformSchedulerTests.java @@ -61,49 +61,59 @@ public void shutdownThreadPool() { } public void testScheduling() { + testScheduling(5, 0); + } + + public void testScheduling_withMinFrequency() { + testScheduling(1, 5); + } + + // Note: frequencySeconds and minFrequencySeconds together should lead to an expected frequency of 5 seconds. 
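// Both parameterizations are expected to converge on the same effective 5s
// schedule (a sketch of the two call sites above, assuming the scheduler
// clamps each transform's frequency up to the configured minimum):
//
//   testScheduling(5, 0); // 5s frequency, minimum disabled
//   testScheduling(1, 5); // 1s frequency raised to the 5s minimum
//
// Each block of five loop iterations below advances the fake clock by
// 1001 ms per pass, so a block ends at 5 * 1001 = 5005 ms, just past the
// 5000 ms trigger point; that is why exactly one new event is recorded per
// block.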
+ private void testScheduling(int frequencySeconds, int minFrequencySeconds) { String transformId = "test-with-fake-clock"; - int frequencySeconds = 5; TimeValue frequency = TimeValue.timeValueSeconds(frequencySeconds); + TimeValue minFrequency = TimeValue.timeValueSeconds(minFrequencySeconds); + TimeValue fiveSeconds = TimeValue.timeValueSeconds(5); TransformTaskParams transformTaskParams = new TransformTaskParams(transformId, TransformConfigVersion.CURRENT, frequency, false); FakeClock clock = new FakeClock(Instant.ofEpochMilli(0)); CopyOnWriteArrayList<TransformScheduler.Event> events = new CopyOnWriteArrayList<>(); TransformScheduler.Listener listener = events::add; - TransformScheduler transformScheduler = new TransformScheduler(clock, threadPool, SETTINGS); + TransformScheduler transformScheduler = new TransformScheduler(clock, threadPool, SETTINGS, minFrequency); transformScheduler.registerTransform(transformTaskParams, listener); assertThat( transformScheduler.getTransformScheduledTasks(), - contains(new TransformScheduledTask(transformId, frequency, 0L, 0, 5000, listener)) + contains(new TransformScheduledTask(transformId, fiveSeconds, 0L, 0, 5000, listener)) ); assertThat(events, hasSize(1)); - for (int i = 0; i < frequencySeconds; ++i) { + for (int i = 0; i < 5; ++i) { transformScheduler.processScheduledTasks(); assertThat( transformScheduler.getTransformScheduledTasks(), - contains(new TransformScheduledTask(transformId, frequency, 0L, 0, 5000, listener)) + contains(new TransformScheduledTask(transformId, fiveSeconds, 0L, 0, 5000, listener)) ); assertThat(events, hasSize(1)); clock.advanceTimeBy(Duration.ofMillis(1001)); } assertThat(clock.instant(), is(equalTo(Instant.ofEpochMilli(5005)))); - for (int i = 0; i < frequencySeconds; ++i) { + for (int i = 0; i < 5; ++i) { transformScheduler.processScheduledTasks(); assertThat( transformScheduler.getTransformScheduledTasks(), - contains(new TransformScheduledTask(transformId, frequency, 5005L, 0, 10005, listener)) + contains(new TransformScheduledTask(transformId, fiveSeconds, 5005L, 0, 10005, listener)) ); assertThat(events, hasSize(2)); clock.advanceTimeBy(Duration.ofMillis(1001)); } assertThat(clock.instant(), is(equalTo(Instant.ofEpochMilli(10010)))); - for (int i = 0; i < frequencySeconds; ++i) { + for (int i = 0; i < 5; ++i) { transformScheduler.processScheduledTasks(); assertThat( transformScheduler.getTransformScheduledTasks(), - contains(new TransformScheduledTask(transformId, frequency, 10010L, 0, 15010, listener)) + contains(new TransformScheduledTask(transformId, fiveSeconds, 10010L, 0, 15010, listener)) ); assertThat(events, hasSize(3)); clock.advanceTimeBy(Duration.ofMillis(1001)); @@ -128,7 +138,7 @@ public void testSchedulingWithFailures() { CopyOnWriteArrayList<TransformScheduler.Event> events = new CopyOnWriteArrayList<>(); TransformScheduler.Listener listener = events::add; - TransformScheduler transformScheduler = new TransformScheduler(clock, threadPool, SETTINGS); + TransformScheduler transformScheduler = new TransformScheduler(clock, threadPool, SETTINGS, TimeValue.ZERO); transformScheduler.registerTransform(transformTaskParams, listener); assertThat( transformScheduler.getTransformScheduledTasks(), @@ -180,7 +190,7 @@ public void testScheduleNow() { CopyOnWriteArrayList<TransformScheduler.Event> events = new CopyOnWriteArrayList<>(); TransformScheduler.Listener listener = events::add; - TransformScheduler transformScheduler = new TransformScheduler(clock, threadPool, SETTINGS); + TransformScheduler transformScheduler = new TransformScheduler(clock, threadPool, SETTINGS, TimeValue.ZERO);
transformScheduler.registerTransform(transformTaskParams, listener); assertThat( transformScheduler.getTransformScheduledTasks(), @@ -230,7 +240,7 @@ public void testConcurrentProcessing() throws Exception { CopyOnWriteArrayList events = new CopyOnWriteArrayList<>(); TransformScheduler.Listener listener = events::add; - TransformScheduler transformScheduler = new TransformScheduler(clock, threadPool, SETTINGS); + TransformScheduler transformScheduler = new TransformScheduler(clock, threadPool, SETTINGS, TimeValue.ZERO); transformScheduler.registerTransform(transformTaskParams, listener); assertThat( transformScheduler.getTransformScheduledTasks(), @@ -267,7 +277,7 @@ public void testConcurrentModifications() { FakeClock clock = new FakeClock(Instant.ofEpochMilli(0)); CopyOnWriteArrayList events = new CopyOnWriteArrayList<>(); - TransformScheduler transformScheduler = new TransformScheduler(clock, threadPool, SETTINGS); + TransformScheduler transformScheduler = new TransformScheduler(clock, threadPool, SETTINGS, TimeValue.ZERO); TransformScheduler.Listener taskModifyingListener = new TransformScheduler.Listener() { private boolean firstTime = true; @@ -309,7 +319,7 @@ public void testSchedulingWithSystemClock() throws Exception { Clock clock = Clock.systemUTC(); CopyOnWriteArrayList events = new CopyOnWriteArrayList<>(); - TransformScheduler transformScheduler = new TransformScheduler(clock, threadPool, SETTINGS); + TransformScheduler transformScheduler = new TransformScheduler(clock, threadPool, SETTINGS, TimeValue.ZERO); transformScheduler.start(); transformScheduler.registerTransform(transformTaskParams, events::add); assertThat(events, hasSize(1)); @@ -334,7 +344,7 @@ public void testScheduleNowWithSystemClock() throws Exception { Clock clock = Clock.systemUTC(); CopyOnWriteArrayList events = new CopyOnWriteArrayList<>(); - TransformScheduler transformScheduler = new TransformScheduler(clock, threadPool, SETTINGS); + TransformScheduler transformScheduler = new TransformScheduler(clock, threadPool, SETTINGS, TimeValue.ZERO); transformScheduler.start(); transformScheduler.registerTransform(transformTaskParams, events::add); assertThat(events, hasSize(1)); @@ -391,7 +401,7 @@ public void testRegisterMultipleTransforms() { CopyOnWriteArrayList events = new CopyOnWriteArrayList<>(); TransformScheduler.Listener listener = events::add; - TransformScheduler transformScheduler = new TransformScheduler(clock, threadPool, SETTINGS); + TransformScheduler transformScheduler = new TransformScheduler(clock, threadPool, SETTINGS, TimeValue.ZERO); transformScheduler.registerTransform(transformTaskParams1, listener); transformScheduler.registerTransform(transformTaskParams2, listener); transformScheduler.registerTransform(transformTaskParams3, listener); @@ -421,7 +431,7 @@ public void testMultipleTransformsEligibleForProcessingAtOnce() { CopyOnWriteArrayList events = new CopyOnWriteArrayList<>(); TransformScheduler.Listener listener = events::add; - TransformScheduler transformScheduler = new TransformScheduler(clock, threadPool, SETTINGS); + TransformScheduler transformScheduler = new TransformScheduler(clock, threadPool, SETTINGS, TimeValue.ZERO); transformScheduler.registerTransform(transformTaskParams1, listener); transformScheduler.registerTransform(transformTaskParams2, listener); transformScheduler.registerTransform(transformTaskParams3, listener); diff --git a/x-pack/plugin/vector-tile/src/main/java/org/elasticsearch/xpack/vectortile/rest/RestVectorTileAction.java 
b/x-pack/plugin/vector-tile/src/main/java/org/elasticsearch/xpack/vectortile/rest/RestVectorTileAction.java index f3332cb50e27b..63850e11ae64b 100644 --- a/x-pack/plugin/vector-tile/src/main/java/org/elasticsearch/xpack/vectortile/rest/RestVectorTileAction.java +++ b/x-pack/plugin/vector-tile/src/main/java/org/elasticsearch/xpack/vectortile/rest/RestVectorTileAction.java @@ -11,7 +11,6 @@ import org.elasticsearch.action.search.SearchRequestBuilder; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.search.SearchResponseSections; import org.elasticsearch.client.internal.node.NodeClient; import org.elasticsearch.common.document.DocumentField; import org.elasticsearch.common.geo.GeoBoundingBox; @@ -147,21 +146,14 @@ public RestResponse buildResponse(SearchResponse searchResponse) throws Exceptio .collect(Collectors.toList()) ); final SearchResponse meta = new SearchResponse( - new SearchResponseSections( - new SearchHits( - SearchHits.EMPTY, - searchResponse.getHits().getTotalHits(), - searchResponse.getHits().getMaxScore() - ), // remove actual hits - aggsWithoutGridAndBounds, - searchResponse.getSuggest(), - searchResponse.isTimedOut(), - searchResponse.isTerminatedEarly(), - searchResponse.getProfileResults() == null - ? null - : new SearchProfileResults(searchResponse.getProfileResults()), - searchResponse.getNumReducePhases() - ), + // remove actual hits + new SearchHits(SearchHits.EMPTY, searchResponse.getHits().getTotalHits(), searchResponse.getHits().getMaxScore()), + aggsWithoutGridAndBounds, + searchResponse.getSuggest(), + searchResponse.isTimedOut(), + searchResponse.isTerminatedEarly(), + searchResponse.getProfileResults() == null ? null : new SearchProfileResults(searchResponse.getProfileResults()), + searchResponse.getNumReducePhases(), searchResponse.getScrollId(), searchResponse.getTotalShards(), searchResponse.getSuccessfulShards(), diff --git a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/actions/webhook/WebhookTokenIntegrationTests.java b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/actions/webhook/WebhookTokenIntegrationTests.java index 5797541b72d98..9724aa6e0a8ce 100644 --- a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/actions/webhook/WebhookTokenIntegrationTests.java +++ b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/actions/webhook/WebhookTokenIntegrationTests.java @@ -7,8 +7,8 @@ package org.elasticsearch.xpack.watcher.actions.webhook; -import org.elasticsearch.action.admin.cluster.node.reload.NodesReloadSecureSettingsAction; import org.elasticsearch.action.admin.cluster.node.reload.NodesReloadSecureSettingsRequest; +import org.elasticsearch.action.admin.cluster.node.reload.TransportNodesReloadSecureSettingsAction; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.settings.KeyStoreWrapper; import org.elasticsearch.common.settings.MockSecureSettings; @@ -101,7 +101,7 @@ public void testWebhook() throws Exception { // Reload the keystore to load the new settings NodesReloadSecureSettingsRequest reloadReq = new NodesReloadSecureSettingsRequest(); reloadReq.setSecureStorePassword(new SecureString("".toCharArray())); - client().execute(NodesReloadSecureSettingsAction.INSTANCE, reloadReq).get(); + client().execute(TransportNodesReloadSecureSettingsAction.TYPE, reloadReq).get(); webServer.enqueue(new 
MockResponse().setResponseCode(200).setBody("body")); HttpRequestTemplate.Builder builder = HttpRequestTemplate.builder("localhost", webServer.getPort()) diff --git a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/condition/CompareConditionSearchTests.java b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/condition/CompareConditionSearchTests.java index dbb7b7d93c2e3..f02b3f865adf0 100644 --- a/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/condition/CompareConditionSearchTests.java +++ b/x-pack/plugin/watcher/src/internalClusterTest/java/org/elasticsearch/xpack/watcher/condition/CompareConditionSearchTests.java @@ -16,7 +16,6 @@ import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; import org.elasticsearch.xpack.core.watcher.watch.Payload; @@ -105,17 +104,14 @@ public void testExecuteAccessHits() throws Exception { hit.score(1f); hit.shard(new SearchShardTarget("a", new ShardId("a", "indexUUID", 0), null)); - InternalSearchResponse internalSearchResponse = new InternalSearchResponse( + SearchResponse response = new SearchResponse( new SearchHits(new SearchHit[] { hit }, new TotalHits(1L, TotalHits.Relation.EQUAL_TO), 1f), null, null, - null, false, false, - 1 - ); - SearchResponse response = new SearchResponse( - internalSearchResponse, + null, + 1, "", 3, 3, diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherServiceTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherServiceTests.java index 1dede3f4e135c..c2ed68d8fa1bd 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherServiceTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherServiceTests.java @@ -16,7 +16,6 @@ import org.elasticsearch.action.search.ClearScrollResponse; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.search.SearchResponseSections; import org.elasticsearch.action.search.SearchScrollRequest; import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.action.search.TransportClearScrollAction; @@ -43,6 +42,7 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; +import org.elasticsearch.search.SearchResponseUtils; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; @@ -172,29 +172,21 @@ void stopExecutor() {} return null; }).when(client).execute(eq(RefreshAction.INSTANCE), any(RefreshRequest.class), anyActionListener()); - // empty scroll response, no further scrolling needed - SearchResponseSections scrollSearchSections = new SearchResponseSections( - SearchHits.EMPTY_WITH_TOTAL_HITS, - null, - null, - false, - false, - null, - 1 - ); - SearchResponse scrollSearchResponse = new SearchResponse( - scrollSearchSections, - "scrollId", - 1, - 1, - 0, - 10, - ShardSearchFailure.EMPTY_ARRAY, - SearchResponse.Clusters.EMPTY - ); doAnswer(invocation -> { 
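// SearchResponse is ref-counted, so the stubbed transport answers hand the
// response to the listener via ActionListener.respondAndRelease, which
// releases the response once the listener has taken ownership. The pattern
// used below, in sketch form:
//
//   ActionListener.respondAndRelease(listener, response);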
ActionListener listener = (ActionListener) invocation.getArguments()[2]; - listener.onResponse(scrollSearchResponse); + // empty scroll response, no further scrolling needed + ActionListener.respondAndRelease( + listener, + SearchResponseUtils.emptyWithTotalHits( + "scrollId", + 1, + 1, + 0, + 10, + ShardSearchFailure.EMPTY_ARRAY, + SearchResponse.Clusters.EMPTY + ) + ); return null; }).when(client).execute(eq(TransportSearchScrollAction.TYPE), any(SearchScrollRequest.class), anyActionListener()); @@ -221,20 +213,27 @@ void stopExecutor() {} when(parser.parse(eq(id), eq(true), any(), eq(XContentType.JSON), anyLong(), anyLong())).thenReturn(watch); } SearchHits searchHits = new SearchHits(hits, new TotalHits(count, TotalHits.Relation.EQUAL_TO), 1.0f); - SearchResponseSections sections = new SearchResponseSections(searchHits, null, null, false, false, null, 1); - SearchResponse searchResponse = new SearchResponse( - sections, - "scrollId", - 1, - 1, - 0, - 10, - ShardSearchFailure.EMPTY_ARRAY, - SearchResponse.Clusters.EMPTY - ); doAnswer(invocation -> { ActionListener listener = (ActionListener) invocation.getArguments()[2]; - listener.onResponse(searchResponse); + ActionListener.respondAndRelease( + listener, + new SearchResponse( + searchHits, + null, + null, + false, + false, + null, + 1, + "scrollId", + 1, + 1, + 0, + 10, + ShardSearchFailure.EMPTY_ARRAY, + SearchResponse.Clusters.EMPTY + ) + ); return null; }).when(client).execute(eq(TransportSearchAction.TYPE), any(SearchRequest.class), anyActionListener()); diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/condition/ScriptConditionTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/condition/ScriptConditionTests.java index 89ddb2c0011bb..fa0dc89fd5106 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/condition/ScriptConditionTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/condition/ScriptConditionTests.java @@ -21,7 +21,7 @@ import org.elasticsearch.script.ScriptMetadata; import org.elasticsearch.script.ScriptService; import org.elasticsearch.script.ScriptType; -import org.elasticsearch.search.internal.InternalSearchResponse; +import org.elasticsearch.search.SearchResponseUtils; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; @@ -95,8 +95,7 @@ public void init() throws IOException { public void testExecute() throws Exception { ScriptCondition condition = new ScriptCondition(mockScript("ctx.payload.hits.total.value > 1"), scriptService); - SearchResponse response = new SearchResponse( - InternalSearchResponse.EMPTY_WITH_TOTAL_HITS, + SearchResponse response = SearchResponseUtils.emptyWithTotalHits( "", 3, 3, @@ -121,8 +120,7 @@ public void testExecuteMergedParams() throws Exception { singletonMap("threshold", 1) ); ScriptCondition executable = new ScriptCondition(script, scriptService); - SearchResponse response = new SearchResponse( - InternalSearchResponse.EMPTY_WITH_TOTAL_HITS, + SearchResponse response = SearchResponseUtils.emptyWithTotalHits( "", 3, 3, @@ -147,8 +145,7 @@ public void testParserValid() throws Exception { parser.nextToken(); ExecutableCondition executable = ScriptCondition.parse(scriptService, "_watch", parser); - SearchResponse response = new SearchResponse( - InternalSearchResponse.EMPTY_WITH_TOTAL_HITS, + SearchResponse response = SearchResponseUtils.emptyWithTotalHits( "", 3, 3, @@ -223,8 +220,7 
@@ public void testScriptConditionParser_badLang() throws Exception { public void testScriptConditionThrowException() throws Exception { ScriptCondition condition = new ScriptCondition(mockScript("null.foo"), scriptService); - SearchResponse response = new SearchResponse( - InternalSearchResponse.EMPTY_WITH_TOTAL_HITS, + SearchResponse response = SearchResponseUtils.emptyWithTotalHits( "", 3, 3, @@ -247,8 +243,7 @@ public void testScriptConditionAccessCtx() throws Exception { mockScript("ctx.trigger.scheduled_time.toInstant().toEpochMill() < new Date().time"), scriptService ); - SearchResponse response = new SearchResponse( - InternalSearchResponse.EMPTY_WITH_TOTAL_HITS, + SearchResponse response = SearchResponseUtils.emptyWithTotalHits( "", 3, 3, diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatchStoreTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatchStoreTests.java index 0f47df9dff12b..d25cc7168ec75 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatchStoreTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/execution/TriggeredWatchStoreTests.java @@ -46,8 +46,8 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; +import org.elasticsearch.search.SearchResponseUtils; import org.elasticsearch.search.SearchShardTarget; -import org.elasticsearch.search.internal.InternalSearchResponse; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.ToXContent; @@ -231,26 +231,8 @@ public void testFindTriggeredWatchesGoodCase() { hit.shard(new SearchShardTarget("_node_id", new ShardId(index, 0), null)); hit.sourceRef(source); hits = new SearchHits(new SearchHit[] { hit }, new TotalHits(1, TotalHits.Relation.EQUAL_TO), 1.0f); - SearchResponse searchResponse2 = new SearchResponse( - new InternalSearchResponse(hits, null, null, null, false, null, 1), - "_scrollId1", - 1, - 1, - 0, - 1, - null, - null - ); - SearchResponse searchResponse3 = new SearchResponse( - InternalSearchResponse.EMPTY_WITH_TOTAL_HITS, - "_scrollId2", - 1, - 1, - 0, - 1, - null, - null - ); + SearchResponse searchResponse2 = new SearchResponse(hits, null, null, false, null, null, 1, "_scrollId1", 1, 1, 0, 1, null, null); + SearchResponse searchResponse3 = SearchResponseUtils.emptyWithTotalHits("_scrollId2", 1, 1, 0, 1, null, null); doAnswer(invocation -> { SearchScrollRequest request = (SearchScrollRequest) invocation.getArguments()[1]; diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/SearchInputTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/SearchInputTests.java index d06ee606f31ce..172338d60bbe1 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/SearchInputTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/SearchInputTests.java @@ -22,8 +22,8 @@ import org.elasticsearch.script.ScriptEngine; import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.SearchModule; +import org.elasticsearch.search.SearchResponseUtils; import org.elasticsearch.search.builder.SearchSourceBuilder; -import org.elasticsearch.search.internal.InternalSearchResponse; import 
org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.NamedXContentRegistry; @@ -91,8 +91,7 @@ public void setup() { public void testExecute() throws Exception { ArgumentCaptor requestCaptor = ArgumentCaptor.forClass(SearchRequest.class); PlainActionFuture searchFuture = new PlainActionFuture<>(); - SearchResponse searchResponse = new SearchResponse( - InternalSearchResponse.EMPTY_WITH_TOTAL_HITS, + SearchResponse searchResponse = SearchResponseUtils.emptyWithTotalHits( "", 1, 1, @@ -132,8 +131,7 @@ public void testExecute() throws Exception { public void testDifferentSearchType() throws Exception { ArgumentCaptor requestCaptor = ArgumentCaptor.forClass(SearchRequest.class); PlainActionFuture searchFuture = new PlainActionFuture<>(); - SearchResponse searchResponse = new SearchResponse( - InternalSearchResponse.EMPTY_WITH_TOTAL_HITS, + SearchResponse searchResponse = SearchResponseUtils.emptyWithTotalHits( "", 1, 1, @@ -187,8 +185,7 @@ public void testParserValid() throws Exception { public void testThatEmptyRequestBodyWorks() throws Exception { ArgumentCaptor requestCaptor = ArgumentCaptor.forClass(SearchRequest.class); PlainActionFuture searchFuture = new PlainActionFuture<>(); - SearchResponse searchResponse = new SearchResponse( - InternalSearchResponse.EMPTY_WITH_TOTAL_HITS, + SearchResponse searchResponse = SearchResponseUtils.emptyWithTotalHits( "", 1, 1, diff --git a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MLModelDeploymentFullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MLModelDeploymentFullClusterRestartIT.java index b2594eaf02ea4..c6ef15bace343 100644 --- a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MLModelDeploymentFullClusterRestartIT.java +++ b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/MLModelDeploymentFullClusterRestartIT.java @@ -10,6 +10,7 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import org.apache.http.util.EntityUtils; +import org.apache.lucene.tests.util.LuceneTestCase; import org.elasticsearch.Version; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; @@ -36,6 +37,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; +@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/103808") public class MLModelDeploymentFullClusterRestartIT extends AbstractXpackFullClusterRestartTestCase { // See PyTorchModelIT for how this model was created diff --git a/x-pack/qa/multi-cluster-search-security/legacy-with-basic-license/build.gradle b/x-pack/qa/multi-cluster-search-security/legacy-with-basic-license/build.gradle index cce18a4bd1579..54b455d483b9a 100644 --- a/x-pack/qa/multi-cluster-search-security/legacy-with-basic-license/build.gradle +++ b/x-pack/qa/multi-cluster-search-security/legacy-with-basic-license/build.gradle @@ -8,7 +8,7 @@ apply plugin: 'elasticsearch.rest-resources' restResources { restApi { include '_common', 'bulk', 'field_caps', 'security', 'search', 'clear_scroll', 'scroll', 'async_search', 'cluster', - 'indices', 'open_point_in_time', 'close_point_in_time', 'terms_enum' + 'indices', 'open_point_in_time', 'close_point_in_time', 'terms_enum', 'esql' } } @@ -23,6 +23,8 @@ def fulfillingCluster = testClusters.register('fulfilling-cluster') { module ':modules:data-streams' module 
':x-pack:plugin:mapper-constant-keyword' module ':x-pack:plugin:async-search' + module ':x-pack:plugin:ql' + module ':x-pack:plugin:esql' user username: "test_user", password: "x-pack-test-password" } @@ -34,6 +36,8 @@ def queryingCluster = testClusters.register('querying-cluster') { module ':modules:data-streams' module ':x-pack:plugin:mapper-constant-keyword' module ':x-pack:plugin:async-search' + module ':x-pack:plugin:ql' + module ':x-pack:plugin:esql' setting 'cluster.remote.connections_per_cluster', "1" user username: "test_user", password: "x-pack-test-password" diff --git a/x-pack/qa/multi-cluster-search-security/legacy-with-basic-license/src/test/resources/rest-api-spec/test/fulfilling_cluster/10_basic.yml b/x-pack/qa/multi-cluster-search-security/legacy-with-basic-license/src/test/resources/rest-api-spec/test/fulfilling_cluster/10_basic.yml index e91a87b65c013..36002f3cde470 100644 --- a/x-pack/qa/multi-cluster-search-security/legacy-with-basic-license/src/test/resources/rest-api-spec/test/fulfilling_cluster/10_basic.yml +++ b/x-pack/qa/multi-cluster-search-security/legacy-with-basic-license/src/test/resources/rest-api-spec/test/fulfilling_cluster/10_basic.yml @@ -23,7 +23,7 @@ setup: "indices": [ { "names": ["single_doc_index", "secure_alias", "test_index", "aliased_test_index", "field_caps_index_1", - "field_caps_index_3", "point_in_time_index", "simple-data-stream1", "simple-data-stream2"], + "field_caps_index_3", "point_in_time_index", "simple-data-stream1", "simple-data-stream2", "esql_index"], "privileges": ["read", "read_cross_cluster"] } ] @@ -46,7 +46,7 @@ setup: "indices": [ { "names": ["single_doc_index", "secure_alias", "test_index", "aliased_test_index", "field_caps_index_1", - "field_caps_index_3", "point_in_time_index", "simple-data-stream1", "simple-data-stream2"], + "field_caps_index_3", "point_in_time_index", "simple-data-stream1", "simple-data-stream2", "esql_index"], "privileges": ["read", "read_cross_cluster"] } ] @@ -429,3 +429,31 @@ setup: - '{"foo": "foo"}' - '{"index": {"_index": "terms_enum_index"}}' - '{"foo": "foobar"}' + + - do: + indices.create: + index: esql_index + body: + mappings: + properties: + since: + type: date + format: "yyyy-MM-dd" + cost: + type: long + tag: + type: keyword + - do: + bulk: + refresh: true + body: + - '{"index": {"_index": "esql_index"}}' + - '{"since" : "2023-01-01", "cost": 1000, "tag": "computer"}' + - '{"index": {"_index": "esql_index"}}' + - '{ "since" : "2023-01-02", "cost": 1200, "tag": "computer"}' + - '{"index": {"_index": "esql_index"}}' + - '{"since" : "2023-01-03", "cost": 450, "tag": "tablet"}' + - '{"index": {"_index": "esql_index"}}' + - '{"since" : "2023-01-04", "cost": 100, "tag": "headphone"}' + - '{"index": {"_index": "esql_index"}}' + - '{"since" : "2023-01-05", "cost": 20, "tag": "headphone"}' diff --git a/x-pack/qa/multi-cluster-search-security/legacy-with-basic-license/src/test/resources/rest-api-spec/test/querying_cluster/100_resolve_index.yml b/x-pack/qa/multi-cluster-search-security/legacy-with-basic-license/src/test/resources/rest-api-spec/test/querying_cluster/100_resolve_index.yml index 36ea0b65f2aa5..b9dbb0a070af4 100644 --- a/x-pack/qa/multi-cluster-search-security/legacy-with-basic-license/src/test/resources/rest-api-spec/test/querying_cluster/100_resolve_index.yml +++ b/x-pack/qa/multi-cluster-search-security/legacy-with-basic-license/src/test/resources/rest-api-spec/test/querying_cluster/100_resolve_index.yml @@ -26,23 +26,25 @@ - match: {indices.4.name: my_remote_cluster:closed_index} - 
match: {indices.4.aliases.0: aliased_closed_index} - match: {indices.4.attributes.0: closed} - - match: {indices.5.name: my_remote_cluster:field_caps_index_1} - - match: {indices.5.attributes.0: open} - - match: {indices.6.name: my_remote_cluster:field_caps_index_3} + - match: {indices.5.name: my_remote_cluster:esql_index } + - match: {indices.5.attributes.0: open } + - match: {indices.6.name: my_remote_cluster:field_caps_index_1} - match: {indices.6.attributes.0: open} - - match: {indices.7.name: my_remote_cluster:point_in_time_index } - - match: {indices.7.attributes.0: open } - - match: {indices.8.name: my_remote_cluster:secured_via_alias} - - match: {indices.8.attributes.0: open} - - match: {indices.9.name: my_remote_cluster:shared_index} + - match: {indices.7.name: my_remote_cluster:field_caps_index_3} + - match: {indices.7.attributes.0: open} + - match: {indices.8.name: my_remote_cluster:point_in_time_index } + - match: {indices.8.attributes.0: open } + - match: {indices.9.name: my_remote_cluster:secured_via_alias} - match: {indices.9.attributes.0: open} - - match: {indices.10.name: my_remote_cluster:single_doc_index} + - match: {indices.10.name: my_remote_cluster:shared_index} - match: {indices.10.attributes.0: open} - - match: {indices.11.name: my_remote_cluster:terms_enum_index } - - match: {indices.11.attributes.0: open } - - match: {indices.12.name: my_remote_cluster:test_index} - - match: {indices.12.aliases.0: aliased_test_index} - - match: {indices.12.attributes.0: open} + - match: {indices.11.name: my_remote_cluster:single_doc_index} + - match: {indices.11.attributes.0: open} + - match: {indices.12.name: my_remote_cluster:terms_enum_index } + - match: {indices.12.attributes.0: open } + - match: {indices.13.name: my_remote_cluster:test_index} + - match: {indices.13.aliases.0: aliased_test_index} + - match: {indices.13.attributes.0: open} - match: {aliases.0.name: my_remote_cluster:.security} - match: {aliases.0.indices.0: .security-7} - match: {aliases.1.name: my_remote_cluster:aliased_closed_index} diff --git a/x-pack/qa/multi-cluster-search-security/legacy-with-basic-license/src/test/resources/rest-api-spec/test/querying_cluster/10_basic.yml b/x-pack/qa/multi-cluster-search-security/legacy-with-basic-license/src/test/resources/rest-api-spec/test/querying_cluster/10_basic.yml index 4a5905a11feed..cbbfbe2372f3e 100644 --- a/x-pack/qa/multi-cluster-search-security/legacy-with-basic-license/src/test/resources/rest-api-spec/test/querying_cluster/10_basic.yml +++ b/x-pack/qa/multi-cluster-search-security/legacy-with-basic-license/src/test/resources/rest-api-spec/test/querying_cluster/10_basic.yml @@ -52,6 +52,7 @@ teardown: security.delete_role: name: "x_cluster_role" ignore: 404 + --- "Index data and search on the mixed cluster": @@ -236,6 +237,9 @@ teardown: - match: { aggregations.cluster.buckets.0.key: "local_cluster" } - match: { aggregations.cluster.buckets.0.doc_count: 5 } + - do: + indices.delete: + index: local_index --- "Add persistent remote cluster based on the preset cluster": - do: diff --git a/x-pack/qa/multi-cluster-search-security/legacy-with-basic-license/src/test/resources/rest-api-spec/test/querying_cluster/80_esql.yml b/x-pack/qa/multi-cluster-search-security/legacy-with-basic-license/src/test/resources/rest-api-spec/test/querying_cluster/80_esql.yml new file mode 100644 index 0000000000000..1894a26e80f33 --- /dev/null +++ b/x-pack/qa/multi-cluster-search-security/legacy-with-basic-license/src/test/resources/rest-api-spec/test/querying_cluster/80_esql.yml @@ 
-0,0 +1,143 @@ +--- +setup: + - skip: + features: headers + + - do: + cluster.health: + wait_for_status: yellow + - do: + security.put_user: + username: "joe" + body: > + { + "password": "s3krit-password", + "roles" : [ "x_cluster_role" ] + } + - do: + security.put_role: + name: "x_cluster_role" + body: > + { + "cluster": [], + "indices": [ + { + "names": ["local_index", "esql_local"], + "privileges": ["read"] + } + ] + } + + - do: + security.put_user: + username: "remote" + body: > + { + "password": "s3krit-password", + "roles" : [ "remote_ccs" ] + } + - do: + security.put_role: + name: "remote_ccs" + body: > + { + } +--- +teardown: + - do: + security.delete_user: + username: "joe" + ignore: 404 + - do: + security.delete_role: + name: "x_cluster_role" + ignore: 404 + +--- +"Index data and search on the mixed cluster": + + - do: + indices.create: + index: esql_local + body: + mappings: + properties: + since: + type: date + format: "yyyy-MM-dd" + cost: + type: long + tag: + type: keyword + - do: + bulk: + refresh: true + body: + - '{"index": {"_index": "esql_local"}}' + - '{"since" : "2023-01-01", "cost": 750, "tag": "monitor"}' + - '{"index": {"_index": "esql_local"}}' + - '{ "since" : "2023-01-02", "cost": 2100, "tag": "laptop"}' + - '{"index": {"_index": "esql_local"}}' + - '{"since" : "2023-01-03", "cost": 250, "tag": "monitor"}' + - '{"index": {"_index": "esql_local"}}' + - '{"since" : "2023-01-04", "cost": 100, "tag": "tablet"}' + - '{"index": {"_index": "esql_local"}}' + - '{"since" : "2023-01-05", "cost": 50, "tag": "headphone"}' + + - do: + headers: { Authorization: "Basic am9lOnMza3JpdC1wYXNzd29yZA==" } + esql.query: + body: + query: 'FROM *:esql*,esql_* | STATS total = sum(cost) by tag | SORT tag | LIMIT 10' + + - match: {columns.0.name: "total"} + - match: {columns.0.type: "long"} + - match: {columns.1.name: "tag"} + - match: {columns.1.type: "keyword"} + + - match: {values.0.0: 2200} + - match: {values.0.1: "computer"} + - match: {values.1.0: 170} + - match: {values.1.1: "headphone"} + - match: {values.2.0: 2100 } + - match: {values.2.1: "laptop" } + - match: {values.3.0: 1000 } + - match: {values.3.1: "monitor" } + - match: {values.4.0: 550 } + - match: {values.4.1: "tablet" } + + - do: + headers: { Authorization: "Basic am9lOnMza3JpdC1wYXNzd29yZA==" } + esql.query: + body: + query: 'FROM *:esql*,esql_* [METADATA _index] | sort cost | KEEP _index, tag, cost | LIMIT 10' + filter: + range: + since: + gte: "2023-01-02" + lte: "2023-01-03" + format: "yyyy-MM-dd" + + - match: {columns.0.name: "_index"} + - match: {columns.0.type: "keyword"} + - match: {columns.1.name: "tag"} + - match: {columns.1.type: "keyword"} + - match: {columns.2.name: "cost" } + - match: {columns.2.type: "long" } + + - match: {values.0.0: "esql_local"} + - match: {values.0.1: "monitor"} + - match: {values.0.2: 250 } + - match: {values.1.0: "my_remote_cluster:esql_index" } + - match: {values.1.1: "tablet"} + - match: {values.1.2: 450 } + - match: {values.2.0: "my_remote_cluster:esql_index" } + - match: {values.2.1: "computer" } + - match: {values.2.2: 1200 } + - match: {values.3.0: "esql_local"} + - match: {values.3.1: "laptop" } + - match: {values.3.2: 2100 } + + - do: + indices.delete: + index: esql_local diff --git a/x-pack/qa/openldap-tests/build.gradle b/x-pack/qa/openldap-tests/build.gradle index 78a03c556bc11..02b2abad3726f 100644 --- a/x-pack/qa/openldap-tests/build.gradle +++ b/x-pack/qa/openldap-tests/build.gradle @@ -5,11 +5,8 @@ dependencies { 
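// The suite keeps the xpack core test artifact, the idp-fixture project and
// junit; the explicit jackson-core/jackson-annotations pins are gone
// (presumably satisfied transitively; this diff does not show where they
// come from now, so treat that as an assumption).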
testImplementation(testArtifact(project(xpackModule('core')))) testImplementation project(":x-pack:test:idp-fixture") testImplementation "junit:junit:${versions.junit}" - testImplementation "com.fasterxml.jackson.core:jackson-core:${versions.jackson}" - testImplementation "com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}" } - tasks.named('test') { // test suite uses jks which is not supported in fips mode systemProperty 'tests.security.manager', 'false' } diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/ILMHistoryManagedTemplateUpgradeIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/ILMHistoryManagedTemplateUpgradeIT.java new file mode 100644 index 0000000000000..aa177474b81e8 --- /dev/null +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/ILMHistoryManagedTemplateUpgradeIT.java @@ -0,0 +1,47 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ +package org.elasticsearch.upgrades; + +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.test.rest.ObjectPath; + +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +import static org.hamcrest.Matchers.is; + +public class ILMHistoryManagedTemplateUpgradeIT extends AbstractUpgradeTestCase { + + @SuppressWarnings("unchecked") + public void testEnsureHistoryManagedTemplateIsInstalledOnUpgradedVersion() throws Exception { + if (CLUSTER_TYPE.equals(ClusterType.UPGRADED)) { + assertBusy(() -> { + Request request = new Request("GET", "/_index_template/ilm-history-7"); + try { + Response response = client().performRequest(request); + Map<String, Object> responseMap = entityAsMap(response); + assertNotNull(responseMap); + + List<Map<String, Object>> indexTemplates = (List<Map<String, Object>>) responseMap.get("index_templates"); + assertThat(indexTemplates.size(), is(1)); + assertThat(ObjectPath.evaluate(indexTemplates.get(0), "name"), is("ilm-history-7")); + assertThat(ObjectPath.evaluate(indexTemplates.get(0), "index_template.index_patterns"), is(List.of("ilm-history-7*"))); + } catch (ResponseException e) { + // Not found is fine + assertThat( + "Unexpected failure getting templates: " + e.getResponse().getStatusLine(), + e.getResponse().getStatusLine().getStatusCode(), + is(404) + ); + } + }, 30, TimeUnit.SECONDS); + } + } +} diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/SLMHistoryManagedTemplateUpgradeIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/SLMHistoryManagedTemplateUpgradeIT.java new file mode 100644 index 0000000000000..fed42c35cf5ce --- /dev/null +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/SLMHistoryManagedTemplateUpgradeIT.java @@ -0,0 +1,47 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0.
+ */ +package org.elasticsearch.upgrades; + +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; +import org.elasticsearch.test.rest.ObjectPath; + +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +import static org.hamcrest.Matchers.is; + +public class SLMHistoryManagedTemplateUpgradeIT extends AbstractUpgradeTestCase { + + @SuppressWarnings("unchecked") + public void testEnsureHistoryManagedTemplateIsInstalledOnUpgradedVersion() throws Exception { + if (CLUSTER_TYPE.equals(ClusterType.UPGRADED)) { + assertBusy(() -> { + Request request = new Request("GET", "/_index_template/.slm-history-7"); + try { + Response response = client().performRequest(request); + Map<String, Object> responseMap = entityAsMap(response); + assertNotNull(responseMap); + + List<Map<String, Object>> indexTemplates = (List<Map<String, Object>>) responseMap.get("index_templates"); + assertThat(indexTemplates.size(), is(1)); + assertThat(ObjectPath.evaluate(indexTemplates.get(0), "name"), is(".slm-history-7")); + assertThat(ObjectPath.evaluate(indexTemplates.get(0), "index_template.index_patterns"), is(List.of(".slm-history-7*"))); + } catch (ResponseException e) { + // Not found is fine + assertThat( + "Unexpected failure getting templates: " + e.getResponse().getStatusLine(), + e.getResponse().getStatusLine().getStatusCode(), + is(404) + ); + } + }, 30, TimeUnit.SECONDS); + } + } +} diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/TokenBackwardsCompatibilityIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/TokenBackwardsCompatibilityIT.java index 8b2fe0d1e2af1..dddba9b7b0fba 100644 --- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/TokenBackwardsCompatibilityIT.java +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/TokenBackwardsCompatibilityIT.java @@ -8,7 +8,6 @@ import org.apache.http.HttpHeaders; import org.apache.http.HttpHost; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.client.Request; import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; @@ -17,6 +16,7 @@ import org.elasticsearch.client.WarningsHandler; import org.elasticsearch.core.Strings; import org.elasticsearch.search.SearchHits; +import org.elasticsearch.search.SearchResponseUtils; import org.elasticsearch.test.rest.ObjectPath; import org.junit.After; import org.junit.Before; @@ -440,17 +440,22 @@ private List<String> getAllTokenIds() throws IOException { }"""); final Response searchResponse = client().performRequest(searchRequest); assertOK(searchResponse); - final SearchHits searchHits = SearchResponse.fromXContent(responseAsParser(searchResponse)).getHits(); - assertThat( - "Search request used with size parameter that was too small to fetch all tokens.", - searchHits.getTotalHits().value, - lessThanOrEqualTo(searchSize) - ); - final List<String> tokenIds = Arrays.stream(searchHits.getHits()).map(searchHit -> { - assertNotNull(searchHit.getId()); - return searchHit.getId(); - }).toList(); - assertThat(tokenIds, not(empty())); - return tokenIds; + var response = SearchResponseUtils.responseAsSearchResponse(searchResponse); + try { + final SearchHits searchHits = response.getHits(); + assertThat( + "Search request used with size parameter that was too small to fetch all tokens.", + searchHits.getTotalHits().value, + lessThanOrEqualTo(searchSize) + ); + final List<String> tokenIds = Arrays.stream(searchHits.getHits()).map(searchHit -> {
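// responseAsSearchResponse returns a ref-counted SearchResponse, hence the
// surrounding try/finally with response.decRef(): the test must release its
// reference once the token ids have been copied out. The shape, sketched
// under the assumption that ownership passes to the caller:
//
//   var response = SearchResponseUtils.responseAsSearchResponse(searchResponse);
//   try {
//       // ... read response.getHits() ...
//   } finally {
//       response.decRef();
//   }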
assertNotNull(searchHit.getId());
+                return searchHit.getId();
+            }).toList();
+            assertThat(tokenIds, not(empty()));
+            return tokenIds;
+        } finally {
+            response.decRef();
+        }
     }
 }
diff --git a/x-pack/qa/saml-idp-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthenticationIT.java b/x-pack/qa/saml-idp-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthenticationIT.java
index c8b3b3fc3aed2..5718930f37c82 100644
--- a/x-pack/qa/saml-idp-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthenticationIT.java
+++ b/x-pack/qa/saml-idp-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthenticationIT.java
@@ -28,6 +28,7 @@
 import org.apache.http.protocol.HttpContext;
 import org.apache.http.protocol.HttpCoreContext;
 import org.apache.http.util.EntityUtils;
+import org.apache.lucene.tests.util.LuceneTestCase;
 import org.elasticsearch.client.Request;
 import org.elasticsearch.client.RequestOptions;
 import org.elasticsearch.client.Response;
@@ -91,6 +92,7 @@
 /**
  * An integration test for validating SAML authentication against a real Identity Provider (Shibboleth)
  */
+@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/103717")
 @ThreadLeakFilters(filters = { TestContainersThreadFilter.class })
 public class SamlAuthenticationIT extends ESRestTestCase {
 
diff --git a/x-pack/qa/third-party/active-directory/build.gradle b/x-pack/qa/third-party/active-directory/build.gradle
index f5c4e6d63d37c..5156d20dd1d12 100644
--- a/x-pack/qa/third-party/active-directory/build.gradle
+++ b/x-pack/qa/third-party/active-directory/build.gradle
@@ -1,12 +1,15 @@
 apply plugin: 'elasticsearch.standalone-test'
-apply plugin: 'elasticsearch.test.fixtures'
+configurations.all {
+  exclude group: 'org.slf4j', module: 'slf4j-nop'
+}
 
 dependencies {
+  testImplementation project(':test:framework')
   testImplementation project(xpackModule('core'))
   testImplementation project(xpackModule('security'))
-  testImplementation(testArtifact(project(xpackModule('security'))))}
-
-testFixtures.useFixture ":x-pack:test:smb-fixture"
+  testImplementation(testArtifact(project(xpackModule('security'))))
+  testImplementation project(":x-pack:test:smb-fixture")
+}
 
 // add test resources from security, so tests can use example certs
 tasks.named("processTestResources").configure {
@@ -23,6 +26,7 @@ tasks.named("forbiddenPatterns").configure {
 }
 
 tasks.named("test").configure {
+  systemProperty 'tests.security.manager', 'false'
   include '**/*IT.class'
   include '**/*Tests.class'
 }
diff --git a/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ADLdapUserSearchSessionFactoryTests.java b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ADLdapUserSearchSessionFactoryTests.java
index 26e0121b92a7d..d2443720de5ce 100644
--- a/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ADLdapUserSearchSessionFactoryTests.java
+++ b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ADLdapUserSearchSessionFactoryTests.java
@@ -63,7 +63,7 @@ public void testUserSearchWithActiveDirectory() throws Exception {
         String groupSearchBase = "DC=ad,DC=test,DC=elasticsearch,DC=com";
         String userSearchBase = "CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com";
         Settings settings = Settings.builder()
-            .put("url", ActiveDirectorySessionFactoryTests.AD_LDAP_URL)
+            .put("url", smbFixture.getAdLdapUrl())
             .put("group_search.base_dn", groupSearchBase)
             .put("user_search.base_dn", userSearchBase)
             .put("bind_dn", "ironman@ad.test.elasticsearch.com")
diff --git a/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/AbstractActiveDirectoryTestCase.java b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/AbstractActiveDirectoryTestCase.java
index 9ab6b5a309393..ff68d879d8a8f 100644
--- a/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/AbstractActiveDirectoryTestCase.java
+++ b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/AbstractActiveDirectoryTestCase.java
@@ -6,6 +6,7 @@
  */
 package org.elasticsearch.xpack.security.authc.ldap;
 
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
 import com.unboundid.ldap.sdk.LDAPConnection;
 import com.unboundid.ldap.sdk.LDAPConnectionPool;
 import com.unboundid.ldap.sdk.LDAPException;
@@ -18,6 +19,8 @@
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.env.TestEnvironment;
 import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.test.fixtures.smb.SmbTestContainer;
+import org.elasticsearch.test.fixtures.testcontainers.TestContainersThreadFilter;
 import org.elasticsearch.xpack.core.security.authc.RealmConfig;
 import org.elasticsearch.xpack.core.security.authc.ldap.ActiveDirectorySessionFactorySettings;
 import org.elasticsearch.xpack.core.security.authc.ldap.support.LdapSearchScope;
@@ -25,6 +28,7 @@
 import org.elasticsearch.xpack.core.ssl.SSLConfigurationSettings;
 import org.elasticsearch.xpack.core.ssl.SSLService;
 import org.junit.Before;
+import org.junit.ClassRule;
 
 import java.io.IOException;
 import java.nio.file.FileVisitResult;
@@ -39,8 +43,11 @@
 
 import static org.elasticsearch.xpack.core.security.authc.RealmSettings.getFullSettingKey;
 
+@ThreadLeakFilters(filters = { TestContainersThreadFilter.class })
 public abstract class AbstractActiveDirectoryTestCase extends ESTestCase {
 
+    @ClassRule
+    public static final SmbTestContainer smbFixture = new SmbTestContainer();
     // follow referrals defaults to false here which differs from the default value of the setting
     // this is needed to prevent test logs being filled by errors as the default configuration of
     // the tests run against a vagrant samba4 instance configured as a domain controller with the
@@ -48,14 +55,7 @@ public abstract class AbstractActiveDirectoryTestCase extends ESTestCase {
     // as we cannot control the URL of the referral which may contain a non-resolvable DNS name as
     // this name would be served by the samba4 instance
     public static final Boolean FOLLOW_REFERRALS = Booleans.parseBoolean(getFromEnv("TESTS_AD_FOLLOW_REFERRALS", "false"));
-    public static final String AD_LDAP_URL = getFromEnv("TESTS_AD_LDAP_URL", "ldaps://localhost:" + getFromProperty("636"));
-    public static final String AD_LDAP_GC_URL = getFromEnv("TESTS_AD_LDAP_GC_URL", "ldaps://localhost:" + getFromProperty("3269"));
-    public static final String PASSWORD = getFromEnv("TESTS_AD_USER_PASSWORD", "Passw0rd");
-    public static final String AD_LDAP_PORT = getFromEnv("TESTS_AD_LDAP_PORT", getFromProperty("389"));
-
-    public static final String AD_LDAPS_PORT = getFromEnv("TESTS_AD_LDAPS_PORT", getFromProperty("636"));
-    public static final String AD_GC_LDAP_PORT = getFromEnv("TESTS_AD_GC_LDAP_PORT", getFromProperty("3268"));
-    public static final String AD_GC_LDAPS_PORT = getFromEnv("TESTS_AD_GC_LDAPS_PORT", getFromProperty("3269"));
+    public static final String PASSWORD = "Passw0rd";
     public static final String AD_DOMAIN = "ad.test.elasticsearch.com";
 
     protected SSLService sslService;
@@ -108,10 +108,6 @@ Settings buildAdSettings(
             .put(getFullSettingKey(realmId, ActiveDirectorySessionFactorySettings.AD_DOMAIN_NAME_SETTING), adDomainName)
             .put(getFullSettingKey(realmName, ActiveDirectorySessionFactorySettings.AD_USER_SEARCH_BASEDN_SETTING), userSearchDN)
             .put(getFullSettingKey(realmName, ActiveDirectorySessionFactorySettings.AD_USER_SEARCH_SCOPE_SETTING), scope)
-            .put(getFullSettingKey(realmName, ActiveDirectorySessionFactorySettings.AD_LDAP_PORT_SETTING), AD_LDAP_PORT)
-            .put(getFullSettingKey(realmName, ActiveDirectorySessionFactorySettings.AD_LDAPS_PORT_SETTING), AD_LDAPS_PORT)
-            .put(getFullSettingKey(realmName, ActiveDirectorySessionFactorySettings.AD_GC_LDAP_PORT_SETTING), AD_GC_LDAP_PORT)
-            .put(getFullSettingKey(realmName, ActiveDirectorySessionFactorySettings.AD_GC_LDAPS_PORT_SETTING), AD_GC_LDAPS_PORT)
             .put(getFullSettingKey(realmId, SessionFactorySettings.FOLLOW_REFERRALS_SETTING), FOLLOW_REFERRALS)
             .putList(getFullSettingKey(realmId, SSLConfigurationSettings.CAPATH_SETTING_REALM), certificatePaths);
         if (randomBoolean()) {
@@ -153,11 +149,4 @@ private static String getFromEnv(String envVar, String defaultValue) {
         final String value = System.getenv(envVar);
         return value == null ? defaultValue : value;
     }
-
-    private static String getFromProperty(String port) {
-        String key = "test.fixtures.smb-fixture.tcp." + port;
-        final String value = System.getProperty(key);
-        assertNotNull("Expected the actual value for port " + port + " to be in system property " + key, value);
-        return value;
-    }
 }
diff --git a/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/AbstractAdLdapRealmTestCase.java b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/AbstractAdLdapRealmTestCase.java
index 1af08ffd5fafe..3d9e7f3828bc7 100644
--- a/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/AbstractAdLdapRealmTestCase.java
+++ b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/AbstractAdLdapRealmTestCase.java
@@ -6,6 +6,8 @@
  */
 package org.elasticsearch.xpack.security.authc.ldap;
 
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
+
 import org.apache.logging.log4j.LogManager;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.ElasticsearchSecurityException;
@@ -21,18 +23,20 @@
 import org.elasticsearch.core.Nullable;
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.test.SecurityIntegTestCase;
+import org.elasticsearch.test.fixtures.smb.SmbTestContainer;
+import org.elasticsearch.test.fixtures.testcontainers.TestContainersThreadFilter;
 import org.elasticsearch.xcontent.XContentType;
 import org.elasticsearch.xpack.core.security.action.rolemapping.PutRoleMappingRequestBuilder;
 import org.elasticsearch.xpack.core.security.action.rolemapping.PutRoleMappingResponse;
 import org.elasticsearch.xpack.core.security.action.user.AuthenticateAction;
 import org.elasticsearch.xpack.core.security.action.user.AuthenticateRequest;
 import org.elasticsearch.xpack.core.security.action.user.AuthenticateResponse;
-import org.elasticsearch.xpack.core.security.authc.ldap.ActiveDirectorySessionFactorySettings;
 import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
+import org.junit.ClassRule;
 
 import java.io.IOException;
 import java.nio.file.Path;
@@ -47,14 +51,9 @@
 import java.util.stream.Collectors;
 
 import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder;
-import static org.elasticsearch.xpack.core.security.authc.RealmSettings.getFullSettingKey;
 import static org.elasticsearch.xpack.core.security.authc.ldap.support.LdapSearchScope.ONE_LEVEL;
 import static org.elasticsearch.xpack.core.security.authc.ldap.support.LdapSearchScope.SUB_TREE;
 import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.BASIC_AUTH_HEADER;
-import static org.elasticsearch.xpack.security.authc.ldap.AbstractActiveDirectoryTestCase.AD_GC_LDAPS_PORT;
-import static org.elasticsearch.xpack.security.authc.ldap.AbstractActiveDirectoryTestCase.AD_GC_LDAP_PORT;
-import static org.elasticsearch.xpack.security.authc.ldap.AbstractActiveDirectoryTestCase.AD_LDAPS_PORT;
-import static org.elasticsearch.xpack.security.authc.ldap.AbstractActiveDirectoryTestCase.AD_LDAP_PORT;
 import static org.elasticsearch.xpack.security.test.SecurityTestUtils.writeFile;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.is;
@@ -63,6 +62,7 @@
  * This test assumes all subclass tests will be of type SUITE. It picks a random realm configuration for the tests, and
  * writes a group to role mapping file for each node.
  */
+@ThreadLeakFilters(filters = { TestContainersThreadFilter.class })
 public abstract class AbstractAdLdapRealmTestCase extends SecurityIntegTestCase {
 
     public static final String XPACK_SECURITY_AUTHC_REALMS_AD_EXTERNAL = "xpack.security.authc.realms.active_directory.external";
@@ -72,6 +72,9 @@ public abstract class AbstractAdLdapRealmTestCase extends SecurityIntegTestCase
     public static final String PHILANTHROPISTS_INDEX = "philanthropists";
     public static final String SECURITY_INDEX = "security";
 
+    @ClassRule
+    public static final SmbTestContainer smbFixture = new SmbTestContainer();
+
     private static final RoleMappingEntry[] AD_ROLE_MAPPING = new RoleMappingEntry[] {
         new RoleMappingEntry("SHIELD: [ \"CN=SHIELD,CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com\" ]", """
             {
@@ -359,12 +362,8 @@ enum RealmConfig {
             .put(XPACK_SECURITY_AUTHC_REALMS_AD_EXTERNAL + ".domain_name", ActiveDirectorySessionFactoryTests.AD_DOMAIN)
             .put(XPACK_SECURITY_AUTHC_REALMS_AD_EXTERNAL + ".group_search.base_dn", "CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com")
             .put(XPACK_SECURITY_AUTHC_REALMS_AD_EXTERNAL + ".group_search.scope", randomBoolean() ? SUB_TREE : ONE_LEVEL)
-            .put(XPACK_SECURITY_AUTHC_REALMS_AD_EXTERNAL + ".url", ActiveDirectorySessionFactoryTests.AD_LDAP_URL)
+            .put(XPACK_SECURITY_AUTHC_REALMS_AD_EXTERNAL + ".url", smbFixture.getAdLdapUrl())
             .put(XPACK_SECURITY_AUTHC_REALMS_AD_EXTERNAL + ".follow_referrals", ActiveDirectorySessionFactoryTests.FOLLOW_REFERRALS)
-            .put(getFullSettingKey("external", ActiveDirectorySessionFactorySettings.AD_LDAP_PORT_SETTING), AD_LDAP_PORT)
-            .put(getFullSettingKey("external", ActiveDirectorySessionFactorySettings.AD_LDAPS_PORT_SETTING), AD_LDAPS_PORT)
-            .put(getFullSettingKey("external", ActiveDirectorySessionFactorySettings.AD_GC_LDAP_PORT_SETTING), AD_GC_LDAP_PORT)
-            .put(getFullSettingKey("external", ActiveDirectorySessionFactorySettings.AD_GC_LDAPS_PORT_SETTING), AD_GC_LDAPS_PORT)
             .build(),
         "active_directory"
     ),
@@ -373,7 +372,7 @@ enum RealmConfig {
         true,
         AD_ROLE_MAPPING,
         Settings.builder()
-            .put(XPACK_SECURITY_AUTHC_REALMS_LDAP_EXTERNAL + ".url", ActiveDirectorySessionFactoryTests.AD_LDAP_URL)
+            .put(XPACK_SECURITY_AUTHC_REALMS_LDAP_EXTERNAL + ".url", smbFixture.getAdLdapUrl())
             .put(XPACK_SECURITY_AUTHC_REALMS_LDAP_EXTERNAL + ".group_search.base_dn", "CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com")
             .put(XPACK_SECURITY_AUTHC_REALMS_LDAP_EXTERNAL + ".group_search.scope", randomBoolean() ? SUB_TREE : ONE_LEVEL)
             .putList(
@@ -389,7 +388,7 @@ enum RealmConfig {
         true,
         AD_ROLE_MAPPING,
         Settings.builder()
-            .put(XPACK_SECURITY_AUTHC_REALMS_LDAP_EXTERNAL + ".url", ActiveDirectorySessionFactoryTests.AD_LDAP_URL)
+            .put(XPACK_SECURITY_AUTHC_REALMS_LDAP_EXTERNAL + ".url", smbFixture.getAdLdapUrl())
             .putList(
                 XPACK_SECURITY_AUTHC_REALMS_LDAP_EXTERNAL + ".user_dn_templates",
                 "cn={0},CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com"
diff --git a/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectoryGroupsResolverTests.java b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectoryGroupsResolverTests.java
index d8f82c6419501..231bf47e3e712 100644
--- a/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectoryGroupsResolverTests.java
+++ b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectoryGroupsResolverTests.java
@@ -6,15 +6,19 @@
  */
 package org.elasticsearch.xpack.security.authc.ldap;
 
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
 import com.unboundid.ldap.sdk.Filter;
 
 import org.elasticsearch.action.support.PlainActionFuture;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.core.TimeValue;
+import org.elasticsearch.test.fixtures.smb.SmbTestContainer;
+import org.elasticsearch.test.fixtures.testcontainers.TestContainersThreadFilter;
 import org.elasticsearch.xpack.core.security.authc.RealmConfig;
 import org.elasticsearch.xpack.core.security.authc.ldap.support.LdapSearchScope;
 import org.elasticsearch.xpack.core.security.support.NoOpLogger;
 import org.junit.Before;
+import org.junit.ClassRule;
 
 import java.util.List;
 import java.util.regex.Pattern;
@@ -24,12 +28,16 @@
 import static org.hamcrest.Matchers.hasItem;
 import static org.hamcrest.Matchers.is;
 
+@ThreadLeakFilters(filters = { TestContainersThreadFilter.class })
 public class ActiveDirectoryGroupsResolverTests extends GroupsResolverTestCase {
 
     private static final String BRUCE_BANNER_DN = "cn=Bruce Banner,CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com";
     private static final RealmConfig.RealmIdentifier REALM_ID = new RealmConfig.RealmIdentifier("active_directory", "ad");
 
+    @ClassRule
+    public static final SmbTestContainer smbFixture = new SmbTestContainer();
+
     @Before
     public void setReferralFollowing() {
         ldapConnection.getConnectionOptions().setFollowReferrals(AbstractActiveDirectoryTestCase.FOLLOW_REFERRALS);
@@ -145,7 +153,7 @@ private void assertValidSidQuery(Filter query, String[] expectedSids) {
 
     @Override
     protected String ldapUrl() {
-        return ActiveDirectorySessionFactoryTests.AD_LDAP_URL;
+        return smbFixture.getAdLdapUrl();
     }
 
     @Override
diff --git a/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectorySessionFactoryTests.java b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectorySessionFactoryTests.java
index 120a27c944bd8..28637560d9d53 100644
--- a/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectorySessionFactoryTests.java
+++ b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/ActiveDirectorySessionFactoryTests.java
@@ -75,7 +75,11 @@ public boolean enableWarningsCheck() {
     }
 
     public void testAdAuth() throws Exception {
-        RealmConfig config = configureRealm("ad-test", LdapRealmSettings.AD_TYPE, buildAdSettings(AD_LDAP_URL, AD_DOMAIN, false));
+        RealmConfig config = configureRealm(
+            "ad-test",
+            LdapRealmSettings.AD_TYPE,
+            buildAdSettings(smbFixture.getAdLdapUrl(), AD_DOMAIN, false)
+        );
         try (ActiveDirectorySessionFactory sessionFactory = getActiveDirectorySessionFactory(config, sslService, threadPool)) {
 
             String userName = "ironman";
@@ -115,7 +119,7 @@ private RealmConfig configureRealm(String name, String type, Settings settings) {
     }
 
     public void testNetbiosAuth() throws Exception {
-        final String adUrl = randomFrom(AD_LDAP_URL, AD_LDAP_GC_URL);
+        final String adUrl = randomFrom(smbFixture.getAdLdapUrl(), smbFixture.getAdLdapGcUrl());
         RealmConfig config = configureRealm("ad-test", LdapRealmSettings.AD_TYPE, buildAdSettings(adUrl, AD_DOMAIN, false));
         try (ActiveDirectorySessionFactory sessionFactory = getActiveDirectorySessionFactory(config, sslService, threadPool)) {
 
@@ -142,7 +146,11 @@ public void testNetbiosAuth() throws Exception {
     }
 
     public void testAdAuthAvengers() throws Exception {
-        RealmConfig config = configureRealm("ad-test", LdapRealmSettings.AD_TYPE, buildAdSettings(AD_LDAP_URL, AD_DOMAIN, false));
+        RealmConfig config = configureRealm(
+            "ad-test",
+            LdapRealmSettings.AD_TYPE,
+            buildAdSettings(smbFixture.getAdLdapUrl(), AD_DOMAIN, false)
+        );
         try (ActiveDirectorySessionFactory sessionFactory = getActiveDirectorySessionFactory(config, sslService, threadPool)) {
 
             String[] users = new String[] { "cap", "hawkeye", "hulk", "ironman", "thor", "blackwidow" };
@@ -158,7 +166,7 @@ public void testAdAuthAvengers() throws Exception {
     public void testAuthenticate() throws Exception {
         Settings settings = buildAdSettings(
             REALM_ID,
-            AD_LDAP_URL,
+            smbFixture.getAdLdapUrl(),
             AD_DOMAIN,
             "CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com",
             LdapSearchScope.ONE_LEVEL,
@@ -191,7 +199,7 @@ public void testAuthenticate() throws Exception {
     public void testAuthenticateBaseUserSearch() throws Exception {
         Settings settings = buildAdSettings(
             REALM_ID,
-            AD_LDAP_URL,
+            smbFixture.getAdLdapUrl(),
             AD_DOMAIN,
             "CN=Bruce Banner, CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com",
             LdapSearchScope.BASE,
@@ -226,7 +234,7 @@ public void testAuthenticateBaseGroupSearch() throws Exception {
             .put(
                 buildAdSettings(
                     REALM_ID,
-                    AD_LDAP_URL,
+                    smbFixture.getAdLdapUrl(),
                     AD_DOMAIN,
                     "CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com",
                     LdapSearchScope.ONE_LEVEL,
@@ -255,7 +263,7 @@ public void testAuthenticateBaseGroupSearch() throws Exception {
     public void testAuthenticateWithUserPrincipalName() throws Exception {
         Settings settings = buildAdSettings(
             REALM_ID,
-            AD_LDAP_URL,
+            smbFixture.getAdLdapUrl(),
             AD_DOMAIN,
             "CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com",
             LdapSearchScope.ONE_LEVEL,
@@ -281,7 +289,7 @@ public void testAuthenticateWithUserPrincipalName() throws Exception {
     public void testAuthenticateWithSAMAccountName() throws Exception {
         Settings settings = buildAdSettings(
             REALM_ID,
-            AD_LDAP_URL,
+            smbFixture.getAdLdapUrl(),
             AD_DOMAIN,
             "CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com",
             LdapSearchScope.ONE_LEVEL,
@@ -310,7 +318,7 @@ public void testCustomUserFilter() throws Exception {
             .put(
                 buildAdSettings(
                     REALM_ID,
-                    AD_LDAP_URL,
+                    smbFixture.getAdLdapUrl(),
                     AD_DOMAIN,
                     "CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com",
                     LdapSearchScope.SUB_TREE,
@@ -349,7 +357,7 @@ public void testStandardLdapConnection() throws Exception {
             .put(
                 LdapTestCase.buildLdapSettings(
                     realmId,
-                    new String[] { AD_LDAP_URL },
+                    new String[] { smbFixture.getAdLdapUrl() },
                     new String[] { userTemplate },
                     groupSearchBase,
                     LdapSearchScope.SUB_TREE,
@@ -389,7 +397,7 @@ public void testHandlingLdapReferralErrors() throws Exception {
             .put(
                 LdapTestCase.buildLdapSettings(
                     realmId,
-                    new String[] { AD_LDAP_URL },
+                    new String[] { smbFixture.getAdLdapUrl() },
                     new String[] { userTemplate },
                     groupSearchBase,
                     LdapSearchScope.SUB_TREE,
@@ -423,7 +431,7 @@ public void testStandardLdapWithAttributeGroups() throws Exception {
             .put(
                 LdapTestCase.buildLdapSettings(
                     realmId,
-                    new String[] { AD_LDAP_URL },
+                    new String[] { smbFixture.getAdLdapUrl() },
                    new String[] { userTemplate },
                     groupSearchBase,
                     LdapSearchScope.SUB_TREE,
@@ -456,7 +464,11 @@ public void testStandardLdapWithAttributeGroups() throws Exception {
     }
 
     public void testADLookup() throws Exception {
-        RealmConfig config = configureRealm("ad-test", LdapRealmSettings.AD_TYPE, buildAdSettings(AD_LDAP_URL, AD_DOMAIN, false, true));
+        RealmConfig config = configureRealm(
+            "ad-test",
+            LdapRealmSettings.AD_TYPE,
+            buildAdSettings(smbFixture.getAdLdapUrl(), AD_DOMAIN, false, true)
+        );
         try (ActiveDirectorySessionFactory sessionFactory = getActiveDirectorySessionFactory(config, sslService, threadPool)) {
 
             List<String> users = randomSubsetOf(
@@ -499,7 +511,7 @@ public void testResolveTokenGroupsSID() throws Exception {
             .put(
                 buildAdSettings(
                     REALM_ID,
-                    AD_LDAP_URL,
+                    smbFixture.getAdLdapUrl(),
                     AD_DOMAIN,
                     "CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com",
                     LdapSearchScope.SUB_TREE,
@@ -536,10 +548,6 @@ private Settings buildAdSettings(String ldapUrl, String adDomainName, boolean ho
         Settings.Builder builder = Settings.builder()
             .put(getFullSettingKey(REALM_ID, SessionFactorySettings.URLS_SETTING), ldapUrl)
             .put(getFullSettingKey(REALM_ID, ActiveDirectorySessionFactorySettings.AD_DOMAIN_NAME_SETTING), adDomainName)
-            .put(getFullSettingKey(REALM_NAME, ActiveDirectorySessionFactorySettings.AD_LDAP_PORT_SETTING), AD_LDAP_PORT)
-            .put(getFullSettingKey(REALM_NAME, ActiveDirectorySessionFactorySettings.AD_LDAPS_PORT_SETTING), AD_LDAPS_PORT)
-            .put(getFullSettingKey(REALM_NAME, ActiveDirectorySessionFactorySettings.AD_GC_LDAP_PORT_SETTING), AD_GC_LDAP_PORT)
-            .put(getFullSettingKey(REALM_NAME, ActiveDirectorySessionFactorySettings.AD_GC_LDAPS_PORT_SETTING), AD_GC_LDAPS_PORT)
             .put(getFullSettingKey(REALM_ID, SessionFactorySettings.FOLLOW_REFERRALS_SETTING), FOLLOW_REFERRALS);
         if (randomBoolean()) {
             builder.put(
diff --git a/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/UserAttributeGroupsResolverTests.java b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/UserAttributeGroupsResolverTests.java
index 5a8350739ef6b..256d710b3dfe2 100644
--- a/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/UserAttributeGroupsResolverTests.java
+++ b/x-pack/qa/third-party/active-directory/src/test/java/org/elasticsearch/xpack/security/authc/ldap/UserAttributeGroupsResolverTests.java
@@ -6,16 +6,20 @@
  */
 package org.elasticsearch.xpack.security.authc.ldap;
 
+import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters;
 import com.unboundid.ldap.sdk.Attribute;
 import com.unboundid.ldap.sdk.SearchRequest;
 import com.unboundid.ldap.sdk.SearchScope;
 
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.core.TimeValue;
+import org.elasticsearch.test.fixtures.smb.SmbTestContainer;
+import org.elasticsearch.test.fixtures.testcontainers.TestContainersThreadFilter;
 import org.elasticsearch.xpack.core.security.authc.RealmConfig;
 import org.elasticsearch.xpack.core.security.authc.ldap.UserAttributeGroupsResolverSettings;
 import org.elasticsearch.xpack.core.security.support.NoOpLogger;
 import org.elasticsearch.xpack.security.authc.ldap.support.LdapUtils;
+import org.junit.ClassRule;
 
 import java.util.Collection;
 import java.util.List;
@@ -26,11 +30,15 @@
 import static org.hamcrest.Matchers.empty;
 import static org.hamcrest.Matchers.hasSize;
 
+@ThreadLeakFilters(filters = { TestContainersThreadFilter.class })
 public class UserAttributeGroupsResolverTests extends GroupsResolverTestCase {
 
     public static final String BRUCE_BANNER_DN = "cn=Bruce Banner,CN=Users,DC=ad,DC=test,DC=elasticsearch,DC=com";
     private static final RealmConfig.RealmIdentifier REALM_ID = new RealmConfig.RealmIdentifier("ldap", "realm1");
 
+    @ClassRule
+    public static final SmbTestContainer smbFixture = new SmbTestContainer();
+
     public void testResolve() throws Exception {
         // falling back on the 'memberOf' attribute
         UserAttributeGroupsResolver resolver = new UserAttributeGroupsResolver(config(REALM_ID, Settings.EMPTY));
@@ -112,7 +120,7 @@ public void testResolveInvalidGroupAttribute() throws Exception {
 
     @Override
     protected String ldapUrl() {
-        return ActiveDirectorySessionFactoryTests.AD_LDAP_URL;
+        return smbFixture.getAdLdapUrl();
     }
 
     @Override
diff --git a/x-pack/test/idp-fixture/build.gradle b/x-pack/test/idp-fixture/build.gradle
index 407fb520fcae1..691483bcfe5c3 100644
--- a/x-pack/test/idp-fixture/build.gradle
+++ b/x-pack/test/idp-fixture/build.gradle
@@ -1,27 +1,9 @@
 apply plugin: 'elasticsearch.java'
 apply plugin: 'elasticsearch.cache-test-fixtures'
 
-configurations.all {
-  transitive = false
-}
-
 dependencies {
   testImplementation project(':test:framework')
   api project(':test:fixtures:testcontainer-utils')
   api "junit:junit:${versions.junit}"
-  api "org.testcontainers:testcontainers:${versions.testcontainer}"
-  implementation "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}"
-  implementation "org.slf4j:slf4j-api:${versions.slf4j}"
-  implementation "com.github.docker-java:docker-java-api:${versions.dockerJava}"
-
-  runtimeOnly "com.github.docker-java:docker-java-transport-zerodep:${versions.dockerJava}"
-  runtimeOnly "com.github.docker-java:docker-java-transport:${versions.dockerJava}"
-  runtimeOnly "com.github.docker-java:docker-java-core:${versions.dockerJava}"
-  runtimeOnly "org.apache.commons:commons-compress:${versions.commonsCompress}"
-  runtimeOnly "org.rnorth.duct-tape:duct-tape:${versions.ductTape}"
-
-  // ensure we have proper logging during when used in tests
-  runtimeOnly "org.slf4j:slf4j-simple:${versions.slf4j}"
-  runtimeOnly "org.hamcrest:hamcrest:${versions.hamcrest}"
 }
diff --git a/x-pack/test/idp-fixture/src/main/java/org/elasticsearch/test/fixtures/idp/HttpProxyTestContainer.java b/x-pack/test/idp-fixture/src/main/java/org/elasticsearch/test/fixtures/idp/HttpProxyTestContainer.java
index e517c2a9fe2c3..4f7d3528f85d4 100644
--- a/x-pack/test/idp-fixture/src/main/java/org/elasticsearch/test/fixtures/idp/HttpProxyTestContainer.java
+++ b/x-pack/test/idp-fixture/src/main/java/org/elasticsearch/test/fixtures/idp/HttpProxyTestContainer.java
@@ -31,7 +31,6 @@ public HttpProxyTestContainer(Network network) {
         );
         addExposedPort(PORT);
         withNetwork(network);
-
     }
 
     public Integer getProxyPort() {
diff --git a/x-pack/test/idp-fixture/src/main/java/org/elasticsearch/test/fixtures/idp/IdpTestContainer.java b/x-pack/test/idp-fixture/src/main/java/org/elasticsearch/test/fixtures/idp/IdpTestContainer.java
index ed19dc997fd8e..692cd4b081411 100644
--- a/x-pack/test/idp-fixture/src/main/java/org/elasticsearch/test/fixtures/idp/IdpTestContainer.java
+++ b/x-pack/test/idp-fixture/src/main/java/org/elasticsearch/test/fixtures/idp/IdpTestContainer.java
@@ -10,7 +10,9 @@
 import org.elasticsearch.test.fixtures.testcontainers.DockerEnvironmentAwareTestContainer;
 import org.junit.rules.TemporaryFolder;
 import org.testcontainers.containers.Network;
+import org.testcontainers.containers.wait.strategy.Wait;
 import org.testcontainers.images.builder.ImageFromDockerfile;
+import org.testcontainers.images.builder.dockerfile.statement.SingleArgumentStatement;
 
 import java.io.IOException;
 import java.nio.file.Path;
@@ -125,19 +127,24 @@ public IdpTestContainer(Network network) {
                     .run("chmod +x /opt/jetty-home/bin/jetty.sh")
                     // Opening 4443 (browser TLS), 8443 (mutual auth TLS)
                     .cmd("run-jetty.sh")
+                    .withStatement(
+                        new SingleArgumentStatement(
+                            "HEALTHCHECK",
+                            "CMD curl -f -s --http0.9 http://localhost:4443 " + "--connect-timeout 10 --max-time 10 --output - > /dev/null"
+                        )
+                    )
                     // .expose(4443)
                     .build()
-
             )
                 .withFileFromClasspath("idp/jetty-custom/ssl.mod", "/idp/jetty-custom/ssl.mod")
                 .withFileFromClasspath("idp/jetty-custom/keystore", "/idp/jetty-custom/keystore")
                 .withFileFromClasspath("idp/shib-jetty-base/", "/idp/shib-jetty-base/")
                 .withFileFromClasspath("idp/shibboleth-idp/", "/idp/shibboleth-idp/")
                 .withFileFromClasspath("idp/bin/", "/idp/bin/")
-
         );
         withNetworkAliases("idp");
         withNetwork(network);
+        waitingFor(Wait.forHealthcheck());
        addExposedPorts(4443, 8443);
     }
diff --git a/x-pack/test/idp-fixture/src/main/resources/idp/bin/run-jetty.sh b/x-pack/test/idp-fixture/src/main/resources/idp/bin/run-jetty.sh
index 24ece94c2715d..0160cc613407d 100644
--- a/x-pack/test/idp-fixture/src/main/resources/idp/bin/run-jetty.sh
+++ b/x-pack/test/idp-fixture/src/main/resources/idp/bin/run-jetty.sh
@@ -20,7 +20,7 @@ exit_code=$?
 end_time=$(date +%s)
 duration=$((end_time - start_time))
 
-if [ $duration -lt 5 ]; then
+if [ $duration -lt 10 ]; then
   /opt/jetty-home/bin/jetty.sh run
   exit_code=$?
 fi
diff --git a/x-pack/test/smb-fixture/Dockerfile b/x-pack/test/smb-fixture/Dockerfile
deleted file mode 100644
index bcd74758ff496..0000000000000
--- a/x-pack/test/smb-fixture/Dockerfile
+++ /dev/null
@@ -1,12 +0,0 @@
-FROM ubuntu:16.04
-RUN apt-get update -qqy && apt-get install -qqy samba ldap-utils
-ADD . /fixture
-RUN chmod +x /fixture/src/main/resources/provision/installsmb.sh
-RUN /fixture/src/main/resources/provision/installsmb.sh
-
-EXPOSE 389
-EXPOSE 636
-EXPOSE 3268
-EXPOSE 3269
-
-CMD service samba-ad-dc restart && sleep infinity
diff --git a/x-pack/test/smb-fixture/build.gradle b/x-pack/test/smb-fixture/build.gradle
index 8740d94f26357..aeb5626ce9508 100644
--- a/x-pack/test/smb-fixture/build.gradle
+++ b/x-pack/test/smb-fixture/build.gradle
@@ -1 +1,13 @@
-apply plugin: 'elasticsearch.test.fixtures'
+apply plugin: 'elasticsearch.java'
+apply plugin: 'elasticsearch.cache-test-fixtures'
+
+dependencies {
+  api project(':test:fixtures:testcontainer-utils')
+  api "junit:junit:${versions.junit}"
+  api "org.testcontainers:testcontainers:${versions.testcontainer}"
+  api "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}"
+
+  // ensure we have proper logging when used in tests
+  runtimeOnly "org.slf4j:slf4j-simple:${versions.slf4j}"
+  runtimeOnly "org.hamcrest:hamcrest:${versions.hamcrest}"
+}
diff --git a/x-pack/test/smb-fixture/docker-compose.yml b/x-pack/test/smb-fixture/docker-compose.yml
deleted file mode 100644
index 51a76fd42b435..0000000000000
--- a/x-pack/test/smb-fixture/docker-compose.yml
+++ /dev/null
@@ -1,11 +0,0 @@
-version: '3'
-services:
-  smb-fixture:
-    build:
-      context: .
-      dockerfile: Dockerfile
-    ports:
-      - "389"
-      - "636"
-      - "3268"
-      - "3269"
diff --git a/x-pack/test/smb-fixture/src/main/java/org/elasticsearch/test/fixtures/smb/SmbTestContainer.java b/x-pack/test/smb-fixture/src/main/java/org/elasticsearch/test/fixtures/smb/SmbTestContainer.java
new file mode 100644
index 0000000000000..10f589e4e1df3
--- /dev/null
+++ b/x-pack/test/smb-fixture/src/main/java/org/elasticsearch/test/fixtures/smb/SmbTestContainer.java
@@ -0,0 +1,53 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.test.fixtures.smb;
+
+import org.elasticsearch.test.fixtures.testcontainers.DockerEnvironmentAwareTestContainer;
+import org.testcontainers.images.builder.ImageFromDockerfile;
+
+public final class SmbTestContainer extends DockerEnvironmentAwareTestContainer {
+
+    private static final String DOCKER_BASE_IMAGE = "ubuntu:16.04";
+    public static final int AD_LDAP_PORT = 636;
+    public static final int AD_LDAP_GC_PORT = 3269;
+
+    public SmbTestContainer() {
+        super(
+            new ImageFromDockerfile("es-smb-fixture").withDockerfileFromBuilder(
+                builder -> builder.from(DOCKER_BASE_IMAGE)
+                    .run("apt-get update -qqy && apt-get install -qqy samba ldap-utils")
+                    .copy("fixture/provision/installsmb.sh", "/fixture/provision/installsmb.sh")
+                    .copy("fixture/certs/ca.key", "/fixture/certs/ca.key")
+                    .copy("fixture/certs/ca.pem", "/fixture/certs/ca.pem")
+                    .copy("fixture/certs/cert.pem", "/fixture/certs/cert.pem")
+                    .copy("fixture/certs/key.pem", "/fixture/certs/key.pem")
+                    .run("chmod +x /fixture/provision/installsmb.sh")
+                    .run("/fixture/provision/installsmb.sh")
+                    .cmd("service samba-ad-dc restart && sleep infinity")
+                    .build()
+            )
+                .withFileFromClasspath("fixture/provision/installsmb.sh", "/smb/provision/installsmb.sh")
+                .withFileFromClasspath("fixture/certs/ca.key", "/smb/certs/ca.key")
+                .withFileFromClasspath("fixture/certs/ca.pem", "/smb/certs/ca.pem")
+                .withFileFromClasspath("fixture/certs/cert.pem", "/smb/certs/cert.pem")
+                .withFileFromClasspath("fixture/certs/key.pem", "/smb/certs/key.pem")
+        );
+        // addExposedPort(389);
+        // addExposedPort(3268);
+        addExposedPort(AD_LDAP_PORT);
+        addExposedPort(AD_LDAP_GC_PORT);
+    }
+
+    public String getAdLdapUrl() {
+        return "ldaps://localhost:" + getMappedPort(AD_LDAP_PORT);
+    }
+
+    public String getAdLdapGcUrl() {
+        return "ldaps://localhost:" + getMappedPort(AD_LDAP_GC_PORT);
+    }
+}
diff --git a/x-pack/test/smb-fixture/src/main/resources/certs/README.asciidoc b/x-pack/test/smb-fixture/src/main/resources/smb/certs/README.asciidoc
similarity index 100%
rename from x-pack/test/smb-fixture/src/main/resources/certs/README.asciidoc
rename to x-pack/test/smb-fixture/src/main/resources/smb/certs/README.asciidoc
diff --git a/x-pack/test/smb-fixture/src/main/resources/certs/ca.key b/x-pack/test/smb-fixture/src/main/resources/smb/certs/ca.key
similarity index 100%
rename from x-pack/test/smb-fixture/src/main/resources/certs/ca.key
rename to x-pack/test/smb-fixture/src/main/resources/smb/certs/ca.key
diff --git a/x-pack/test/smb-fixture/src/main/resources/certs/ca.pem b/x-pack/test/smb-fixture/src/main/resources/smb/certs/ca.pem
similarity index 100%
rename from x-pack/test/smb-fixture/src/main/resources/certs/ca.pem
rename to x-pack/test/smb-fixture/src/main/resources/smb/certs/ca.pem
diff --git a/x-pack/test/smb-fixture/src/main/resources/certs/cert.pem b/x-pack/test/smb-fixture/src/main/resources/smb/certs/cert.pem
similarity index 100%
rename from x-pack/test/smb-fixture/src/main/resources/certs/cert.pem
rename to x-pack/test/smb-fixture/src/main/resources/smb/certs/cert.pem
diff --git a/x-pack/test/smb-fixture/src/main/resources/certs/key.pem b/x-pack/test/smb-fixture/src/main/resources/smb/certs/key.pem
similarity index 100%
rename from x-pack/test/smb-fixture/src/main/resources/certs/key.pem
rename to x-pack/test/smb-fixture/src/main/resources/smb/certs/key.pem
diff --git a/x-pack/test/smb-fixture/src/main/resources/provision/installsmb.sh b/x-pack/test/smb-fixture/src/main/resources/smb/provision/installsmb.sh
similarity index 97%
rename from x-pack/test/smb-fixture/src/main/resources/provision/installsmb.sh
rename to x-pack/test/smb-fixture/src/main/resources/smb/provision/installsmb.sh
index 0bc86e96530bc..463238b9f50c2 100644
--- a/x-pack/test/smb-fixture/src/main/resources/provision/installsmb.sh
+++ b/x-pack/test/smb-fixture/src/main/resources/smb/provision/installsmb.sh
@@ -8,8 +8,7 @@ set -ex
 
 VDIR=/fixture
-RESOURCES=$VDIR/src/main/resources
-CERTS_DIR=$RESOURCES/certs
+CERTS_DIR=$VDIR/certs
 SSL_DIR=/var/lib/samba/private/tls
 
 # install ssl certs