From 0602eddd3b3c7d5ee7625fd58f2270603c31c8ff Mon Sep 17 00:00:00 2001
From: Tanguy Leroux
Date: Wed, 6 Dec 2023 15:28:36 +0100
Subject: [PATCH 01/45] Restore CacheFileRegion refcounting for writes (#102843)

In #98241 we removed the refcounting around the write handler in
SharedBytes.IO. But recently we saw wrong bytes being read from the snapshot
file under heavy evictions, and investigation showed that the bytes belonged
to another cached file. Low-level logging (hard to reproduce) shows that
writes and reads using the same SharedBytes.IO instance but for different
cache file regions could be interleaved, so that bytes in the shared cache
could be overwritten and the last read would read (and store in internal
index input buffers) bytes from a different file:

Thread[elasticsearch[node_t0][stateless_shard][T#4],5,TGRP-IndexCorruptionIT]: 10485760 bytes written using SharedBytes$IO@dc07632 (230716978) for FileCacheKey[shardId=[index-0][0], primaryTerm=1, fileName=stateless_commit_26]
Thread[elasticsearch[node_t0][stateless_shard][T#3],5,TGRP-IndexCorruptionIT]: 10485760 bytes written using SharedBytes$IO@dc07632 (230716978) for FileCacheKey[shardId=[index-0][0], primaryTerm=1, fileName=stateless_commit_16]
Thread[elasticsearch[node_t0][stateless_shard][T#4],5,TGRP-IndexCorruptionIT]: 375 bytes read using SharedBytes$IO@dc07632 (230716978) for key FileCacheKey[shardId=[index-0][0], primaryTerm=1, fileName=stateless_commit_26]

This change fixes refcounting around the write handler so that the IO
instance is decRef'd only after the bytes are fully written.

Relates #98241
---
 docs/changelog/102843.yaml | 5 +++
 .../shared/SharedBlobCacheService.java | 32 +++++++++++--------
 2 files changed, 24 insertions(+), 13 deletions(-)
 create mode 100644 docs/changelog/102843.yaml

diff --git a/docs/changelog/102843.yaml b/docs/changelog/102843.yaml
new file mode 100644
index 0000000000000..7e561fa7cc582
--- /dev/null
+++ b/docs/changelog/102843.yaml
@@ -0,0 +1,5 @@
+pr: 102843
+summary: Restore `SharedBytes.IO` refcounting on reads & writes
+area: Snapshot/Restore
+type: bug
+issues: []
diff --git a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java
index 695e96850e8e1..9867c81808d24 100644
--- a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java
+++ b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java
@@ -708,25 +708,31 @@ void populateAndRead(
     }

     private void fillGaps(Executor executor, RangeMissingHandler writer, List<SparseFileTracker.Gap> gaps) {
+        final var cacheFileRegion = CacheFileRegion.this;
         for (SparseFileTracker.Gap gap : gaps) {
             executor.execute(new AbstractRunnable() {

                 @Override
                 protected void doRun() throws Exception {
-                    assert CacheFileRegion.this.hasReferences();
                     ensureOpen();
-                    final int start = Math.toIntExact(gap.start());
-                    var ioRef = io;
-                    assert regionOwners.get(ioRef) == CacheFileRegion.this;
-                    writer.fillCacheRange(
-                        ioRef,
-                        start,
-                        start,
-                        Math.toIntExact(gap.end() - start),
-                        progress -> gap.onProgress(start + progress)
-                    );
-                    writeCount.increment();
-
+                    if (cacheFileRegion.tryIncRef() == false) {
+                        throw new AlreadyClosedException("File chunk [" + cacheFileRegion.regionKey + "] has been released");
+                    }
+                    try {
+                        final int start = Math.toIntExact(gap.start());
+                        var ioRef = io;
+                        assert regionOwners.get(ioRef) == cacheFileRegion;
+                        writer.fillCacheRange(
+                            ioRef,
+                            start,
+                            start,
+                            Math.toIntExact(gap.end() - start),
+                            progress -> gap.onProgress(start + progress)
+                        );
+                        writeCount.increment();
+                    } finally {
+                        cacheFileRegion.decRef();
+                    }
                     gap.onCompletion();
                 }

From 9883f585acf4a109f506101e131e0c5f7460fe91 Mon Sep 17 00:00:00 2001
From: Lorenzo Dematté
Date: Wed, 6 Dec 2023 15:32:09 +0100
Subject: [PATCH 02/45] Fix SLM detection in tests (#103053)

---
 .../org/elasticsearch/test/rest/RestTestLegacyFeatures.java | 5 ++++-
 .../elasticsearch/xpack/restart/FullClusterRestartIT.java | 4 ++--
 2 files changed, 6 insertions(+), 3 deletions(-)

diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/RestTestLegacyFeatures.java b/test/framework/src/main/java/org/elasticsearch/test/rest/RestTestLegacyFeatures.java
index 1530809a064b1..aedd916c0a0f3 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/rest/RestTestLegacyFeatures.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/RestTestLegacyFeatures.java
@@ -59,6 +59,8 @@ public class RestTestLegacyFeatures implements FeatureSpecification {
     public static final NodeFeature ML_INDICES_HIDDEN = new NodeFeature("ml.indices_hidden");
     @UpdateForV9
     public static final NodeFeature ML_ANALYTICS_MAPPINGS = new NodeFeature("ml.analytics_mappings");
+    @UpdateForV9
+    public static final NodeFeature SLM_SUPPORTED = new NodeFeature("slm.supported");

     @Override
     public Map<NodeFeature, Version> getHistoricalFeatures() {
@@ -78,7 +80,8 @@ public Map<NodeFeature, Version> getHistoricalFeatures() {
             entry(TRANSFORM_NEW_API_ENDPOINT, Version.V_7_5_0),
             entry(DATA_STREAMS_DATE_IN_INDEX_NAME, Version.V_7_11_0),
             entry(ML_INDICES_HIDDEN, Version.V_7_7_0),
-            entry(ML_ANALYTICS_MAPPINGS, Version.V_7_3_0)
+            entry(ML_ANALYTICS_MAPPINGS, Version.V_7_3_0),
+            entry(SLM_SUPPORTED, Version.V_7_4_0)
         );
     }
 }
diff --git a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java
index 2ad66f071d784..bd422c0c578d8 100644
--- a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java
+++ b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java
@@ -594,7 +594,7 @@ public void testSlmPolicyAndStats() throws IOException {
             Collections.singletonMap("indices", Collections.singletonList("*")),
             null
         );
-        if (isRunningAgainstOldCluster() && has(ProductFeature.SLM)) {
+        if (isRunningAgainstOldCluster() && clusterHasFeature(RestTestLegacyFeatures.SLM_SUPPORTED)) {
             Request createRepoRequest = new Request("PUT", "_snapshot/test-repo");
             String repoCreateJson = "{" + " \"type\": \"fs\"," + " \"settings\": {" + " \"location\": \"test-repo\"" + " }" + "}";
             createRepoRequest.setJsonEntity(repoCreateJson);
@@ -608,7 +608,7 @@ public void testSlmPolicyAndStats() throws IOException {
             client().performRequest(createSlmPolicyRequest);
         }

-        if (isRunningAgainstOldCluster() == false && has(ProductFeature.SLM)) {
+        if (isRunningAgainstOldCluster() == false && clusterHasFeature(RestTestLegacyFeatures.SLM_SUPPORTED)) {
             Request getSlmPolicyRequest = new Request("GET", "_slm/policy/test-policy");
             Response response = client().performRequest(getSlmPolicyRequest);
             Map<String, Object> responseMap = entityAsMap(response);

From b03c4ab952c78dd925f96ff39895e2fbb07d1382 Mon Sep 17 00:00:00 2001
From: Armin Braun
Date: Wed, 6 Dec 2023 15:44:41 +0100
Subject: [PATCH 03/45] Fix SearchResponseMerger ref counting (#103021)
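
A minimal sketch of the ownership rule this commit adopts, described in the
message below; the types here are hypothetical simplifications, not the real
SearchResponse/SearchResponseMerger API:

    // Hypothetical sketch: a ref-counted response plus a merger that takes
    // one reference per added response and releases all of them exactly once
    // when it is closed.
    import java.util.List;
    import java.util.concurrent.CopyOnWriteArrayList;
    import java.util.concurrent.atomic.AtomicInteger;

    final class RefCountedResponse {
        private final AtomicInteger refCount = new AtomicInteger(1);

        void mustIncRef() {
            // grant the caller (e.g. the merger) its own reference
            if (refCount.getAndIncrement() <= 0) {
                throw new IllegalStateException("already released");
            }
        }

        void decRef() {
            if (refCount.decrementAndGet() == 0) {
                // last reference gone: free pooled buffers here
            }
        }
    }

    final class Merger implements AutoCloseable {
        private final List<RefCountedResponse> responses = new CopyOnWriteArrayList<>();

        void add(RefCountedResponse response) {
            response.mustIncRef(); // the merger co-owns the response from here on
            responses.add(response);
        }

        @Override
        public void close() { // reached on both the merge and the error path
            responses.forEach(RefCountedResponse::decRef);
        }
    }

Tying the merger's close() to the completion of the overall request (the diff
below uses ActionListener.releaseAfter for this) is what guarantees the
references are also released when the search fails.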
SearchResponseMerger holds references to SearchResponse instances. This
commit makes it releasable so that we can safely hold a reference to each of
the SearchResponse instances that it will merge, and safely release them in
both the merge and error cases.
---
 .../action/search/MultiSearchResponse.java | 7 +-
 .../action/search/SearchResponseMerger.java | 16 +-
 .../search/TransportMultiSearchAction.java | 3 +-
 .../action/search/TransportSearchAction.java | 22 +-
 .../search/SearchResponseMergerTests.java | 1451 +++++++++--------
 .../search/TransportSearchActionTests.java | 65 +-
 .../geo/BasePointShapeQueryTestCase.java | 3 +-
 7 files changed, 885 insertions(+), 682 deletions(-)

diff --git a/server/src/main/java/org/elasticsearch/action/search/MultiSearchResponse.java b/server/src/main/java/org/elasticsearch/action/search/MultiSearchResponse.java
index aee631fb5d4cf..3cc3370edfe54 100644
--- a/server/src/main/java/org/elasticsearch/action/search/MultiSearchResponse.java
+++ b/server/src/main/java/org/elasticsearch/action/search/MultiSearchResponse.java
@@ -61,7 +61,12 @@ public static class Item implements Writeable, ChunkedToXContent {
         private final SearchResponse response;
         private final Exception exception;

-        public Item(SearchResponse response, Exception exception) {
+        /**
+         *
+         * @param response search response that is considered owned by this instance after this constructor returns or {@code null}
+         * @param exception exception in case of search failure
+         */
+        public Item(@Nullable SearchResponse response, @Nullable Exception exception) {
             this.response = response;
             this.exception = exception;
         }
diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchResponseMerger.java b/server/src/main/java/org/elasticsearch/action/search/SearchResponseMerger.java
index 1faa57cbfcd60..b6143cfc51c3a 100644
--- a/server/src/main/java/org/elasticsearch/action/search/SearchResponseMerger.java
+++ b/server/src/main/java/org/elasticsearch/action/search/SearchResponseMerger.java
@@ -19,6 +19,7 @@
 import org.elasticsearch.action.search.SearchResponse.Clusters;
 import org.elasticsearch.action.search.TransportSearchAction.SearchTimeProvider;
 import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore;
+import org.elasticsearch.core.Releasable;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.lucene.grouping.TopFieldGroups;
 import org.elasticsearch.search.SearchHit;
@@ -31,6 +32,7 @@
 import org.elasticsearch.search.profile.SearchProfileShardResult;
 import org.elasticsearch.search.suggest.Suggest;
 import org.elasticsearch.search.suggest.completion.CompletionSuggestion;
+import org.elasticsearch.transport.LeakTracker;

 import java.util.ArrayList;
 import java.util.Arrays;
@@ -64,7 +66,7 @@
 // TODO it may make sense to integrate the remote clusters responses as a shard response in the initial search phase and ignore hits coming
 // from the remote clusters in the fetch phase. This would be identical to the removed QueryAndFetch strategy except that only the remote
 // cluster response would have the fetch results.
-final class SearchResponseMerger {
+final class SearchResponseMerger implements Releasable {
     final int from;
     final int size;
     final int trackTotalHitsUpTo;
@@ -72,6 +74,12 @@ final class SearchResponseMerger {
     private final AggregationReduceContext.Builder aggReduceContextBuilder;
     private final List<SearchResponse> searchResponses = new CopyOnWriteArrayList<>();

+    private final Releasable releasable = LeakTracker.wrap(() -> {
+        for (SearchResponse searchResponse : searchResponses) {
+            searchResponse.decRef();
+        }
+    });
+
     SearchResponseMerger(
         int from,
         int size,
@@ -93,6 +101,7 @@ final class SearchResponseMerger {
      */
     void add(SearchResponse searchResponse) {
         assert searchResponse.getScrollId() == null : "merging scroll results is not supported";
+        searchResponse.mustIncRef();
         searchResponses.add(searchResponse);
     }

@@ -383,6 +392,11 @@ private static SearchHits topDocsToSearchHits(TopDocs topDocs, TopDocsStats topD
         return new SearchHits(searchHits, topDocsStats.getTotalHits(), topDocsStats.getMaxScore(), sortFields, groupField, groupValues);
     }

+    @Override
+    public void close() {
+        releasable.close();
+    }
+
     private static final class FieldDocAndSearchHit extends FieldDoc {
         private final SearchHit searchHit;
diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportMultiSearchAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportMultiSearchAction.java
index 1fc9bca607285..c81f3c3dc24c6 100644
--- a/server/src/main/java/org/elasticsearch/action/search/TransportMultiSearchAction.java
+++ b/server/src/main/java/org/elasticsearch/action/search/TransportMultiSearchAction.java
@@ -154,9 +154,10 @@ void executeSearch(
          * when we handle the response rather than going recursive, we fork to another thread, otherwise we recurse.
          */
         final Thread thread = Thread.currentThread();
-        client.search(request.request, new ActionListener<SearchResponse>() {
+        client.search(request.request, new ActionListener<>() {
             @Override
             public void onResponse(final SearchResponse searchResponse) {
+                searchResponse.mustIncRef(); // acquire reference on behalf of MultiSearchResponse.Item below
                 handleResponse(request.responseSlot, new MultiSearchResponse.Item(searchResponse, null));
             }
diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java
index f164e3342fb60..6045a9ff5efa3 100644
--- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java
+++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java
@@ -765,7 +765,14 @@ private static ActionListener<SearchResponse> createCCSListener(
         SearchResponse.Clusters clusters,
         ActionListener<SearchResponse> originalListener
     ) {
-        return new CCSActionListener<>(clusterAlias, skipUnavailable, countDown, exceptions, clusters, originalListener) {
+        return new CCSActionListener<>(
+            clusterAlias,
+            skipUnavailable,
+            countDown,
+            exceptions,
+            clusters,
+            ActionListener.releaseAfter(originalListener, searchResponseMerger)
+        ) {
             @Override
             void innerOnResponse(SearchResponse searchResponse) {
                 // TODO: in CCS fail fast ticket we may need to fail the query if the cluster gets marked as FAILED
@@ -777,6 +784,11 @@ void innerOnResponse(SearchResponse searchResponse) {
             SearchResponse createFinalResponse() {
                 return searchResponseMerger.getMergedResponse(clusters);
             }
+
+            @Override
+            protected void releaseResponse(SearchResponse searchResponse) {
+                searchResponse.decRef();
+            }
         };
     }

@@ -1493,13 +1505,19 @@ private void maybeFinish() {
originalListener.onFailure(e); return; } - originalListener.onResponse(response); + try { + originalListener.onResponse(response); + } finally { + releaseResponse(response); + } } else { originalListener.onFailure(exceptions.get()); } } } + protected void releaseResponse(FinalResponse response) {} + abstract FinalResponse createFinalResponse(); } diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchResponseMergerTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchResponseMergerTests.java index dc6e69b15ee32..e57b204df0836 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchResponseMergerTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchResponseMergerTests.java @@ -98,367 +98,446 @@ private void awaitResponsesAdded() throws InterruptedException { public void testMergeTookInMillis() throws InterruptedException { long currentRelativeTime = randomNonNegativeLong(); SearchTimeProvider timeProvider = new SearchTimeProvider(randomLong(), 0, () -> currentRelativeTime); - SearchResponseMerger merger = new SearchResponseMerger( - randomIntBetween(0, 1000), - randomIntBetween(0, 10000), - SearchContext.TRACK_TOTAL_HITS_ACCURATE, - timeProvider, - emptyReduceContextBuilder() - ); - for (int i = 0; i < numResponses; i++) { - SearchResponse searchResponse = new SearchResponse( - InternalSearchResponse.EMPTY_WITH_TOTAL_HITS, - null, - 1, - 1, - 0, - randomNonNegativeLong(), - ShardSearchFailure.EMPTY_ARRAY, - SearchResponseTests.randomClusters() - ); - addResponse(merger, searchResponse); + try ( + SearchResponseMerger merger = new SearchResponseMerger( + randomIntBetween(0, 1000), + randomIntBetween(0, 10000), + SearchContext.TRACK_TOTAL_HITS_ACCURATE, + timeProvider, + emptyReduceContextBuilder() + ) + ) { + for (int i = 0; i < numResponses; i++) { + SearchResponse searchResponse = new SearchResponse( + InternalSearchResponse.EMPTY_WITH_TOTAL_HITS, + null, + 1, + 1, + 0, + randomNonNegativeLong(), + ShardSearchFailure.EMPTY_ARRAY, + SearchResponseTests.randomClusters() + ); + try { + addResponse(merger, searchResponse); + } finally { + searchResponse.decRef(); + } + } + awaitResponsesAdded(); + SearchResponse searchResponse = merger.getMergedResponse(SearchResponse.Clusters.EMPTY); + try { + assertEquals(TimeUnit.NANOSECONDS.toMillis(currentRelativeTime), searchResponse.getTook().millis()); + } finally { + searchResponse.decRef(); + } } - awaitResponsesAdded(); - SearchResponse searchResponse = merger.getMergedResponse(SearchResponse.Clusters.EMPTY); - assertEquals(TimeUnit.NANOSECONDS.toMillis(currentRelativeTime), searchResponse.getTook().millis()); } public void testMergeShardFailures() throws InterruptedException { SearchTimeProvider searchTimeProvider = new SearchTimeProvider(0, 0, () -> 0); - SearchResponseMerger merger = new SearchResponseMerger( - 0, - 0, - SearchContext.TRACK_TOTAL_HITS_ACCURATE, - searchTimeProvider, - emptyReduceContextBuilder() - ); - PriorityQueue> priorityQueue = new PriorityQueue<>( - Comparator.comparing(Tuple::v1, (o1, o2) -> { - int compareTo = o1.getShardId().compareTo(o2.getShardId()); - if (compareTo != 0) { - return compareTo; - } - return o1.getClusterAlias().compareTo(o2.getClusterAlias()); - }) - ); - int numIndices = numResponses * randomIntBetween(1, 3); - Iterator> indicesPerCluster = randomRealisticIndices(numIndices, numResponses).entrySet().iterator(); - for (int i = 0; i < numResponses; i++) { - Map.Entry entry = indicesPerCluster.next(); - String clusterAlias = 
entry.getKey(); - Index[] indices = entry.getValue(); - int numFailures = randomIntBetween(1, 10); - ShardSearchFailure[] shardSearchFailures = new ShardSearchFailure[numFailures]; - for (int j = 0; j < numFailures; j++) { - ShardId shardId = new ShardId(randomFrom(indices), j); - SearchShardTarget searchShardTarget = new SearchShardTarget(randomAlphaOfLength(6), shardId, clusterAlias); - ShardSearchFailure failure = new ShardSearchFailure(new IllegalArgumentException(), searchShardTarget); - shardSearchFailures[j] = failure; - priorityQueue.add(Tuple.tuple(searchShardTarget, failure)); - } - SearchResponse searchResponse = new SearchResponse( - InternalSearchResponse.EMPTY_WITH_TOTAL_HITS, - null, - 1, - 1, + try ( + SearchResponseMerger merger = new SearchResponseMerger( + 0, 0, - 100L, - shardSearchFailures, - SearchResponse.Clusters.EMPTY + SearchContext.TRACK_TOTAL_HITS_ACCURATE, + searchTimeProvider, + emptyReduceContextBuilder() + ) + ) { + PriorityQueue> priorityQueue = new PriorityQueue<>( + Comparator.comparing(Tuple::v1, (o1, o2) -> { + int compareTo = o1.getShardId().compareTo(o2.getShardId()); + if (compareTo != 0) { + return compareTo; + } + return o1.getClusterAlias().compareTo(o2.getClusterAlias()); + }) ); - addResponse(merger, searchResponse); - } - awaitResponsesAdded(); - assertEquals(numResponses, merger.numResponses()); - SearchResponse.Clusters clusters = SearchResponseTests.randomClusters(); - SearchResponse mergedResponse = merger.getMergedResponse(clusters); - assertSame(clusters, mergedResponse.getClusters()); - assertEquals(numResponses, mergedResponse.getTotalShards()); - assertEquals(numResponses, mergedResponse.getSuccessfulShards()); - assertEquals(0, mergedResponse.getSkippedShards()); - assertEquals(priorityQueue.size(), mergedResponse.getFailedShards()); - ShardSearchFailure[] shardFailures = mergedResponse.getShardFailures(); - assertEquals(priorityQueue.size(), shardFailures.length); - for (ShardSearchFailure shardFailure : shardFailures) { - ShardSearchFailure expected = priorityQueue.poll().v2(); - assertSame(expected, shardFailure); + int numIndices = numResponses * randomIntBetween(1, 3); + Iterator> indicesPerCluster = randomRealisticIndices(numIndices, numResponses).entrySet().iterator(); + for (int i = 0; i < numResponses; i++) { + Map.Entry entry = indicesPerCluster.next(); + String clusterAlias = entry.getKey(); + Index[] indices = entry.getValue(); + int numFailures = randomIntBetween(1, 10); + ShardSearchFailure[] shardSearchFailures = new ShardSearchFailure[numFailures]; + for (int j = 0; j < numFailures; j++) { + ShardId shardId = new ShardId(randomFrom(indices), j); + SearchShardTarget searchShardTarget = new SearchShardTarget(randomAlphaOfLength(6), shardId, clusterAlias); + ShardSearchFailure failure = new ShardSearchFailure(new IllegalArgumentException(), searchShardTarget); + shardSearchFailures[j] = failure; + priorityQueue.add(Tuple.tuple(searchShardTarget, failure)); + } + SearchResponse searchResponse = new SearchResponse( + InternalSearchResponse.EMPTY_WITH_TOTAL_HITS, + null, + 1, + 1, + 0, + 100L, + shardSearchFailures, + SearchResponse.Clusters.EMPTY + ); + try { + addResponse(merger, searchResponse); + } finally { + searchResponse.decRef(); + } + } + awaitResponsesAdded(); + assertEquals(numResponses, merger.numResponses()); + SearchResponse.Clusters clusters = SearchResponseTests.randomClusters(); + SearchResponse mergedResponse = merger.getMergedResponse(clusters); + try { + assertSame(clusters, 
mergedResponse.getClusters()); + assertEquals(numResponses, mergedResponse.getTotalShards()); + assertEquals(numResponses, mergedResponse.getSuccessfulShards()); + assertEquals(0, mergedResponse.getSkippedShards()); + assertEquals(priorityQueue.size(), mergedResponse.getFailedShards()); + ShardSearchFailure[] shardFailures = mergedResponse.getShardFailures(); + assertEquals(priorityQueue.size(), shardFailures.length); + for (ShardSearchFailure shardFailure : shardFailures) { + ShardSearchFailure expected = priorityQueue.poll().v2(); + assertSame(expected, shardFailure); + } + } finally { + mergedResponse.decRef(); + } } } public void testMergeShardFailuresNullShardTarget() throws InterruptedException { SearchTimeProvider searchTimeProvider = new SearchTimeProvider(0, 0, () -> 0); - SearchResponseMerger merger = new SearchResponseMerger( - 0, - 0, - SearchContext.TRACK_TOTAL_HITS_ACCURATE, - searchTimeProvider, - emptyReduceContextBuilder() - ); - PriorityQueue> priorityQueue = new PriorityQueue<>(Comparator.comparing(Tuple::v1)); - for (int i = 0; i < numResponses; i++) { - int numFailures = randomIntBetween(1, 10); - ShardSearchFailure[] shardSearchFailures = new ShardSearchFailure[numFailures]; - for (int j = 0; j < numFailures; j++) { - String index = "index-" + i; - ShardId shardId = new ShardId(index, index + "-uuid", j); - ElasticsearchException elasticsearchException = new ElasticsearchException(new IllegalArgumentException()); - elasticsearchException.setShard(shardId); - ShardSearchFailure failure = new ShardSearchFailure(elasticsearchException); - shardSearchFailures[j] = failure; - priorityQueue.add(Tuple.tuple(shardId, failure)); - } - SearchResponse searchResponse = new SearchResponse( - InternalSearchResponse.EMPTY_WITH_TOTAL_HITS, - null, - 1, - 1, + try ( + SearchResponseMerger merger = new SearchResponseMerger( 0, - 100L, - shardSearchFailures, - SearchResponse.Clusters.EMPTY - ); - addResponse(merger, searchResponse); - } - awaitResponsesAdded(); - assertEquals(numResponses, merger.numResponses()); - SearchResponse.Clusters clusters = SearchResponseTests.randomClusters(); - SearchResponse mergedResponse = merger.getMergedResponse(clusters); - assertSame(clusters, mergedResponse.getClusters()); - assertEquals(numResponses, mergedResponse.getTotalShards()); - assertEquals(numResponses, mergedResponse.getSuccessfulShards()); - assertEquals(0, mergedResponse.getSkippedShards()); - assertEquals(priorityQueue.size(), mergedResponse.getFailedShards()); - ShardSearchFailure[] shardFailures = mergedResponse.getShardFailures(); - assertEquals(priorityQueue.size(), shardFailures.length); - for (ShardSearchFailure shardFailure : shardFailures) { - ShardSearchFailure expected = priorityQueue.poll().v2(); - assertSame(expected, shardFailure); + 0, + SearchContext.TRACK_TOTAL_HITS_ACCURATE, + searchTimeProvider, + emptyReduceContextBuilder() + ) + ) { + PriorityQueue> priorityQueue = new PriorityQueue<>(Comparator.comparing(Tuple::v1)); + for (int i = 0; i < numResponses; i++) { + int numFailures = randomIntBetween(1, 10); + ShardSearchFailure[] shardSearchFailures = new ShardSearchFailure[numFailures]; + for (int j = 0; j < numFailures; j++) { + String index = "index-" + i; + ShardId shardId = new ShardId(index, index + "-uuid", j); + ElasticsearchException elasticsearchException = new ElasticsearchException(new IllegalArgumentException()); + elasticsearchException.setShard(shardId); + ShardSearchFailure failure = new ShardSearchFailure(elasticsearchException); + shardSearchFailures[j] 
= failure; + priorityQueue.add(Tuple.tuple(shardId, failure)); + } + SearchResponse searchResponse = new SearchResponse( + InternalSearchResponse.EMPTY_WITH_TOTAL_HITS, + null, + 1, + 1, + 0, + 100L, + shardSearchFailures, + SearchResponse.Clusters.EMPTY + ); + try { + addResponse(merger, searchResponse); + } finally { + searchResponse.decRef(); + } + } + awaitResponsesAdded(); + assertEquals(numResponses, merger.numResponses()); + SearchResponse.Clusters clusters = SearchResponseTests.randomClusters(); + SearchResponse mergedResponse = merger.getMergedResponse(clusters); + try { + assertSame(clusters, mergedResponse.getClusters()); + assertEquals(numResponses, mergedResponse.getTotalShards()); + assertEquals(numResponses, mergedResponse.getSuccessfulShards()); + assertEquals(0, mergedResponse.getSkippedShards()); + assertEquals(priorityQueue.size(), mergedResponse.getFailedShards()); + ShardSearchFailure[] shardFailures = mergedResponse.getShardFailures(); + assertEquals(priorityQueue.size(), shardFailures.length); + for (ShardSearchFailure shardFailure : shardFailures) { + ShardSearchFailure expected = priorityQueue.poll().v2(); + assertSame(expected, shardFailure); + } + } finally { + mergedResponse.decRef(); + } } } public void testMergeShardFailuresNullShardId() throws InterruptedException { SearchTimeProvider searchTimeProvider = new SearchTimeProvider(0, 0, () -> 0); - SearchResponseMerger merger = new SearchResponseMerger( - 0, - 0, - SearchContext.TRACK_TOTAL_HITS_ACCURATE, - searchTimeProvider, - emptyReduceContextBuilder() - ); - List expectedFailures = new ArrayList<>(); - for (int i = 0; i < numResponses; i++) { - int numFailures = randomIntBetween(1, 50); - ShardSearchFailure[] shardSearchFailures = new ShardSearchFailure[numFailures]; - for (int j = 0; j < numFailures; j++) { - ShardSearchFailure shardSearchFailure = new ShardSearchFailure(new ElasticsearchException(new IllegalArgumentException())); - shardSearchFailures[j] = shardSearchFailure; - expectedFailures.add(shardSearchFailure); - } - SearchResponse searchResponse = new SearchResponse( - InternalSearchResponse.EMPTY_WITH_TOTAL_HITS, - null, - 1, - 1, + try ( + SearchResponseMerger merger = new SearchResponseMerger( 0, - 100L, - shardSearchFailures, - SearchResponse.Clusters.EMPTY - ); - addResponse(merger, searchResponse); + 0, + SearchContext.TRACK_TOTAL_HITS_ACCURATE, + searchTimeProvider, + emptyReduceContextBuilder() + ) + ) { + List expectedFailures = new ArrayList<>(); + for (int i = 0; i < numResponses; i++) { + int numFailures = randomIntBetween(1, 50); + ShardSearchFailure[] shardSearchFailures = new ShardSearchFailure[numFailures]; + for (int j = 0; j < numFailures; j++) { + ShardSearchFailure shardSearchFailure = new ShardSearchFailure( + new ElasticsearchException(new IllegalArgumentException()) + ); + shardSearchFailures[j] = shardSearchFailure; + expectedFailures.add(shardSearchFailure); + } + SearchResponse searchResponse = new SearchResponse( + InternalSearchResponse.EMPTY_WITH_TOTAL_HITS, + null, + 1, + 1, + 0, + 100L, + shardSearchFailures, + SearchResponse.Clusters.EMPTY + ); + try { + addResponse(merger, searchResponse); + } finally { + searchResponse.decRef(); + } + } + awaitResponsesAdded(); + assertEquals(numResponses, merger.numResponses()); + var mergedResponse = merger.getMergedResponse(SearchResponse.Clusters.EMPTY); + try { + ShardSearchFailure[] shardFailures = mergedResponse.getShardFailures(); + assertThat(Arrays.asList(shardFailures), 
containsInAnyOrder(expectedFailures.toArray(ShardSearchFailure.EMPTY_ARRAY))); + } finally { + mergedResponse.decRef(); + } } - awaitResponsesAdded(); - assertEquals(numResponses, merger.numResponses()); - ShardSearchFailure[] shardFailures = merger.getMergedResponse(SearchResponse.Clusters.EMPTY).getShardFailures(); - assertThat(Arrays.asList(shardFailures), containsInAnyOrder(expectedFailures.toArray(ShardSearchFailure.EMPTY_ARRAY))); } public void testMergeProfileResults() throws InterruptedException { SearchTimeProvider searchTimeProvider = new SearchTimeProvider(0, 0, () -> 0); - SearchResponseMerger merger = new SearchResponseMerger( - 0, - 0, - SearchContext.TRACK_TOTAL_HITS_ACCURATE, - searchTimeProvider, - emptyReduceContextBuilder() - ); - Map expectedProfile = new HashMap<>(); - for (int i = 0; i < numResponses; i++) { - SearchProfileResults profile = SearchProfileResultsTests.createTestItem(); - expectedProfile.putAll(profile.getShardResults()); - SearchHits searchHits = new SearchHits(new SearchHit[0], new TotalHits(0, TotalHits.Relation.EQUAL_TO), Float.NaN); - InternalSearchResponse internalSearchResponse = new InternalSearchResponse(searchHits, null, null, profile, false, null, 1); - SearchResponse searchResponse = new SearchResponse( - internalSearchResponse, - null, - 1, - 1, + try ( + SearchResponseMerger merger = new SearchResponseMerger( 0, - 100L, - ShardSearchFailure.EMPTY_ARRAY, - SearchResponse.Clusters.EMPTY - ); - addResponse(merger, searchResponse); + 0, + SearchContext.TRACK_TOTAL_HITS_ACCURATE, + searchTimeProvider, + emptyReduceContextBuilder() + ) + ) { + Map expectedProfile = new HashMap<>(); + for (int i = 0; i < numResponses; i++) { + SearchProfileResults profile = SearchProfileResultsTests.createTestItem(); + expectedProfile.putAll(profile.getShardResults()); + SearchHits searchHits = new SearchHits(new SearchHit[0], new TotalHits(0, TotalHits.Relation.EQUAL_TO), Float.NaN); + InternalSearchResponse internalSearchResponse = new InternalSearchResponse(searchHits, null, null, profile, false, null, 1); + SearchResponse searchResponse = new SearchResponse( + internalSearchResponse, + null, + 1, + 1, + 0, + 100L, + ShardSearchFailure.EMPTY_ARRAY, + SearchResponse.Clusters.EMPTY + ); + try { + addResponse(merger, searchResponse); + } finally { + searchResponse.decRef(); + } + } + awaitResponsesAdded(); + assertEquals(numResponses, merger.numResponses()); + SearchResponse.Clusters clusters = SearchResponseTests.randomClusters(); + SearchResponse mergedResponse = merger.getMergedResponse(clusters); + try { + assertSame(clusters, mergedResponse.getClusters()); + assertEquals(numResponses, mergedResponse.getTotalShards()); + assertEquals(numResponses, mergedResponse.getSuccessfulShards()); + assertEquals(0, mergedResponse.getSkippedShards()); + assertEquals(0, mergedResponse.getFailedShards()); + assertEquals(0, mergedResponse.getShardFailures().length); + assertEquals(expectedProfile, mergedResponse.getProfileResults()); + } finally { + mergedResponse.decRef(); + } } - awaitResponsesAdded(); - assertEquals(numResponses, merger.numResponses()); - SearchResponse.Clusters clusters = SearchResponseTests.randomClusters(); - SearchResponse mergedResponse = merger.getMergedResponse(clusters); - assertSame(clusters, mergedResponse.getClusters()); - assertEquals(numResponses, mergedResponse.getTotalShards()); - assertEquals(numResponses, mergedResponse.getSuccessfulShards()); - assertEquals(0, mergedResponse.getSkippedShards()); - assertEquals(0, 
mergedResponse.getFailedShards()); - assertEquals(0, mergedResponse.getShardFailures().length); - assertEquals(expectedProfile, mergedResponse.getProfileResults()); } public void testMergeCompletionSuggestions() throws InterruptedException { String suggestionName = randomAlphaOfLengthBetween(4, 8); int size = randomIntBetween(1, 100); - SearchResponseMerger searchResponseMerger = new SearchResponseMerger( - 0, - 0, - 0, - new SearchTimeProvider(0, 0, () -> 0), - emptyReduceContextBuilder() - ); - for (int i = 0; i < numResponses; i++) { - List>> suggestions = - new ArrayList<>(); - CompletionSuggestion completionSuggestion = new CompletionSuggestion(suggestionName, size, false); - CompletionSuggestion.Entry options = new CompletionSuggestion.Entry(new Text("suggest"), 0, 10); - int docId = randomIntBetween(0, Integer.MAX_VALUE); - CompletionSuggestion.Entry.Option option = new CompletionSuggestion.Entry.Option( - docId, - new Text(randomAlphaOfLengthBetween(5, 10)), - i, - Collections.emptyMap() - ); - SearchHit hit = new SearchHit(docId); - ShardId shardId = new ShardId( - randomAlphaOfLengthBetween(5, 10), - randomAlphaOfLength(10), - randomIntBetween(0, Integer.MAX_VALUE) - ); - String clusterAlias = randomBoolean() ? "" : randomAlphaOfLengthBetween(5, 10); - hit.shard(new SearchShardTarget("node", shardId, clusterAlias)); - option.setHit(hit); - options.addOption(option); - completionSuggestion.addTerm(options); - suggestions.add(completionSuggestion); - Suggest suggest = new Suggest(suggestions); - SearchHits searchHits = new SearchHits(new SearchHit[0], null, Float.NaN); - InternalSearchResponse internalSearchResponse = new InternalSearchResponse(searchHits, null, suggest, null, false, null, 1); - SearchResponse searchResponse = new SearchResponse( - internalSearchResponse, - null, - 1, - 1, + try ( + SearchResponseMerger searchResponseMerger = new SearchResponseMerger( 0, - randomLong(), - ShardSearchFailure.EMPTY_ARRAY, - SearchResponse.Clusters.EMPTY - ); - addResponse(searchResponseMerger, searchResponse); - } - awaitResponsesAdded(); - assertEquals(numResponses, searchResponseMerger.numResponses()); - SearchResponse.Clusters clusters = SearchResponseTests.randomClusters(); - SearchResponse mergedResponse = searchResponseMerger.getMergedResponse(clusters); - assertSame(clusters, mergedResponse.getClusters()); - assertEquals(numResponses, mergedResponse.getTotalShards()); - assertEquals(numResponses, mergedResponse.getSuccessfulShards()); - assertEquals(0, mergedResponse.getSkippedShards()); - assertEquals(0, mergedResponse.getFailedShards()); - assertEquals(0, mergedResponse.getShardFailures().length); - Suggest.Suggestion> suggestion = mergedResponse - .getSuggest() - .getSuggestion(suggestionName); - assertEquals(1, suggestion.getEntries().size()); - Suggest.Suggestion.Entry options = suggestion.getEntries().get(0); - assertEquals(Math.min(numResponses, size), options.getOptions().size()); - int i = numResponses; - for (Suggest.Suggestion.Entry.Option option : options) { - assertEquals(--i, option.getScore(), 0f); + 0, + 0, + new SearchTimeProvider(0, 0, () -> 0), + emptyReduceContextBuilder() + ) + ) { + for (int i = 0; i < numResponses; i++) { + List>> suggestions = + new ArrayList<>(); + CompletionSuggestion completionSuggestion = new CompletionSuggestion(suggestionName, size, false); + CompletionSuggestion.Entry options = new CompletionSuggestion.Entry(new Text("suggest"), 0, 10); + int docId = randomIntBetween(0, Integer.MAX_VALUE); + CompletionSuggestion.Entry.Option 
option = new CompletionSuggestion.Entry.Option( + docId, + new Text(randomAlphaOfLengthBetween(5, 10)), + i, + Collections.emptyMap() + ); + SearchHit hit = new SearchHit(docId); + ShardId shardId = new ShardId( + randomAlphaOfLengthBetween(5, 10), + randomAlphaOfLength(10), + randomIntBetween(0, Integer.MAX_VALUE) + ); + String clusterAlias = randomBoolean() ? "" : randomAlphaOfLengthBetween(5, 10); + hit.shard(new SearchShardTarget("node", shardId, clusterAlias)); + option.setHit(hit); + options.addOption(option); + completionSuggestion.addTerm(options); + suggestions.add(completionSuggestion); + Suggest suggest = new Suggest(suggestions); + SearchHits searchHits = new SearchHits(new SearchHit[0], null, Float.NaN); + InternalSearchResponse internalSearchResponse = new InternalSearchResponse(searchHits, null, suggest, null, false, null, 1); + SearchResponse searchResponse = new SearchResponse( + internalSearchResponse, + null, + 1, + 1, + 0, + randomLong(), + ShardSearchFailure.EMPTY_ARRAY, + SearchResponse.Clusters.EMPTY + ); + try { + addResponse(searchResponseMerger, searchResponse); + } finally { + searchResponse.decRef(); + } + } + awaitResponsesAdded(); + assertEquals(numResponses, searchResponseMerger.numResponses()); + SearchResponse.Clusters clusters = SearchResponseTests.randomClusters(); + SearchResponse mergedResponse = searchResponseMerger.getMergedResponse(clusters); + try { + assertSame(clusters, mergedResponse.getClusters()); + assertEquals(numResponses, mergedResponse.getTotalShards()); + assertEquals(numResponses, mergedResponse.getSuccessfulShards()); + assertEquals(0, mergedResponse.getSkippedShards()); + assertEquals(0, mergedResponse.getFailedShards()); + assertEquals(0, mergedResponse.getShardFailures().length); + Suggest.Suggestion> suggestion = + mergedResponse.getSuggest().getSuggestion(suggestionName); + assertEquals(1, suggestion.getEntries().size()); + Suggest.Suggestion.Entry options = suggestion.getEntries().get(0); + assertEquals(Math.min(numResponses, size), options.getOptions().size()); + int i = numResponses; + for (Suggest.Suggestion.Entry.Option option : options) { + assertEquals(--i, option.getScore(), 0f); + } + } finally { + mergedResponse.decRef(); + } } } public void testMergeCompletionSuggestionsTieBreak() throws InterruptedException { String suggestionName = randomAlphaOfLengthBetween(4, 8); int size = randomIntBetween(1, 100); - SearchResponseMerger searchResponseMerger = new SearchResponseMerger( - 0, - 0, - 0, - new SearchTimeProvider(0, 0, () -> 0), - emptyReduceContextBuilder() - ); - for (int i = 0; i < numResponses; i++) { - List>> suggestions = - new ArrayList<>(); - CompletionSuggestion completionSuggestion = new CompletionSuggestion(suggestionName, size, false); - CompletionSuggestion.Entry options = new CompletionSuggestion.Entry(new Text("suggest"), 0, 10); - int docId = randomIntBetween(0, Integer.MAX_VALUE); - CompletionSuggestion.Entry.Option option = new CompletionSuggestion.Entry.Option( - docId, - new Text("suggestion"), - 1F, - Collections.emptyMap() - ); - SearchHit searchHit = new SearchHit(docId); - searchHit.shard( - new SearchShardTarget( - "node", - new ShardId("index", "uuid", randomIntBetween(0, Integer.MAX_VALUE)), - randomBoolean() ? 
RemoteClusterService.LOCAL_CLUSTER_GROUP_KEY : randomAlphaOfLengthBetween(5, 10) - ) - ); - option.setHit(searchHit); - options.addOption(option); - completionSuggestion.addTerm(options); - suggestions.add(completionSuggestion); - Suggest suggest = new Suggest(suggestions); - SearchHits searchHits = new SearchHits(new SearchHit[0], null, Float.NaN); - InternalSearchResponse internalSearchResponse = new InternalSearchResponse(searchHits, null, suggest, null, false, null, 1); - SearchResponse searchResponse = new SearchResponse( - internalSearchResponse, - null, - 1, - 1, + try ( + SearchResponseMerger searchResponseMerger = new SearchResponseMerger( 0, - randomLong(), - ShardSearchFailure.EMPTY_ARRAY, - SearchResponse.Clusters.EMPTY - ); - addResponse(searchResponseMerger, searchResponse); - } - awaitResponsesAdded(); - assertEquals(numResponses, searchResponseMerger.numResponses()); - SearchResponse.Clusters clusters = SearchResponseTests.randomClusters(); - SearchResponse mergedResponse = searchResponseMerger.getMergedResponse(clusters); - assertSame(clusters, mergedResponse.getClusters()); - assertEquals(numResponses, mergedResponse.getTotalShards()); - assertEquals(numResponses, mergedResponse.getSuccessfulShards()); - assertEquals(0, mergedResponse.getSkippedShards()); - assertEquals(0, mergedResponse.getFailedShards()); - assertEquals(0, mergedResponse.getShardFailures().length); - CompletionSuggestion suggestion = mergedResponse.getSuggest().getSuggestion(suggestionName); - assertEquals(1, suggestion.getEntries().size()); - CompletionSuggestion.Entry options = suggestion.getEntries().get(0); - assertEquals(Math.min(numResponses, size), options.getOptions().size()); - int lastShardId = 0; - String lastClusterAlias = null; - for (CompletionSuggestion.Entry.Option option : options) { - assertEquals("suggestion", option.getText().string()); - SearchShardTarget shard = option.getHit().getShard(); - int currentShardId = shard.getShardId().id(); - assertThat(currentShardId, greaterThanOrEqualTo(lastShardId)); - if (currentShardId == lastShardId) { - assertThat(shard.getClusterAlias(), greaterThan(lastClusterAlias)); - } else { - lastShardId = currentShardId; + 0, + 0, + new SearchTimeProvider(0, 0, () -> 0), + emptyReduceContextBuilder() + ) + ) { + for (int i = 0; i < numResponses; i++) { + List>> suggestions = + new ArrayList<>(); + CompletionSuggestion completionSuggestion = new CompletionSuggestion(suggestionName, size, false); + CompletionSuggestion.Entry options = new CompletionSuggestion.Entry(new Text("suggest"), 0, 10); + int docId = randomIntBetween(0, Integer.MAX_VALUE); + CompletionSuggestion.Entry.Option option = new CompletionSuggestion.Entry.Option( + docId, + new Text("suggestion"), + 1F, + Collections.emptyMap() + ); + SearchHit searchHit = new SearchHit(docId); + searchHit.shard( + new SearchShardTarget( + "node", + new ShardId("index", "uuid", randomIntBetween(0, Integer.MAX_VALUE)), + randomBoolean() ? 
RemoteClusterService.LOCAL_CLUSTER_GROUP_KEY : randomAlphaOfLengthBetween(5, 10) + ) + ); + option.setHit(searchHit); + options.addOption(option); + completionSuggestion.addTerm(options); + suggestions.add(completionSuggestion); + Suggest suggest = new Suggest(suggestions); + SearchHits searchHits = new SearchHits(new SearchHit[0], null, Float.NaN); + InternalSearchResponse internalSearchResponse = new InternalSearchResponse(searchHits, null, suggest, null, false, null, 1); + SearchResponse searchResponse = new SearchResponse( + internalSearchResponse, + null, + 1, + 1, + 0, + randomLong(), + ShardSearchFailure.EMPTY_ARRAY, + SearchResponse.Clusters.EMPTY + ); + try { + addResponse(searchResponseMerger, searchResponse); + } finally { + searchResponse.decRef(); + } + } + awaitResponsesAdded(); + assertEquals(numResponses, searchResponseMerger.numResponses()); + SearchResponse.Clusters clusters = SearchResponseTests.randomClusters(); + SearchResponse mergedResponse = searchResponseMerger.getMergedResponse(clusters); + try { + assertSame(clusters, mergedResponse.getClusters()); + assertEquals(numResponses, mergedResponse.getTotalShards()); + assertEquals(numResponses, mergedResponse.getSuccessfulShards()); + assertEquals(0, mergedResponse.getSkippedShards()); + assertEquals(0, mergedResponse.getFailedShards()); + assertEquals(0, mergedResponse.getShardFailures().length); + CompletionSuggestion suggestion = mergedResponse.getSuggest().getSuggestion(suggestionName); + assertEquals(1, suggestion.getEntries().size()); + CompletionSuggestion.Entry options = suggestion.getEntries().get(0); + assertEquals(Math.min(numResponses, size), options.getOptions().size()); + int lastShardId = 0; + String lastClusterAlias = null; + for (CompletionSuggestion.Entry.Option option : options) { + assertEquals("suggestion", option.getText().string()); + SearchShardTarget shard = option.getHit().getShard(); + int currentShardId = shard.getShardId().id(); + assertThat(currentShardId, greaterThanOrEqualTo(lastShardId)); + if (currentShardId == lastShardId) { + assertThat(shard.getClusterAlias(), greaterThan(lastClusterAlias)); + } else { + lastShardId = currentShardId; + } + lastClusterAlias = shard.getClusterAlias(); + } + } finally { + mergedResponse.decRef(); } - lastClusterAlias = shard.getClusterAlias(); } } @@ -476,101 +555,123 @@ public void testMergeEmptyFormat() throws InterruptedException { ); SearchHits searchHits = new SearchHits(new SearchHit[0], null, Float.NaN); - SearchResponseMerger searchResponseMerger = new SearchResponseMerger( - 0, - 0, - 0, - new SearchTimeProvider(0, 0, () -> 0), - emptyReduceContextBuilder(new AggregatorFactories.Builder().addAggregator(new MaxAggregationBuilder("field1"))) - ); - for (Max max : Arrays.asList(max1, max2)) { - InternalAggregations aggs = InternalAggregations.from(Arrays.asList(max)); - InternalSearchResponse internalSearchResponse = new InternalSearchResponse(searchHits, aggs, null, null, false, null, 1); - SearchResponse searchResponse = new SearchResponse( - internalSearchResponse, - null, - 1, - 1, + try ( + SearchResponseMerger searchResponseMerger = new SearchResponseMerger( 0, - randomLong(), - ShardSearchFailure.EMPTY_ARRAY, - SearchResponse.Clusters.EMPTY - ); - searchResponseMerger.add(searchResponse); + 0, + 0, + new SearchTimeProvider(0, 0, () -> 0), + emptyReduceContextBuilder(new AggregatorFactories.Builder().addAggregator(new MaxAggregationBuilder("field1"))) + ) + ) { + for (Max max : Arrays.asList(max1, max2)) { + InternalAggregations aggs = 
InternalAggregations.from(Arrays.asList(max)); + InternalSearchResponse internalSearchResponse = new InternalSearchResponse(searchHits, aggs, null, null, false, null, 1); + SearchResponse searchResponse = new SearchResponse( + internalSearchResponse, + null, + 1, + 1, + 0, + randomLong(), + ShardSearchFailure.EMPTY_ARRAY, + SearchResponse.Clusters.EMPTY + ); + try { + searchResponseMerger.add(searchResponse); + } finally { + searchResponse.decRef(); + } + } + SearchResponse.Clusters clusters = SearchResponseTests.randomClusters(); + SearchResponse searchResponse = searchResponseMerger.getMergedResponse(clusters); + try { + Max mergedMax = searchResponse.getAggregations().get("field1"); + assertEquals(mergedMax.getValueAsString(), "2021-05-01T00:00:00.000Z"); + } finally { + searchResponse.decRef(); + } } - SearchResponse.Clusters clusters = SearchResponseTests.randomClusters(); - SearchResponse searchResponse = searchResponseMerger.getMergedResponse(clusters); - Max mergedMax = searchResponse.getAggregations().get("field1"); - assertEquals(mergedMax.getValueAsString(), "2021-05-01T00:00:00.000Z"); } public void testMergeAggs() throws InterruptedException { String maxAggName = randomAlphaOfLengthBetween(5, 8); String rangeAggName = randomAlphaOfLengthBetween(5, 8); - SearchResponseMerger searchResponseMerger = new SearchResponseMerger( - 0, - 0, - 0, - new SearchTimeProvider(0, 0, () -> 0), - emptyReduceContextBuilder( - new AggregatorFactories.Builder().addAggregator(new MaxAggregationBuilder(maxAggName)) - .addAggregator(new DateRangeAggregationBuilder(rangeAggName)) - ) - ); - int totalCount = 0; - double maxValue = Double.MIN_VALUE; - for (int i = 0; i < numResponses; i++) { - double value = randomDouble(); - maxValue = Math.max(value, maxValue); - Max max = new Max(maxAggName, value, DocValueFormat.RAW, Collections.emptyMap()); - InternalDateRange.Factory factory = new InternalDateRange.Factory(); - int count = randomIntBetween(1, 1000); - totalCount += count; - InternalDateRange.Bucket bucket = factory.createBucket( - "bucket", - 0D, - 10000D, - count, - InternalAggregations.EMPTY, - false, - DocValueFormat.RAW - ); - InternalDateRange range = factory.create(rangeAggName, singletonList(bucket), DocValueFormat.RAW, false, emptyMap()); - InternalAggregations aggs = InternalAggregations.from(Arrays.asList(range, max)); - SearchHits searchHits = new SearchHits(new SearchHit[0], null, Float.NaN); - InternalSearchResponse internalSearchResponse = new InternalSearchResponse(searchHits, aggs, null, null, false, null, 1); - SearchResponse searchResponse = new SearchResponse( - internalSearchResponse, - null, - 1, - 1, + try ( + SearchResponseMerger searchResponseMerger = new SearchResponseMerger( 0, - randomLong(), - ShardSearchFailure.EMPTY_ARRAY, - SearchResponse.Clusters.EMPTY - ); - addResponse(searchResponseMerger, searchResponse); + 0, + 0, + new SearchTimeProvider(0, 0, () -> 0), + emptyReduceContextBuilder( + new AggregatorFactories.Builder().addAggregator(new MaxAggregationBuilder(maxAggName)) + .addAggregator(new DateRangeAggregationBuilder(rangeAggName)) + ) + ) + ) { + int totalCount = 0; + double maxValue = Double.MIN_VALUE; + for (int i = 0; i < numResponses; i++) { + double value = randomDouble(); + maxValue = Math.max(value, maxValue); + Max max = new Max(maxAggName, value, DocValueFormat.RAW, Collections.emptyMap()); + InternalDateRange.Factory factory = new InternalDateRange.Factory(); + int count = randomIntBetween(1, 1000); + totalCount += count; + InternalDateRange.Bucket 
bucket = factory.createBucket( + "bucket", + 0D, + 10000D, + count, + InternalAggregations.EMPTY, + false, + DocValueFormat.RAW + ); + InternalDateRange range = factory.create(rangeAggName, singletonList(bucket), DocValueFormat.RAW, false, emptyMap()); + InternalAggregations aggs = InternalAggregations.from(Arrays.asList(range, max)); + SearchHits searchHits = new SearchHits(new SearchHit[0], null, Float.NaN); + InternalSearchResponse internalSearchResponse = new InternalSearchResponse(searchHits, aggs, null, null, false, null, 1); + SearchResponse searchResponse = new SearchResponse( + internalSearchResponse, + null, + 1, + 1, + 0, + randomLong(), + ShardSearchFailure.EMPTY_ARRAY, + SearchResponse.Clusters.EMPTY + ); + try { + addResponse(searchResponseMerger, searchResponse); + } finally { + searchResponse.decRef(); + } + } + awaitResponsesAdded(); + assertEquals(numResponses, searchResponseMerger.numResponses()); + SearchResponse.Clusters clusters = SearchResponseTests.randomClusters(); + SearchResponse mergedResponse = searchResponseMerger.getMergedResponse(clusters); + try { + assertSame(clusters, mergedResponse.getClusters()); + assertEquals(numResponses, mergedResponse.getTotalShards()); + assertEquals(numResponses, mergedResponse.getSuccessfulShards()); + assertEquals(0, mergedResponse.getSkippedShards()); + assertEquals(0, mergedResponse.getFailedShards()); + assertEquals(0, mergedResponse.getShardFailures().length); + assertEquals(0, mergedResponse.getHits().getHits().length); + assertEquals(2, mergedResponse.getAggregations().asList().size()); + Max max = mergedResponse.getAggregations().get(maxAggName); + assertEquals(maxValue, max.value(), 0d); + Range range = mergedResponse.getAggregations().get(rangeAggName); + assertEquals(1, range.getBuckets().size()); + Range.Bucket bucket = range.getBuckets().get(0); + assertEquals("0.0", bucket.getFromAsString()); + assertEquals("10000.0", bucket.getToAsString()); + assertEquals(totalCount, bucket.getDocCount()); + } finally { + mergedResponse.decRef(); + } } - awaitResponsesAdded(); - assertEquals(numResponses, searchResponseMerger.numResponses()); - SearchResponse.Clusters clusters = SearchResponseTests.randomClusters(); - SearchResponse mergedResponse = searchResponseMerger.getMergedResponse(clusters); - assertSame(clusters, mergedResponse.getClusters()); - assertEquals(numResponses, mergedResponse.getTotalShards()); - assertEquals(numResponses, mergedResponse.getSuccessfulShards()); - assertEquals(0, mergedResponse.getSkippedShards()); - assertEquals(0, mergedResponse.getFailedShards()); - assertEquals(0, mergedResponse.getShardFailures().length); - assertEquals(0, mergedResponse.getHits().getHits().length); - assertEquals(2, mergedResponse.getAggregations().asList().size()); - Max max = mergedResponse.getAggregations().get(maxAggName); - assertEquals(maxValue, max.value(), 0d); - Range range = mergedResponse.getAggregations().get(rangeAggName); - assertEquals(1, range.getBuckets().size()); - Range.Bucket bucket = range.getBuckets().get(0); - assertEquals("0.0", bucket.getFromAsString()); - assertEquals("10000.0", bucket.getToAsString()); - assertEquals(totalCount, bucket.getDocCount()); } public void testMergeSearchHits() throws InterruptedException { @@ -606,253 +707,294 @@ public void testMergeSearchHits() throws InterruptedException { TotalHits.Relation totalHitsRelation = randomTrackTotalHits.v2(); PriorityQueue priorityQueue = new PriorityQueue<>(new SearchHitComparator(sortFields)); - SearchResponseMerger searchResponseMerger 
= new SearchResponseMerger( - from, - size, - trackTotalHitsUpTo, - timeProvider, - emptyReduceContextBuilder() - ); - - TotalHits expectedTotalHits = null; - int expectedTotal = 0; - int expectedSuccessful = 0; - int expectedSkipped = 0; - int expectedReducePhases = 1; - boolean expectedTimedOut = false; - Boolean expectedTerminatedEarly = null; - float expectedMaxScore = Float.NEGATIVE_INFINITY; - int numIndices = requestedSize == 0 ? 0 : randomIntBetween(1, requestedSize); - Iterator> indicesIterator = randomRealisticIndices(numIndices, numResponses).entrySet().iterator(); - boolean hasHits = false; - for (int i = 0; i < numResponses; i++) { - Map.Entry entry = indicesIterator.next(); - String clusterAlias = entry.getKey(); - Index[] indices = entry.getValue(); - int total = randomIntBetween(1, 1000); - expectedTotal += total; - int successful = randomIntBetween(1, total); - expectedSuccessful += successful; - int skipped = randomIntBetween(1, total); - expectedSkipped += skipped; - - TotalHits totalHits = null; - if (trackTotalHitsUpTo != SearchContext.TRACK_TOTAL_HITS_DISABLED) { - totalHits = new TotalHits(randomLongBetween(0, 1000), totalHitsRelation); - long previousValue = expectedTotalHits == null ? 0 : expectedTotalHits.value; - expectedTotalHits = new TotalHits(Math.min(previousValue + totalHits.value, trackTotalHitsUpTo), totalHitsRelation); - } + try ( + SearchResponseMerger searchResponseMerger = new SearchResponseMerger( + from, + size, + trackTotalHitsUpTo, + timeProvider, + emptyReduceContextBuilder() + ) + ) { - final int numDocs = totalHits == null || totalHits.value >= requestedSize ? requestedSize : (int) totalHits.value; - int scoreFactor = randomIntBetween(1, numResponses); - float maxScore = scoreSort ? numDocs * scoreFactor : Float.NaN; - SearchHit[] hits = randomSearchHitArray( - numDocs, - numResponses, - clusterAlias, - indices, - maxScore, - scoreFactor, - sortFields, - priorityQueue - ); - hasHits |= hits.length > 0; - expectedMaxScore = Math.max(expectedMaxScore, maxScore); + TotalHits expectedTotalHits = null; + int expectedTotal = 0; + int expectedSuccessful = 0; + int expectedSkipped = 0; + int expectedReducePhases = 1; + boolean expectedTimedOut = false; + Boolean expectedTerminatedEarly = null; + float expectedMaxScore = Float.NEGATIVE_INFINITY; + int numIndices = requestedSize == 0 ? 0 : randomIntBetween(1, requestedSize); + Iterator> indicesIterator = randomRealisticIndices(numIndices, numResponses).entrySet().iterator(); + boolean hasHits = false; + for (int i = 0; i < numResponses; i++) { + Map.Entry entry = indicesIterator.next(); + String clusterAlias = entry.getKey(); + Index[] indices = entry.getValue(); + int total = randomIntBetween(1, 1000); + expectedTotal += total; + int successful = randomIntBetween(1, total); + expectedSuccessful += successful; + int skipped = randomIntBetween(1, total); + expectedSkipped += skipped; - Object[] collapseValues = null; - if (collapseField != null) { - collapseValues = new Object[numDocs]; - for (int j = 0; j < numDocs; j++) { - // set different collapse values for each cluster for simplicity - collapseValues[j] = j + 1000 * i; + TotalHits totalHits = null; + if (trackTotalHitsUpTo != SearchContext.TRACK_TOTAL_HITS_DISABLED) { + totalHits = new TotalHits(randomLongBetween(0, 1000), totalHitsRelation); + long previousValue = expectedTotalHits == null ? 
0 : expectedTotalHits.value; + expectedTotalHits = new TotalHits(Math.min(previousValue + totalHits.value, trackTotalHitsUpTo), totalHitsRelation); } - } - SearchHits searchHits = new SearchHits( - hits, - totalHits, - maxScore == Float.NEGATIVE_INFINITY ? Float.NaN : maxScore, - sortFields, - collapseField, - collapseValues - ); + final int numDocs = totalHits == null || totalHits.value >= requestedSize ? requestedSize : (int) totalHits.value; + int scoreFactor = randomIntBetween(1, numResponses); + float maxScore = scoreSort ? numDocs * scoreFactor : Float.NaN; + SearchHit[] hits = randomSearchHitArray( + numDocs, + numResponses, + clusterAlias, + indices, + maxScore, + scoreFactor, + sortFields, + priorityQueue + ); + hasHits |= hits.length > 0; + expectedMaxScore = Math.max(expectedMaxScore, maxScore); - int numReducePhases = randomIntBetween(1, 5); - expectedReducePhases += numReducePhases; - boolean timedOut = rarely(); - expectedTimedOut = expectedTimedOut || timedOut; - Boolean terminatedEarly = frequently() ? null : true; - expectedTerminatedEarly = expectedTerminatedEarly == null ? terminatedEarly : expectedTerminatedEarly; + Object[] collapseValues = null; + if (collapseField != null) { + collapseValues = new Object[numDocs]; + for (int j = 0; j < numDocs; j++) { + // set different collapse values for each cluster for simplicity + collapseValues[j] = j + 1000 * i; + } + } - InternalSearchResponse internalSearchResponse = new InternalSearchResponse( - searchHits, - null, - null, - null, - timedOut, - terminatedEarly, - numReducePhases - ); + SearchHits searchHits = new SearchHits( + hits, + totalHits, + maxScore == Float.NEGATIVE_INFINITY ? Float.NaN : maxScore, + sortFields, + collapseField, + collapseValues + ); - SearchResponse searchResponse = new SearchResponse( - internalSearchResponse, - null, - total, - successful, - skipped, - randomLong(), - ShardSearchFailure.EMPTY_ARRAY, - SearchResponseTests.randomClusters() - ); + int numReducePhases = randomIntBetween(1, 5); + expectedReducePhases += numReducePhases; + boolean timedOut = rarely(); + expectedTimedOut = expectedTimedOut || timedOut; + Boolean terminatedEarly = frequently() ? null : true; + expectedTerminatedEarly = expectedTerminatedEarly == null ? 
terminatedEarly : expectedTerminatedEarly; - addResponse(searchResponseMerger, searchResponse); - } + InternalSearchResponse internalSearchResponse = new InternalSearchResponse( + searchHits, + null, + null, + null, + timedOut, + terminatedEarly, + numReducePhases + ); - awaitResponsesAdded(); - assertEquals(numResponses, searchResponseMerger.numResponses()); - final SearchResponse.Clusters clusters = SearchResponseTests.randomClusters(); - SearchResponse searchResponse = searchResponseMerger.getMergedResponse(clusters); + SearchResponse searchResponse = new SearchResponse( + internalSearchResponse, + null, + total, + successful, + skipped, + randomLong(), + ShardSearchFailure.EMPTY_ARRAY, + SearchResponseTests.randomClusters() + ); + try { + addResponse(searchResponseMerger, searchResponse); + } finally { + searchResponse.decRef(); + } + } - assertEquals(TimeUnit.NANOSECONDS.toMillis(currentRelativeTime), searchResponse.getTook().millis()); - assertEquals(expectedTotal, searchResponse.getTotalShards()); - assertEquals(expectedSuccessful, searchResponse.getSuccessfulShards()); - assertEquals(expectedSkipped, searchResponse.getSkippedShards()); - assertEquals(0, searchResponse.getShardFailures().length); - assertEquals(expectedReducePhases, searchResponse.getNumReducePhases()); - assertEquals(expectedTimedOut, searchResponse.isTimedOut()); - assertEquals(expectedTerminatedEarly, searchResponse.isTerminatedEarly()); + awaitResponsesAdded(); + assertEquals(numResponses, searchResponseMerger.numResponses()); + final SearchResponse.Clusters clusters = SearchResponseTests.randomClusters(); + SearchResponse searchResponse = searchResponseMerger.getMergedResponse(clusters); + try { + assertEquals(TimeUnit.NANOSECONDS.toMillis(currentRelativeTime), searchResponse.getTook().millis()); + assertEquals(expectedTotal, searchResponse.getTotalShards()); + assertEquals(expectedSuccessful, searchResponse.getSuccessfulShards()); + assertEquals(expectedSkipped, searchResponse.getSkippedShards()); + assertEquals(0, searchResponse.getShardFailures().length); + assertEquals(expectedReducePhases, searchResponse.getNumReducePhases()); + assertEquals(expectedTimedOut, searchResponse.isTimedOut()); + assertEquals(expectedTerminatedEarly, searchResponse.isTerminatedEarly()); - assertSame(clusters, searchResponse.getClusters()); - assertNull(searchResponse.getScrollId()); + assertSame(clusters, searchResponse.getClusters()); + assertNull(searchResponse.getScrollId()); - SearchHits searchHits = searchResponse.getHits(); - // the sort fields and the collapse field are not returned when hits are empty - if (hasHits) { - assertArrayEquals(sortFields, searchHits.getSortFields()); - assertEquals(collapseField, searchHits.getCollapseField()); - } else { - assertNull(searchHits.getSortFields()); - assertNull(searchHits.getCollapseField()); - } - if (expectedTotalHits == null) { - assertNull(searchHits.getTotalHits()); - } else { - assertNotNull(searchHits.getTotalHits()); - assertEquals(expectedTotalHits.value, searchHits.getTotalHits().value); - assertSame(expectedTotalHits.relation, searchHits.getTotalHits().relation); - } - if (expectedMaxScore == Float.NEGATIVE_INFINITY) { - assertTrue(Float.isNaN(searchHits.getMaxScore())); - } else { - assertEquals(expectedMaxScore, searchHits.getMaxScore(), 0f); - } + SearchHits searchHits = searchResponse.getHits(); + // the sort fields and the collapse field are not returned when hits are empty + if (hasHits) { + assertArrayEquals(sortFields, searchHits.getSortFields()); + 
assertEquals(collapseField, searchHits.getCollapseField()); + } else { + assertNull(searchHits.getSortFields()); + assertNull(searchHits.getCollapseField()); + } + if (expectedTotalHits == null) { + assertNull(searchHits.getTotalHits()); + } else { + assertNotNull(searchHits.getTotalHits()); + assertEquals(expectedTotalHits.value, searchHits.getTotalHits().value); + assertSame(expectedTotalHits.relation, searchHits.getTotalHits().relation); + } + if (expectedMaxScore == Float.NEGATIVE_INFINITY) { + assertTrue(Float.isNaN(searchHits.getMaxScore())); + } else { + assertEquals(expectedMaxScore, searchHits.getMaxScore(), 0f); + } - for (int i = 0; i < from; i++) { - priorityQueue.poll(); - } - SearchHit[] hits = searchHits.getHits(); - if (collapseField != null - // the collapse field is not returned when hits are empty - && hasHits) { - assertEquals(hits.length, searchHits.getCollapseValues().length); - } else { - assertNull(searchHits.getCollapseValues()); - } - assertThat(hits.length, lessThanOrEqualTo(size)); - for (SearchHit hit : hits) { - SearchHit expected = priorityQueue.poll(); - assertSame(expected, hit); + for (int i = 0; i < from; i++) { + priorityQueue.poll(); + } + SearchHit[] hits = searchHits.getHits(); + if (collapseField != null + // the collapse field is not returned when hits are empty + && hasHits) { + assertEquals(hits.length, searchHits.getCollapseValues().length); + } else { + assertNull(searchHits.getCollapseValues()); + } + assertThat(hits.length, lessThanOrEqualTo(size)); + for (SearchHit hit : hits) { + SearchHit expected = priorityQueue.poll(); + assertSame(expected, hit); + } + } finally { + searchResponse.decRef(); + } } } public void testMergeNoResponsesAdded() { long currentRelativeTime = randomNonNegativeLong(); final SearchTimeProvider timeProvider = new SearchTimeProvider(randomLong(), 0, () -> currentRelativeTime); - SearchResponseMerger merger = new SearchResponseMerger(0, 10, Integer.MAX_VALUE, timeProvider, emptyReduceContextBuilder()); - SearchResponse.Clusters clusters = SearchResponseTests.randomClusters(); - assertEquals(0, merger.numResponses()); - SearchResponse response = merger.getMergedResponse(clusters); - assertSame(clusters, response.getClusters()); - assertEquals(TimeUnit.NANOSECONDS.toMillis(currentRelativeTime), response.getTook().millis()); - assertEquals(0, response.getTotalShards()); - assertEquals(0, response.getSuccessfulShards()); - assertEquals(0, response.getSkippedShards()); - assertEquals(0, response.getFailedShards()); - assertEquals(0, response.getNumReducePhases()); - assertFalse(response.isTimedOut()); - assertNotNull(response.getHits().getTotalHits()); - assertEquals(0, response.getHits().getTotalHits().value); - assertEquals(0, response.getHits().getHits().length); - assertEquals(TotalHits.Relation.EQUAL_TO, response.getHits().getTotalHits().relation); - assertNull(response.getScrollId()); - assertSame(InternalAggregations.EMPTY, response.getAggregations()); - assertNull(response.getSuggest()); - assertEquals(0, response.getProfileResults().size()); - assertNull(response.isTerminatedEarly()); - assertEquals(0, response.getShardFailures().length); + try (SearchResponseMerger merger = new SearchResponseMerger(0, 10, Integer.MAX_VALUE, timeProvider, emptyReduceContextBuilder())) { + SearchResponse.Clusters clusters = SearchResponseTests.randomClusters(); + assertEquals(0, merger.numResponses()); + SearchResponse response = merger.getMergedResponse(clusters); + try { + assertSame(clusters, response.getClusters()); + 
assertEquals(TimeUnit.NANOSECONDS.toMillis(currentRelativeTime), response.getTook().millis()); + assertEquals(0, response.getTotalShards()); + assertEquals(0, response.getSuccessfulShards()); + assertEquals(0, response.getSkippedShards()); + assertEquals(0, response.getFailedShards()); + assertEquals(0, response.getNumReducePhases()); + assertFalse(response.isTimedOut()); + assertNotNull(response.getHits().getTotalHits()); + assertEquals(0, response.getHits().getTotalHits().value); + assertEquals(0, response.getHits().getHits().length); + assertEquals(TotalHits.Relation.EQUAL_TO, response.getHits().getTotalHits().relation); + assertNull(response.getScrollId()); + assertSame(InternalAggregations.EMPTY, response.getAggregations()); + assertNull(response.getSuggest()); + assertEquals(0, response.getProfileResults().size()); + assertNull(response.isTerminatedEarly()); + assertEquals(0, response.getShardFailures().length); + } finally { + response.decRef(); + } + } } public void testMergeEmptySearchHitsWithNonEmpty() { long currentRelativeTime = randomLong(); final SearchTimeProvider timeProvider = new SearchTimeProvider(randomLong(), 0, () -> currentRelativeTime); - SearchResponseMerger merger = new SearchResponseMerger(0, 10, Integer.MAX_VALUE, timeProvider, emptyReduceContextBuilder()); - SearchResponse.Clusters clusters = SearchResponseTests.randomClusters(); - int numFields = randomIntBetween(1, 3); - SortField[] sortFields = new SortField[numFields]; - for (int i = 0; i < numFields; i++) { - sortFields[i] = new SortField("field-" + i, SortField.Type.INT, randomBoolean()); - } - PriorityQueue priorityQueue = new PriorityQueue<>(new SearchHitComparator(sortFields)); - SearchHit[] hits = randomSearchHitArray( - 10, - 1, - "remote", - new Index[] { new Index("index", "uuid") }, - Float.NaN, - 1, - sortFields, - priorityQueue - ); - { - SearchHits searchHits = new SearchHits(hits, new TotalHits(10, TotalHits.Relation.EQUAL_TO), Float.NaN, sortFields, null, null); - InternalSearchResponse response = new InternalSearchResponse(searchHits, null, null, null, false, false, 1); - SearchResponse searchResponse = new SearchResponse( - response, - null, - 1, - 1, - 0, - 1L, - ShardSearchFailure.EMPTY_ARRAY, - SearchResponse.Clusters.EMPTY - ); - merger.add(searchResponse); - } - { - SearchHits empty = new SearchHits(new SearchHit[0], new TotalHits(0, TotalHits.Relation.EQUAL_TO), Float.NaN, null, null, null); - InternalSearchResponse response = new InternalSearchResponse(empty, null, null, null, false, false, 1); - SearchResponse searchResponse = new SearchResponse( - response, - null, + try (SearchResponseMerger merger = new SearchResponseMerger(0, 10, Integer.MAX_VALUE, timeProvider, emptyReduceContextBuilder())) { + SearchResponse.Clusters clusters = SearchResponseTests.randomClusters(); + int numFields = randomIntBetween(1, 3); + SortField[] sortFields = new SortField[numFields]; + for (int i = 0; i < numFields; i++) { + sortFields[i] = new SortField("field-" + i, SortField.Type.INT, randomBoolean()); + } + PriorityQueue priorityQueue = new PriorityQueue<>(new SearchHitComparator(sortFields)); + SearchHit[] hits = randomSearchHitArray( + 10, 1, + "remote", + new Index[] { new Index("index", "uuid") }, + Float.NaN, 1, - 0, - 1L, - ShardSearchFailure.EMPTY_ARRAY, - SearchResponse.Clusters.EMPTY + sortFields, + priorityQueue ); - merger.add(searchResponse); + { + SearchHits searchHits = new SearchHits( + hits, + new TotalHits(10, TotalHits.Relation.EQUAL_TO), + Float.NaN, + sortFields, + null, + null 
+ ); + InternalSearchResponse response = new InternalSearchResponse(searchHits, null, null, null, false, false, 1); + SearchResponse searchResponse = new SearchResponse( + response, + null, + 1, + 1, + 0, + 1L, + ShardSearchFailure.EMPTY_ARRAY, + SearchResponse.Clusters.EMPTY + ); + try { + merger.add(searchResponse); + } finally { + searchResponse.decRef(); + } + } + { + SearchHits empty = new SearchHits( + new SearchHit[0], + new TotalHits(0, TotalHits.Relation.EQUAL_TO), + Float.NaN, + null, + null, + null + ); + InternalSearchResponse response = new InternalSearchResponse(empty, null, null, null, false, false, 1); + SearchResponse searchResponse = new SearchResponse( + response, + null, + 1, + 1, + 0, + 1L, + ShardSearchFailure.EMPTY_ARRAY, + SearchResponse.Clusters.EMPTY + ); + try { + merger.add(searchResponse); + } finally { + searchResponse.decRef(); + } + } + assertEquals(2, merger.numResponses()); + SearchResponse mergedResponse = merger.getMergedResponse(clusters); + try { + assertEquals(10, mergedResponse.getHits().getTotalHits().value); + assertEquals(10, mergedResponse.getHits().getHits().length); + assertEquals(2, mergedResponse.getTotalShards()); + assertEquals(2, mergedResponse.getSuccessfulShards()); + assertEquals(0, mergedResponse.getSkippedShards()); + assertArrayEquals(sortFields, mergedResponse.getHits().getSortFields()); + assertArrayEquals(hits, mergedResponse.getHits().getHits()); + assertEquals(clusters, mergedResponse.getClusters()); + } finally { + mergedResponse.decRef(); + } } - assertEquals(2, merger.numResponses()); - SearchResponse mergedResponse = merger.getMergedResponse(clusters); - assertEquals(10, mergedResponse.getHits().getTotalHits().value); - assertEquals(10, mergedResponse.getHits().getHits().length); - assertEquals(2, mergedResponse.getTotalShards()); - assertEquals(2, mergedResponse.getSuccessfulShards()); - assertEquals(0, mergedResponse.getSkippedShards()); - assertArrayEquals(sortFields, mergedResponse.getHits().getSortFields()); - assertArrayEquals(hits, mergedResponse.getHits().getHits()); - assertEquals(clusters, mergedResponse.getClusters()); } public void testMergeOnlyEmptyHits() { @@ -862,32 +1004,41 @@ public void testMergeOnlyEmptyHits() { Tuple randomTrackTotalHits = randomTrackTotalHits(); int trackTotalHitsUpTo = randomTrackTotalHits.v1(); TotalHits.Relation totalHitsRelation = randomTrackTotalHits.v2(); - SearchResponseMerger merger = new SearchResponseMerger(0, 10, trackTotalHitsUpTo, timeProvider, emptyReduceContextBuilder()); - int numResponses = randomIntBetween(1, 5); - TotalHits expectedTotalHits = null; - for (int i = 0; i < numResponses; i++) { - TotalHits totalHits = null; - if (trackTotalHitsUpTo != SearchContext.TRACK_TOTAL_HITS_DISABLED) { - totalHits = new TotalHits(randomLongBetween(0, 1000), totalHitsRelation); - long previousValue = expectedTotalHits == null ? 0 : expectedTotalHits.value; - expectedTotalHits = new TotalHits(Math.min(previousValue + totalHits.value, trackTotalHitsUpTo), totalHitsRelation); + try (SearchResponseMerger merger = new SearchResponseMerger(0, 10, trackTotalHitsUpTo, timeProvider, emptyReduceContextBuilder())) { + int numResponses = randomIntBetween(1, 5); + TotalHits expectedTotalHits = null; + for (int i = 0; i < numResponses; i++) { + TotalHits totalHits = null; + if (trackTotalHitsUpTo != SearchContext.TRACK_TOTAL_HITS_DISABLED) { + totalHits = new TotalHits(randomLongBetween(0, 1000), totalHitsRelation); + long previousValue = expectedTotalHits == null ? 
0 : expectedTotalHits.value; + expectedTotalHits = new TotalHits(Math.min(previousValue + totalHits.value, trackTotalHitsUpTo), totalHitsRelation); + } + SearchHits empty = new SearchHits(new SearchHit[0], totalHits, Float.NaN, null, null, null); + InternalSearchResponse response = new InternalSearchResponse(empty, null, null, null, false, false, 1); + SearchResponse searchResponse = new SearchResponse( + response, + null, + 1, + 1, + 0, + 1L, + ShardSearchFailure.EMPTY_ARRAY, + SearchResponse.Clusters.EMPTY + ); + try { + merger.add(searchResponse); + } finally { + searchResponse.decRef(); + } + } + SearchResponse mergedResponse = merger.getMergedResponse(clusters); + try { + assertEquals(expectedTotalHits, mergedResponse.getHits().getTotalHits()); + } finally { + mergedResponse.decRef(); } - SearchHits empty = new SearchHits(new SearchHit[0], totalHits, Float.NaN, null, null, null); - InternalSearchResponse response = new InternalSearchResponse(empty, null, null, null, false, false, 1); - SearchResponse searchResponse = new SearchResponse( - response, - null, - 1, - 1, - 0, - 1L, - ShardSearchFailure.EMPTY_ARRAY, - SearchResponse.Clusters.EMPTY - ); - merger.add(searchResponse); } - SearchResponse mergedResponse = merger.getMergedResponse(clusters); - assertEquals(expectedTotalHits, mergedResponse.getHits().getTotalHits()); } private static Tuple randomTrackTotalHits() { diff --git a/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java b/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java index 7090d590a4901..6230a24a0768f 100644 --- a/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java @@ -1094,23 +1094,33 @@ public void testCreateSearchResponseMerger() { assertEquals(-1, source.size()); assertEquals(-1, source.from()); assertNull(source.trackTotalHitsUpTo()); - SearchResponseMerger merger = TransportSearchAction.createSearchResponseMerger( - source, - timeProvider, - emptyReduceContextBuilder() - ); - assertEquals(0, merger.from); - assertEquals(10, merger.size); - assertEquals(SearchContext.DEFAULT_TRACK_TOTAL_HITS_UP_TO, merger.trackTotalHitsUpTo); - assertEquals(0, source.from()); - assertEquals(10, source.size()); - assertNull(source.trackTotalHitsUpTo()); + try ( + SearchResponseMerger merger = TransportSearchAction.createSearchResponseMerger( + source, + timeProvider, + emptyReduceContextBuilder() + ) + ) { + assertEquals(0, merger.from); + assertEquals(10, merger.size); + assertEquals(SearchContext.DEFAULT_TRACK_TOTAL_HITS_UP_TO, merger.trackTotalHitsUpTo); + assertEquals(0, source.from()); + assertEquals(10, source.size()); + assertNull(source.trackTotalHitsUpTo()); + } } { - SearchResponseMerger merger = TransportSearchAction.createSearchResponseMerger(null, timeProvider, emptyReduceContextBuilder()); - assertEquals(0, merger.from); - assertEquals(10, merger.size); - assertEquals(SearchContext.DEFAULT_TRACK_TOTAL_HITS_UP_TO, merger.trackTotalHitsUpTo); + try ( + SearchResponseMerger merger = TransportSearchAction.createSearchResponseMerger( + null, + timeProvider, + emptyReduceContextBuilder() + ) + ) { + assertEquals(0, merger.from); + assertEquals(10, merger.size); + assertEquals(SearchContext.DEFAULT_TRACK_TOTAL_HITS_UP_TO, merger.trackTotalHitsUpTo); + } } { SearchSourceBuilder source = new SearchSourceBuilder(); @@ -1120,17 +1130,20 @@ public void testCreateSearchResponseMerger() { 
source.size(originalSize); int trackTotalHitsUpTo = randomIntBetween(0, Integer.MAX_VALUE); source.trackTotalHitsUpTo(trackTotalHitsUpTo); - SearchResponseMerger merger = TransportSearchAction.createSearchResponseMerger( - source, - timeProvider, - emptyReduceContextBuilder() - ); - assertEquals(0, source.from()); - assertEquals(originalFrom + originalSize, source.size()); - assertEquals(trackTotalHitsUpTo, (int) source.trackTotalHitsUpTo()); - assertEquals(originalFrom, merger.from); - assertEquals(originalSize, merger.size); - assertEquals(trackTotalHitsUpTo, merger.trackTotalHitsUpTo); + try ( + SearchResponseMerger merger = TransportSearchAction.createSearchResponseMerger( + source, + timeProvider, + emptyReduceContextBuilder() + ) + ) { + assertEquals(0, source.from()); + assertEquals(originalFrom + originalSize, source.size()); + assertEquals(trackTotalHitsUpTo, (int) source.trackTotalHitsUpTo()); + assertEquals(originalFrom, merger.from); + assertEquals(originalSize, merger.size); + assertEquals(trackTotalHitsUpTo, merger.trackTotalHitsUpTo); + } } } diff --git a/test/framework/src/main/java/org/elasticsearch/search/geo/BasePointShapeQueryTestCase.java b/test/framework/src/main/java/org/elasticsearch/search/geo/BasePointShapeQueryTestCase.java index cef8d555b111d..13131a5e3eef7 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/geo/BasePointShapeQueryTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/search/geo/BasePointShapeQueryTestCase.java @@ -354,7 +354,8 @@ public void testWithInQueryLine() throws Exception { try { client().prepareSearch(defaultIndexName) .setQuery(queryBuilder().shapeQuery(defaultFieldName, line).relation(ShapeRelation.WITHIN)) - .get(); + .get() + .decRef(); } catch (SearchPhaseExecutionException e) { assertThat(e.getCause().getMessage(), containsString("Field [" + defaultFieldName + "] found an unsupported shape Line")); } From 316603548d0d20dcf8a62d62b6c92d708508b4b1 Mon Sep 17 00:00:00 2001 From: Tim Grein Date: Wed, 6 Dec 2023 16:22:06 +0100 Subject: [PATCH 04/45] [Connectors API] Add sync job status check to cancel connector sync job integration test. 
(#103057) Extend cancel connector sync integration test --- .../test/entsearch/430_connector_sync_job_cancel.yml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/430_connector_sync_job_cancel.yml b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/430_connector_sync_job_cancel.yml index e9c612cbf9f27..d934b7c674f25 100644 --- a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/430_connector_sync_job_cancel.yml +++ b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/430_connector_sync_job_cancel.yml @@ -20,13 +20,21 @@ setup: id: test-connector job_type: full trigger_method: on_demand + - set: { id: sync-job-id-to-cancel } + - do: connector_sync_job.cancel: connector_sync_job_id: $sync-job-id-to-cancel - match: { acknowledged: true } + - do: + connector_sync_job.get: + connector_sync_job_id: $sync-job-id-to-cancel + + - match: { status: "canceling"} + --- "Cancel a Connector Sync Job - Connector Sync Job does not exist": From ed2155cc47366982ab22e0a276ef055de0b4279a Mon Sep 17 00:00:00 2001 From: sabi0 <2sabio@gmail.com> Date: Wed, 6 Dec 2023 16:27:16 +0100 Subject: [PATCH 05/45] Fix args length == 1 case handling in ESLoggerUsageChecker (#102382) * Fix args length == 1 case handling in ESLoggerUsageChecker There was an operator precedence mistake in: (lengthWithoutMarker == 1 || lengthWithoutMarker == 2) && lengthWithoutMarker == 2 ? ... The logical AND operator (&&) has higher precedence than the ternary operator (?:), so the above expression is equivalent to lengthWithoutMarker == 2 ? ... --------- Co-authored-by: Elastic Machine --- .../elasticsearch/test/loggerusage/ESLoggerUsageChecker.java | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/test/logger-usage/src/main/java/org/elasticsearch/test/loggerusage/ESLoggerUsageChecker.java b/test/logger-usage/src/main/java/org/elasticsearch/test/loggerusage/ESLoggerUsageChecker.java index bd51c74ee8e47..d7cde5676a27f 100644 --- a/test/logger-usage/src/main/java/org/elasticsearch/test/loggerusage/ESLoggerUsageChecker.java +++ b/test/logger-usage/src/main/java/org/elasticsearch/test/loggerusage/ESLoggerUsageChecker.java @@ -364,9 +364,8 @@ private void verifyLoggerUsage( && argumentTypes[markerOffset + 1].equals(OBJECT_CLASS)) { // MULTI-PARAM METHOD: debug(Marker?, String, Object p0, ...) checkFixedArityArgs(methodNode, logMessageFrames[i], lineNumber, methodInsn, markerOffset + 0, lengthWithoutMarker - 1); - } else if ((lengthWithoutMarker == 1 || lengthWithoutMarker == 2) && lengthWithoutMarker == 2 - ? argumentTypes[markerOffset + 1].equals(THROWABLE_CLASS) - : true) { + } else if (lengthWithoutMarker == 1 + || (lengthWithoutMarker == 2 && argumentTypes[markerOffset + 1].equals(THROWABLE_CLASS))) { // all the rest: debug(Marker?, (Message|MessageSupplier|CharSequence|Object|String|Supplier), Throwable?) checkFixedArityArgs(methodNode, logMessageFrames[i], lineNumber, methodInsn, markerOffset + 0, 0); } else { From 1617a8db3643573671469b343e9c6dfb787026b2 Mon Sep 17 00:00:00 2001 From: Mary Gouseti Date: Wed, 6 Dec 2023 17:29:39 +0200 Subject: [PATCH 06/45] [ILM] More resilient when a policy is added to searchable snapshot (#102741) In this PR we enable ILM to handle the following scenarios: - An ILM policy with a searchable snapshot action in hot or cold is added on a partially mounted searchable snapshot.
- An ILM policy with a searchable snapshot action in frozen is added on a fully mounted searchable snapshot. The searchable snapshot could have had a previous ILM policy that has been removed via POST /_ilm/remove or it might not have been managed at all. --- docs/changelog/102741.yaml | 6 + .../reference/ilm/actions/ilm-delete.asciidoc | 5 + .../xpack/core/ilm/MountSnapshotStep.java | 50 ++-- .../core/ilm/SearchableSnapshotAction.java | 64 +++-- .../actions/SearchableSnapshotActionIT.java | 218 +++++++++++++++++- 5 files changed, 304 insertions(+), 39 deletions(-) create mode 100644 docs/changelog/102741.yaml diff --git a/docs/changelog/102741.yaml b/docs/changelog/102741.yaml new file mode 100644 index 0000000000000..84a4b8092632f --- /dev/null +++ b/docs/changelog/102741.yaml @@ -0,0 +1,6 @@ +pr: 102741 +summary: "[ILM] More resilient when a policy is added to searchable snapshot" +area: ILM+SLM +type: bug +issues: + - 101958 diff --git a/docs/reference/ilm/actions/ilm-delete.asciidoc b/docs/reference/ilm/actions/ilm-delete.asciidoc index fbd7f1b0a238a..eac3b9804709a 100644 --- a/docs/reference/ilm/actions/ilm-delete.asciidoc +++ b/docs/reference/ilm/actions/ilm-delete.asciidoc @@ -16,6 +16,11 @@ Defaults to `true`. This option is applicable when the <> action is used in any previous phase. +WARNING: If a policy with a searchable snapshot action is applied on an existing searchable snapshot index, +the snapshot backing this index will NOT be deleted because it was not created by this policy. If you want +to clean up this snapshot, please delete it manually after the index is deleted using the <>; you +can find the repository and snapshot name using the <>. + [[ilm-delete-action-ex]] ==== Example diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/MountSnapshotStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/MountSnapshotStep.java index 057f0c8930e66..96f280b4e03c9 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/MountSnapshotStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/MountSnapshotStep.java @@ -68,24 +68,32 @@ void performDuringNoSnapshot(IndexMetadata indexMetadata, ClusterState currentCl String indexName = indexMetadata.getIndex().getName(); LifecycleExecutionState lifecycleState = indexMetadata.getLifecycleExecutionState(); + SearchableSnapshotAction.SearchableSnapshotMetadata searchableSnapshotMetadata = SearchableSnapshotAction + .extractSearchableSnapshotFromSettings(indexMetadata); String policyName = indexMetadata.getLifecyclePolicyName(); - final String snapshotRepository = lifecycleState.snapshotRepository(); + String snapshotRepository = lifecycleState.snapshotRepository(); if (Strings.hasText(snapshotRepository) == false) { - listener.onFailure( - new IllegalStateException( - "snapshot repository is not present for policy [" + policyName + "] and index [" + indexName + "]" - ) - ); - return; + if (searchableSnapshotMetadata == null) { + listener.onFailure( + new IllegalStateException( + "snapshot repository is not present for policy [" + policyName + "] and index [" + indexName + "]" + ) + ); + return; + } else { + snapshotRepository = searchableSnapshotMetadata.repositoryName(); + } } - final String snapshotName = lifecycleState.snapshotName(); - if (Strings.hasText(snapshotName) == false) { + String snapshotName = lifecycleState.snapshotName(); + if (Strings.hasText(snapshotName) == false && searchableSnapshotMetadata == null) { listener.onFailure(
new IllegalStateException("snapshot name was not generated for policy [" + policyName + "] and index [" + indexName + "]") ); return; + } else if (searchableSnapshotMetadata != null) { + snapshotName = searchableSnapshotMetadata.snapshotName(); } String mountedIndexName = restoredIndexPrefix + indexName; @@ -102,16 +110,20 @@ void performDuringNoSnapshot(IndexMetadata indexMetadata, ClusterState currentCl final String snapshotIndexName = lifecycleState.snapshotIndexName(); if (snapshotIndexName == null) { - // This index had its searchable snapshot created prior to a version where we captured - // the original index name, so make our best guess at the name - indexName = bestEffortIndexNameResolution(indexName); - logger.debug( - "index [{}] using policy [{}] does not have a stored snapshot index name, " - + "using our best effort guess of [{}] for the original snapshotted index name", - indexMetadata.getIndex().getName(), - policyName, - indexName - ); + if (searchableSnapshotMetadata == null) { + // This index had its searchable snapshot created prior to a version where we captured + // the original index name, so make our best guess at the name + indexName = bestEffortIndexNameResolution(indexName); + logger.debug( + "index [{}] using policy [{}] does not have a stored snapshot index name, " + + "using our best effort guess of [{}] for the original snapshotted index name", + indexMetadata.getIndex().getName(), + policyName, + indexName + ); + } else { + indexName = searchableSnapshotMetadata.sourceIndex(); + } } else { // Use the name of the snapshot as specified in the metadata, because the current index // name not might not reflect the name of the index actually in the snapshot diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/SearchableSnapshotAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/SearchableSnapshotAction.java index 9ae0024c5a573..5b9b559b4d957 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/SearchableSnapshotAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/SearchableSnapshotAction.java @@ -16,6 +16,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.core.Nullable; import org.elasticsearch.license.LicenseUtils; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.xcontent.ConstructingObjectParser; @@ -32,6 +33,7 @@ import java.util.Objects; import static org.elasticsearch.snapshots.SearchableSnapshotsSettings.SEARCHABLE_SNAPSHOTS_REPOSITORY_NAME_SETTING_KEY; +import static org.elasticsearch.snapshots.SearchableSnapshotsSettings.SEARCHABLE_SNAPSHOTS_SNAPSHOT_NAME_SETTING_KEY; import static org.elasticsearch.snapshots.SearchableSnapshotsSettings.SEARCHABLE_SNAPSHOT_PARTIAL_SETTING_KEY; import static org.elasticsearch.xpack.core.searchablesnapshots.SearchableSnapshotsConstants.SEARCHABLE_SNAPSHOT_FEATURE; @@ -141,10 +143,12 @@ public List toSteps(Client client, String phase, StepKey nextStepKey, XPac IndexMetadata indexMetadata = clusterState.getMetadata().index(index); assert indexMetadata != null : "index " + index.getName() + " must exist in the cluster state"; String policyName = indexMetadata.getLifecyclePolicyName(); - if (indexMetadata.getSettings().get(LifecycleSettings.SNAPSHOT_INDEX_NAME) != null) { + SearchableSnapshotMetadata searchableSnapshotMetadata = 
extractSearchableSnapshotFromSettings(indexMetadata); + if (searchableSnapshotMetadata != null) { + // TODO: allow this behavior instead of returning false, in this case the index is already a searchable snapshot + // so the most graceful way of recovery might be to use this repo // The index is already a searchable snapshot, let's see if the repository matches - String repo = indexMetadata.getSettings().get(SEARCHABLE_SNAPSHOTS_REPOSITORY_NAME_SETTING_KEY); - if (this.snapshotRepository.equals(repo) == false) { + if (this.snapshotRepository.equals(searchableSnapshotMetadata.repositoryName) == false) { // Okay, different repo, we need to go ahead with the searchable snapshot logger.debug( "[{}] action is configured for index [{}] in policy [{}] which is already mounted as a searchable " SearchableSnapshotAction.NAME, index.getName(), policyName, - repo, + searchableSnapshotMetadata.repositoryName, this.snapshotRepository ); return false; } // Check to the storage type to see if we need to convert between full <-> partial - final boolean partial = indexMetadata.getSettings().getAsBoolean(SEARCHABLE_SNAPSHOT_PARTIAL_SETTING_KEY, false); - MountSearchableSnapshotRequest.Storage existingType = partial + MountSearchableSnapshotRequest.Storage existingType = searchableSnapshotMetadata.partial ? MountSearchableSnapshotRequest.Storage.SHARED_CACHE : MountSearchableSnapshotRequest.Storage.FULL_COPY; MountSearchableSnapshotRequest.Storage type = getConcreteStorageType(preActionBranchingKey); @@ -172,7 +175,7 @@ public List toSteps(Client client, String phase, StepKey nextStepKey, XPac SearchableSnapshotAction.NAME, index.getName(), policyName, - repo, + searchableSnapshotMetadata.repositoryName, type ); return true; @@ -215,7 +218,7 @@ public List toSteps(Client client, String phase, StepKey nextStepKey, XPac // When generating a snapshot, we either jump to the force merge step, or we skip the // forcemerge and go straight to steps for creating the snapshot StepKey keyForSnapshotGeneration = forceMergeIndex ?
forceMergeStepKey : generateSnapshotNameKey; - // Branch, deciding whether there is an existing searchable snapshot snapshot that can be used for mounting the index + // Branch, deciding whether there is an existing searchable snapshot that can be used for mounting the index // (in which case, skip generating a new name and the snapshot cleanup), or if we need to generate a new snapshot BranchingStep skipGeneratingSnapshotStep = new BranchingStep( skipGeneratingSnapshotKey, @@ -225,7 +228,8 @@ public List toSteps(Client client, String phase, StepKey nextStepKey, XPac IndexMetadata indexMetadata = clusterState.getMetadata().index(index); String policyName = indexMetadata.getLifecyclePolicyName(); LifecycleExecutionState lifecycleExecutionState = indexMetadata.getLifecycleExecutionState(); - if (lifecycleExecutionState.snapshotName() == null) { + SearchableSnapshotMetadata searchableSnapshotMetadata = extractSearchableSnapshotFromSettings(indexMetadata); + if (lifecycleExecutionState.snapshotName() == null && searchableSnapshotMetadata == null) { // No name exists, so it must be generated logger.trace( "no snapshot name for index [{}] in policy [{}] exists, so one will be generated", @@ -234,8 +238,20 @@ public List toSteps(Client client, String phase, StepKey nextStepKey, XPac ); return false; } + String snapshotIndexName; + String snapshotName; + String repoName; + if (lifecycleExecutionState.snapshotName() != null) { + snapshotIndexName = lifecycleExecutionState.snapshotIndexName(); + snapshotName = lifecycleExecutionState.snapshotName(); + repoName = lifecycleExecutionState.snapshotRepository(); + } else { + snapshotIndexName = searchableSnapshotMetadata.sourceIndex; + snapshotName = searchableSnapshotMetadata.snapshotName; + repoName = searchableSnapshotMetadata.repositoryName; + } - if (this.snapshotRepository.equals(lifecycleExecutionState.snapshotRepository()) == false) { + if (this.snapshotRepository.equals(repoName) == false) { // A different repository is being used // TODO: allow this behavior instead of throwing an exception throw new IllegalArgumentException("searchable snapshot indices may be converted only within the same repository"); @@ -244,12 +260,14 @@ public List toSteps(Client client, String phase, StepKey nextStepKey, XPac // We can skip the generate, initial cleanup, and snapshot taking for this index, as we already have a generated snapshot. // This will jump ahead directly to the "mount snapshot" step logger.debug( - "an existing snapshot [{}] in repository [{}] (index name: [{}]) " - + "will be used for mounting [{}] as a searchable snapshot", - lifecycleExecutionState.snapshotName(), - lifecycleExecutionState.snapshotRepository(), - lifecycleExecutionState.snapshotIndexName(), - index.getName() + "Policy [{}] will use an existing snapshot [{}] in repository [{}] (index name: [{}]) " + + "to mount [{}] as a searchable snapshot. This snapshot was found in the {}.", + policyName, + snapshotName, + snapshotRepository, + snapshotIndexName, + index.getName(), + lifecycleExecutionState.snapshotName() != null ? 
"lifecycle execution state" : "metadata of " + index.getName() ); return true; } @@ -411,4 +429,18 @@ public boolean equals(Object o) { public int hashCode() { return Objects.hash(snapshotRepository, forceMergeIndex); } + + @Nullable + static SearchableSnapshotMetadata extractSearchableSnapshotFromSettings(IndexMetadata indexMetadata) { + String indexName = indexMetadata.getSettings().get(LifecycleSettings.SNAPSHOT_INDEX_NAME); + if (indexName == null) { + return null; + } + String snapshotName = indexMetadata.getSettings().get(SEARCHABLE_SNAPSHOTS_SNAPSHOT_NAME_SETTING_KEY); + String repo = indexMetadata.getSettings().get(SEARCHABLE_SNAPSHOTS_REPOSITORY_NAME_SETTING_KEY); + final boolean partial = indexMetadata.getSettings().getAsBoolean(SEARCHABLE_SNAPSHOT_PARTIAL_SETTING_KEY, false); + return new SearchableSnapshotMetadata(indexName, repo, snapshotName, partial); + } + + record SearchableSnapshotMetadata(String sourceIndex, String repositoryName, String snapshotName, boolean partial) {}; } diff --git a/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/SearchableSnapshotActionIT.java b/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/SearchableSnapshotActionIT.java index 9ec36d4d9b7cf..361cfd79b5e88 100644 --- a/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/SearchableSnapshotActionIT.java +++ b/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/SearchableSnapshotActionIT.java @@ -58,6 +58,7 @@ import static org.elasticsearch.xpack.TimeSeriesRestDriver.getStepKeyForIndex; import static org.elasticsearch.xpack.TimeSeriesRestDriver.indexDocument; import static org.elasticsearch.xpack.TimeSeriesRestDriver.rolloverMaxOneDocCondition; +import static org.elasticsearch.xpack.core.ilm.DeleteAction.WITH_SNAPSHOT_DELETE; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -184,10 +185,7 @@ public void testDeleteActionDeletesSearchableSnapshot() throws Exception { Map coldActions = Map.of(SearchableSnapshotAction.NAME, new SearchableSnapshotAction(snapshotRepo)); Map phases = new HashMap<>(); phases.put("cold", new Phase("cold", TimeValue.ZERO, coldActions)); - phases.put( - "delete", - new Phase("delete", TimeValue.timeValueMillis(10000), singletonMap(DeleteAction.NAME, DeleteAction.WITH_SNAPSHOT_DELETE)) - ); + phases.put("delete", new Phase("delete", TimeValue.timeValueMillis(10000), singletonMap(DeleteAction.NAME, WITH_SNAPSHOT_DELETE))); LifecyclePolicy lifecyclePolicy = new LifecyclePolicy(policy, phases); // PUT policy XContentBuilder builder = jsonBuilder(); @@ -574,6 +572,218 @@ public void testConvertingSearchableSnapshotFromFullToPartial() throws Exception ); } + @SuppressWarnings("unchecked") + public void testResumingSearchableSnapshotFromFullToPartial() throws Exception { + String index = "myindex-" + randomAlphaOfLength(4).toLowerCase(Locale.ROOT); + createSnapshotRepo(client(), snapshotRepo, randomBoolean()); + var policyCold = "policy-cold"; + createPolicy( + client(), + policyCold, + null, + null, + new Phase( + "cold", + TimeValue.ZERO, + singletonMap(SearchableSnapshotAction.NAME, new SearchableSnapshotAction(snapshotRepo, randomBoolean())) + ), + null, + null + ); + var policyFrozen = "policy-cold-frozen"; + createPolicy( + client(), + policyFrozen, + null, + null, + new Phase( + "cold", + TimeValue.ZERO, + 
singletonMap(SearchableSnapshotAction.NAME, new SearchableSnapshotAction(snapshotRepo, randomBoolean())) + ), + new Phase( + "frozen", + TimeValue.ZERO, + singletonMap(SearchableSnapshotAction.NAME, new SearchableSnapshotAction(snapshotRepo, randomBoolean())) + ), + null + ); + + createIndex(index, Settings.EMPTY); + ensureGreen(index); + indexDocument(client(), index, true); + + // enable ILM after we indexed a document as otherwise ILM might sometimes run so fast the indexDocument call will fail with + // `index_not_found_exception` + updateIndexSettings(index, Settings.builder().put(LifecycleSettings.LIFECYCLE_NAME, policyCold)); + + final String fullMountedIndexName = SearchableSnapshotAction.FULL_RESTORED_INDEX_PREFIX + index; + + assertBusy(() -> { + logger.info("--> waiting for [{}] to exist...", fullMountedIndexName); + assertTrue(indexExists(fullMountedIndexName)); + }, 30, TimeUnit.SECONDS); + + assertBusy(() -> { + Step.StepKey stepKeyForIndex = getStepKeyForIndex(client(), fullMountedIndexName); + assertThat(stepKeyForIndex.phase(), is("cold")); + assertThat(stepKeyForIndex.name(), is(PhaseCompleteStep.NAME)); + }, 30, TimeUnit.SECONDS); + + // remove ILM + { + Request request = new Request("POST", "/" + fullMountedIndexName + "/_ilm/remove"); + Map responseMap = responseAsMap(client().performRequest(request)); + assertThat(responseMap.get("has_failures"), is(false)); + } + // add cold-frozen + updateIndexSettings(index, Settings.builder().put(LifecycleSettings.LIFECYCLE_NAME, policyFrozen)); + String partiallyMountedIndexName = SearchableSnapshotAction.PARTIAL_RESTORED_INDEX_PREFIX + fullMountedIndexName; + assertBusy(() -> { + logger.info("--> waiting for [{}] to exist...", partiallyMountedIndexName); + assertTrue(indexExists(partiallyMountedIndexName)); + }, 30, TimeUnit.SECONDS); + + assertBusy(() -> { + Step.StepKey stepKeyForIndex = getStepKeyForIndex(client(), partiallyMountedIndexName); + assertThat(stepKeyForIndex.phase(), is("frozen")); + assertThat(stepKeyForIndex.name(), is(PhaseCompleteStep.NAME)); + }, 30, TimeUnit.SECONDS); + + // Ensure the searchable snapshot is not deleted when the index was deleted because it was not created by this + // policy. 
We add the delete phase now to ensure that the index will not be deleted before we verify the above + // assertions + createPolicy( + client(), + policyFrozen, + null, + null, + new Phase( + "cold", + TimeValue.ZERO, + singletonMap(SearchableSnapshotAction.NAME, new SearchableSnapshotAction(snapshotRepo, randomBoolean())) + ), + new Phase( + "frozen", + TimeValue.ZERO, + singletonMap(SearchableSnapshotAction.NAME, new SearchableSnapshotAction(snapshotRepo, randomBoolean())) + ), + new Phase("delete", TimeValue.ZERO, singletonMap(DeleteAction.NAME, WITH_SNAPSHOT_DELETE)) + ); + assertBusy(() -> { + logger.info("--> waiting for [{}] to be deleted...", partiallyMountedIndexName); + assertThat(indexExists(partiallyMountedIndexName), is(false)); + Request getSnaps = new Request("GET", "/_snapshot/" + snapshotRepo + "/_all"); + Map responseMap = responseAsMap(client().performRequest(getSnaps)); + assertThat(((List>) responseMap.get("snapshots")).size(), equalTo(1)); + }, 30, TimeUnit.SECONDS); + } + + @SuppressWarnings("unchecked") + public void testResumingSearchableSnapshotFromPartialToFull() throws Exception { + String index = "myindex-" + randomAlphaOfLength(4).toLowerCase(Locale.ROOT); + createSnapshotRepo(client(), snapshotRepo, randomBoolean()); + var policyCold = "policy-cold"; + createPolicy( + client(), + policyCold, + null, + null, + new Phase( + "cold", + TimeValue.ZERO, + singletonMap(SearchableSnapshotAction.NAME, new SearchableSnapshotAction(snapshotRepo, randomBoolean())) + ), + null, + null + ); + var policyColdFrozen = "policy-cold-frozen"; + createPolicy( + client(), + policyColdFrozen, + + null, + null, + new Phase( + "cold", + TimeValue.ZERO, + singletonMap(SearchableSnapshotAction.NAME, new SearchableSnapshotAction(snapshotRepo, randomBoolean())) + ), + new Phase( + "frozen", + TimeValue.ZERO, + singletonMap(SearchableSnapshotAction.NAME, new SearchableSnapshotAction(snapshotRepo, randomBoolean())) + ), + null + ); + + createIndex(index, Settings.EMPTY); + ensureGreen(index); + indexDocument(client(), index, true); + + // enable ILM after we indexed a document as otherwise ILM might sometimes run so fast the indexDocument call will fail with + // `index_not_found_exception` + updateIndexSettings(index, Settings.builder().put(LifecycleSettings.LIFECYCLE_NAME, policyColdFrozen)); + + final String fullMountedIndexName = SearchableSnapshotAction.FULL_RESTORED_INDEX_PREFIX + index; + final String partialMountedIndexName = SearchableSnapshotAction.PARTIAL_RESTORED_INDEX_PREFIX + fullMountedIndexName; + + assertBusy(() -> { + logger.info("--> waiting for [{}] to exist...", partialMountedIndexName); + assertTrue(indexExists(partialMountedIndexName)); + }, 30, TimeUnit.SECONDS); + + assertBusy(() -> { + Step.StepKey stepKeyForIndex = getStepKeyForIndex(client(), partialMountedIndexName); + assertThat(stepKeyForIndex.phase(), is("frozen")); + assertThat(stepKeyForIndex.name(), is(PhaseCompleteStep.NAME)); + }, 30, TimeUnit.SECONDS); + + // remove ILM from the partially mounted searchable snapshot + { + Request request = new Request("POST", "/" + partialMountedIndexName + "/_ilm/remove"); + Map responseMap = responseAsMap(client().performRequest(request)); + assertThat(responseMap.get("has_failures"), is(false)); + } + // add a policy that will only include the fully mounted searchable snapshot + updateIndexSettings(index, Settings.builder().put(LifecycleSettings.LIFECYCLE_NAME, policyCold)); + String restoredPartiallyMountedIndexName = 
SearchableSnapshotAction.FULL_RESTORED_INDEX_PREFIX + partialMountedIndexName; + assertBusy(() -> { + logger.info("--> waiting for [{}] to exist...", restoredPartiallyMountedIndexName); + assertTrue(indexExists(restoredPartiallyMountedIndexName)); + }, 30, TimeUnit.SECONDS); + + assertBusy(() -> { + Step.StepKey stepKeyForIndex = getStepKeyForIndex(client(), restoredPartiallyMountedIndexName); + assertThat(stepKeyForIndex.phase(), is("cold")); + assertThat(stepKeyForIndex.name(), is(PhaseCompleteStep.NAME)); + }, 30, TimeUnit.SECONDS); + + // Ensure the searchable snapshot is not deleted when the index was deleted because it was not created by this + // policy. We add the delete phase now to ensure that the index will not be deleted before we verify the above + // assertions + createPolicy( + client(), + policyCold, + null, + null, + new Phase( + "cold", + TimeValue.ZERO, + singletonMap(SearchableSnapshotAction.NAME, new SearchableSnapshotAction(snapshotRepo, randomBoolean())) + ), + null, + new Phase("delete", TimeValue.ZERO, singletonMap(DeleteAction.NAME, WITH_SNAPSHOT_DELETE)) + ); + assertBusy(() -> { + logger.info("--> waiting for [{}] to be deleted...", restoredPartiallyMountedIndexName); + assertThat(indexExists(restoredPartiallyMountedIndexName), is(false)); + Request getSnaps = new Request("GET", "/_snapshot/" + snapshotRepo + "/_all"); + Map responseMap = responseAsMap(client().performRequest(getSnaps)); + assertThat(((List>) responseMap.get("snapshots")).size(), equalTo(1)); + }, 30, TimeUnit.SECONDS); + } + public void testSecondSearchableSnapshotUsingDifferentRepoThrows() throws Exception { String secondRepo = randomAlphaOfLengthBetween(10, 20); createSnapshotRepo(client(), snapshotRepo, randomBoolean()); From 5c3d118031dea20ef2e121da6d319c63a2ddb724 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Wed, 6 Dec 2023 08:15:18 -0800 Subject: [PATCH 07/45] Unmute HeapAttack tests (#102942) This PR re-enables two more heap attack tests. I have run more than 100 iterations with these tests without hitting any failures. --- .../elasticsearch/xpack/esql/qa/heap_attack/HeapAttackIT.java | 2 -- 1 file changed, 2 deletions(-) diff --git a/x-pack/plugin/esql/qa/server/heap-attack/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/heap_attack/HeapAttackIT.java b/x-pack/plugin/esql/qa/server/heap-attack/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/heap_attack/HeapAttackIT.java index 2cc13117a299f..37f2c86dbc251 100644 --- a/x-pack/plugin/esql/qa/server/heap-attack/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/heap_attack/HeapAttackIT.java +++ b/x-pack/plugin/esql/qa/server/heap-attack/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/heap_attack/HeapAttackIT.java @@ -113,7 +113,6 @@ public void testGroupOnSomeLongs() throws IOException { /** * This groups on 5000 columns which used to throw a {@link StackOverflowError}. */ - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/100640") public void testGroupOnManyLongs() throws IOException { initManyLongs(); Map map = XContentHelper.convertToMap( @@ -182,7 +181,6 @@ private Response concat(int evals) throws IOException { /** * Returns many moderately long strings. 
*/ - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/100678") public void testManyConcat() throws IOException { initManyLongs(); Map map = XContentHelper.convertToMap(JsonXContent.jsonXContent, EntityUtils.toString(manyConcat(300).getEntity()), false); From c183b92585862a2d790fe6210dcf27a1fe4b30f0 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Wed, 6 Dec 2023 08:27:39 -0800 Subject: [PATCH 08/45] Fast path for reading single doc with ordinals (#102902) This optimization is added for enrich lookups, which are likely to match a single document. The change decreases the latency of the enrich operation in the nyc_taxis benchmark from 100ms to 70ms. When combined with #102901, it further reduces the latency to below 40ms, better than the previous performance before the regression. Relates #102625 --- docs/changelog/102902.yaml | 5 +++++ .../index/mapper/BlockDocValuesReader.java | 12 ++++++++++++ 2 files changed, 17 insertions(+) create mode 100644 docs/changelog/102902.yaml diff --git a/docs/changelog/102902.yaml b/docs/changelog/102902.yaml new file mode 100644 index 0000000000000..b33afdd35a603 --- /dev/null +++ b/docs/changelog/102902.yaml @@ -0,0 +1,5 @@ +pr: 102902 +summary: Fast path for reading single doc with ordinals +area: ES|QL +type: enhancement +issues: [] diff --git a/server/src/main/java/org/elasticsearch/index/mapper/BlockDocValuesReader.java b/server/src/main/java/org/elasticsearch/index/mapper/BlockDocValuesReader.java index 11e57e030dfe7..2160f52cbec02 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/BlockDocValuesReader.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/BlockDocValuesReader.java @@ -555,8 +555,20 @@ private static class SingletonOrdinals extends BlockDocValuesReader { this.ordinals = ordinals; } + private BlockLoader.Block readSingleDoc(BlockFactory factory, int docId) throws IOException { + if (ordinals.advanceExact(docId)) { + BytesRef v = ordinals.lookupOrd(ordinals.ordValue()); + return factory.constantBytes(v); + } else { + return factory.constantNulls(); + } + } + @Override public BlockLoader.Block read(BlockFactory factory, Docs docs) throws IOException { + if (docs.count() == 1) { + return readSingleDoc(factory, docs.get(0)); + } try (BlockLoader.SingletonOrdinalsBuilder builder = factory.singletonOrdinalsBuilder(ordinals, docs.count())) { for (int i = 0; i < docs.count(); i++) { int doc = docs.get(i); From be1277a769c13156caa1ede1c3a1d46f536ab947 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Wed, 6 Dec 2023 09:14:39 -0800 Subject: [PATCH 09/45] Collect warnings in compute service (#103031) We have implemented #99927 in DriverRunner. However, we also need to implement this in ComputeService, where we spawn multiple requests to avoid losing response headers. 
Relates #99927 Closes #100163 Closes #102982 Closes #102871 Closes #103028 --- docs/changelog/103031.yaml | 9 ++ .../compute/operator/DriverRunner.java | 31 +------ .../operator/ResponseHeadersCollector.java | 60 +++++++++++++ .../ResponseHeadersCollectorTests.java | 72 ++++++++++++++++ .../src/main/resources/ip.csv-spec | 3 +- .../xpack/esql/action/WarningsIT.java | 85 +++++++++++++++++++ .../xpack/esql/plugin/ComputeService.java | 14 ++- 7 files changed, 241 insertions(+), 33 deletions(-) create mode 100644 docs/changelog/103031.yaml create mode 100644 x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/ResponseHeadersCollector.java create mode 100644 x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/ResponseHeadersCollectorTests.java create mode 100644 x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/WarningsIT.java diff --git a/docs/changelog/103031.yaml b/docs/changelog/103031.yaml new file mode 100644 index 0000000000000..f63094139f5ca --- /dev/null +++ b/docs/changelog/103031.yaml @@ -0,0 +1,9 @@ +pr: 103031 +summary: Collect warnings in compute service +area: ES|QL +type: bug +issues: + - 100163 + - 103028 + - 102871 + - 102982 diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverRunner.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverRunner.java index 4f16a615572b7..5de017fbd279e 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverRunner.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverRunner.java @@ -9,16 +9,11 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.common.util.concurrent.CountDown; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.tasks.TaskCancelledException; -import java.util.HashMap; -import java.util.LinkedHashSet; import java.util.List; -import java.util.Map; -import java.util.Set; import java.util.concurrent.atomic.AtomicReference; /** @@ -41,11 +36,10 @@ public DriverRunner(ThreadContext threadContext) { */ public void runToCompletion(List drivers, ActionListener listener) { AtomicReference failure = new AtomicReference<>(); - AtomicArray>> responseHeaders = new AtomicArray<>(drivers.size()); + var responseHeadersCollector = new ResponseHeadersCollector(threadContext); CountDown counter = new CountDown(drivers.size()); for (int i = 0; i < drivers.size(); i++) { Driver driver = drivers.get(i); - int driverIndex = i; ActionListener driverListener = new ActionListener<>() { @Override public void onResponse(Void unused) { @@ -80,9 +74,9 @@ public void onFailure(Exception e) { } private void done() { - responseHeaders.setOnce(driverIndex, threadContext.getResponseHeaders()); + responseHeadersCollector.collect(); if (counter.countDown()) { - mergeResponseHeaders(responseHeaders); + responseHeadersCollector.finish(); Exception error = failure.get(); if (error != null) { listener.onFailure(error); @@ -96,23 +90,4 @@ private void done() { start(driver, driverListener); } } - - private void mergeResponseHeaders(AtomicArray>> responseHeaders) { - final Map> merged = new HashMap<>(); - for (int i = 0; i < responseHeaders.length(); i++) { - final Map> resp = responseHeaders.get(i); - if (resp == null || resp.isEmpty()) { - continue; - } - for (Map.Entry> e : 
resp.entrySet()) { - // Use LinkedHashSet to retain the order of the values - merged.computeIfAbsent(e.getKey(), k -> new LinkedHashSet<>(e.getValue().size())).addAll(e.getValue()); - } - } - for (Map.Entry> e : merged.entrySet()) { - for (String v : e.getValue()) { - threadContext.addResponseHeader(e.getKey(), v); - } - } - } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/ResponseHeadersCollector.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/ResponseHeadersCollector.java new file mode 100644 index 0000000000000..8f40664be74d4 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/ResponseHeadersCollector.java @@ -0,0 +1,60 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.operator; + +import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.common.util.concurrent.ThreadContext; + +import java.util.HashMap; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Map; +import java.util.Queue; +import java.util.Set; + +/** + * A helper class that can be used to collect and merge response headers from multiple child requests. + */ +public final class ResponseHeadersCollector { + private final ThreadContext threadContext; + private final Queue>> collected = ConcurrentCollections.newQueue(); + + public ResponseHeadersCollector(ThreadContext threadContext) { + this.threadContext = threadContext; + } + + /** + * Called when a child request is completed to collect the response headers of the responding thread + */ + public void collect() { + Map> responseHeaders = threadContext.getResponseHeaders(); + if (responseHeaders.isEmpty() == false) { + collected.add(responseHeaders); + } + } + + /** + * Called when all child requests are completed. This will merge all collected response headers + * from the child requests and restore to the current thread. + */ + public void finish() { + final Map> merged = new HashMap<>(); + Map> resp; + while ((resp = collected.poll()) != null) { + for (Map.Entry> e : resp.entrySet()) { + // Use LinkedHashSet to retain the order of the values + merged.computeIfAbsent(e.getKey(), k -> new LinkedHashSet<>(e.getValue().size())).addAll(e.getValue()); + } + } + for (Map.Entry> e : merged.entrySet()) { + for (String v : e.getValue()) { + threadContext.addResponseHeader(e.getKey(), v); + } + } + } +} diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/ResponseHeadersCollectorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/ResponseHeadersCollectorTests.java new file mode 100644 index 0000000000000..b09372f3a962c --- /dev/null +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/ResponseHeadersCollectorTests.java @@ -0,0 +1,72 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.compute.operator; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRunnable; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.RefCountingListener; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.FixedExecutorBuilder; +import org.elasticsearch.threadpool.TestThreadPool; + +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.concurrent.CyclicBarrier; +import java.util.concurrent.TimeUnit; + +import static org.hamcrest.Matchers.equalTo; + +public class ResponseHeadersCollectorTests extends ESTestCase { + + public void testCollect() { + int numThreads = randomIntBetween(1, 10); + TestThreadPool threadPool = new TestThreadPool( + getTestClass().getSimpleName(), + new FixedExecutorBuilder(Settings.EMPTY, "test", numThreads, 1024, "test", EsExecutors.TaskTrackingConfig.DEFAULT) + ); + Set expectedWarnings = new HashSet<>(); + try { + ThreadContext threadContext = threadPool.getThreadContext(); + var collector = new ResponseHeadersCollector(threadContext); + PlainActionFuture future = new PlainActionFuture<>(); + Runnable mergeAndVerify = () -> { + collector.finish(); + List actualWarnings = threadContext.getResponseHeaders().getOrDefault("Warnings", List.of()); + assertThat(Sets.newHashSet(actualWarnings), equalTo(expectedWarnings)); + }; + try (RefCountingListener refs = new RefCountingListener(ActionListener.runAfter(future, mergeAndVerify))) { + CyclicBarrier barrier = new CyclicBarrier(numThreads); + for (int i = 0; i < numThreads; i++) { + String warning = "warning-" + i; + expectedWarnings.add(warning); + ActionListener listener = ActionListener.runBefore(refs.acquire(), collector::collect); + threadPool.schedule(new ActionRunnable<>(listener) { + @Override + protected void doRun() throws Exception { + barrier.await(30, TimeUnit.SECONDS); + try (ThreadContext.StoredContext ignored = threadContext.stashContext()) { + threadContext.addResponseHeader("Warnings", warning); + listener.onResponse(null); + } + } + }, TimeValue.timeValueNanos(between(0, 1000_000)), threadPool.executor("test")); + } + } + future.actionGet(TimeValue.timeValueSeconds(30)); + } finally { + terminate(threadPool); + } + } +} diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ip.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ip.csv-spec index e0167ce451e80..02e9db6ededf1 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ip.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ip.csv-spec @@ -257,8 +257,7 @@ eth1 |alpha |::1 |::1 eth0 |beta |127.0.0.1 |::1 ; -// AwaitsFix: https://github.com/elastic/elasticsearch/issues/103028 -pushDownIPWithComparision#[skip:-8.11.99, reason:Lucene multivalue warning introduced in 8.12 only]-Ignore +pushDownIPWithComparision#[skip:-8.11.99, reason:Lucene multivalue warning introduced in 8.12 only] from hosts | where ip1 > to_ip("127.0.0.1") | keep card, ip1; ignoreOrder:true warning:Line 1:20: evaluation of [ip1 > to_ip(\"127.0.0.1\")] failed, treating result as null. Only first 20 failures recorded. 
diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/WarningsIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/WarningsIT.java new file mode 100644 index 0000000000000..12897979a47e0 --- /dev/null +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/WarningsIT.java @@ -0,0 +1,85 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.action; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.junit.annotations.TestLogging; +import org.elasticsearch.transport.TransportService; + +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; + +@TestLogging(value = "org.elasticsearch.xpack.esql:TRACE", reason = "debug") +public class WarningsIT extends AbstractEsqlIntegTestCase { + + public void testCollectWarnings() { + final String node1, node2; + if (randomBoolean()) { + internalCluster().ensureAtLeastNumDataNodes(2); + node1 = randomDataNode().getName(); + node2 = randomValueOtherThan(node1, () -> randomDataNode().getName()); + } else { + node1 = randomDataNode().getName(); + node2 = randomDataNode().getName(); + } + + int numDocs1 = randomIntBetween(1, 15); + assertAcked( + client().admin() + .indices() + .prepareCreate("index-1") + .setSettings(Settings.builder().put("index.routing.allocation.require._name", node1)) + .setMapping("host", "type=keyword") + ); + for (int i = 0; i < numDocs1; i++) { + client().prepareIndex("index-1").setSource("host", "192." + i).get(); + } + int numDocs2 = randomIntBetween(1, 15); + assertAcked( + client().admin() + .indices() + .prepareCreate("index-2") + .setSettings(Settings.builder().put("index.routing.allocation.require._name", node2)) + .setMapping("host", "type=keyword") + ); + for (int i = 0; i < numDocs2; i++) { + client().prepareIndex("index-2").setSource("host", "10." 
+ i).get(); + } + + DiscoveryNode coordinator = randomFrom(clusterService().state().nodes().stream().toList()); + client().admin().indices().prepareRefresh("index-1", "index-2").get(); + + EsqlQueryRequest request = new EsqlQueryRequest(); + request.query("FROM index-* | EVAL ip = to_ip(host) | STATS s = COUNT(*) by ip | KEEP ip | LIMIT 100"); + request.pragmas(randomPragmas()); + PlainActionFuture future = new PlainActionFuture<>(); + client(coordinator.getName()).execute(EsqlQueryAction.INSTANCE, request, ActionListener.runBefore(future, () -> { + var threadpool = internalCluster().getInstance(TransportService.class, coordinator.getName()).getThreadPool(); + Map> responseHeaders = threadpool.getThreadContext().getResponseHeaders(); + List warnings = responseHeaders.getOrDefault("Warning", List.of()) + .stream() + .filter(w -> w.contains("is not an IP string literal")) + .toList(); + int expectedWarnings = Math.min(20, numDocs1 + numDocs2); + // we cap the number of warnings per node + assertThat(warnings.size(), greaterThanOrEqualTo(expectedWarnings)); + })); + future.actionGet(30, TimeUnit.SECONDS).close(); + } + + private DiscoveryNode randomDataNode() { + return randomFrom(clusterService().state().nodes().getDataNodes().values()); + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java index dd5ae00294ed0..b7b31868d65e2 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java @@ -32,6 +32,7 @@ import org.elasticsearch.compute.operator.Driver; import org.elasticsearch.compute.operator.DriverProfile; import org.elasticsearch.compute.operator.DriverTaskRunner; +import org.elasticsearch.compute.operator.ResponseHeadersCollector; import org.elasticsearch.compute.operator.exchange.ExchangeResponse; import org.elasticsearch.compute.operator.exchange.ExchangeService; import org.elasticsearch.compute.operator.exchange.ExchangeSinkHandler; @@ -162,6 +163,8 @@ public void execute( LOGGER.debug("Sending data node plan\n{}\n with filter [{}]", dataNodePlan, requestFilter); + final var responseHeadersCollector = new ResponseHeadersCollector(transportService.getThreadPool().getThreadContext()); + listener = ActionListener.runBefore(listener, responseHeadersCollector::finish); String[] originalIndices = PlannerUtils.planOriginalIndices(physicalPlan); computeTargetNodes( rootTask, @@ -193,6 +196,7 @@ public void execute( computeContext, coordinatorPlan, cancelOnFailure(rootTask, cancelled, requestRefs.acquire()).map(driverProfiles -> { + responseHeadersCollector.collect(); if (configuration.profile()) { collectedProfiles.addAll(driverProfiles); } @@ -208,6 +212,7 @@ public void execute( exchangeSource, targetNodes, () -> cancelOnFailure(rootTask, cancelled, requestRefs.acquire()).map(response -> { + responseHeadersCollector.collect(); if (configuration.profile()) { collectedProfiles.addAll(response.profiles); } @@ -501,9 +506,12 @@ public void messageReceived(DataNodeRequest request, TransportChannel channel, T runCompute(parentTask, computeContext, request.plan(), ActionListener.wrap(driverProfiles -> { // don't return until all pages are fetched exchangeSink.addCompletionListener( - ActionListener.releaseAfter( - listener.map(nullValue -> new DataNodeResponse(driverProfiles)), - () -> 
exchangeService.finishSinkHandler(sessionId, null) + ContextPreservingActionListener.wrapPreservingContext( + ActionListener.releaseAfter( + listener.map(nullValue -> new DataNodeResponse(driverProfiles)), + () -> exchangeService.finishSinkHandler(sessionId, null) + ), + transportService.getThreadPool().getThreadContext() ) ); }, e -> { From 84f72797f54175fa03c533998442cc973d7950c6 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Wed, 6 Dec 2023 19:10:00 +0100 Subject: [PATCH 10/45] Fix search response leaks in EQL tests (#103068) Fixing all EQL tests --- .../assembler/ImplicitTiebreakerTests.java | 3 +- .../assembler/SequenceSpecTests.java | 3 +- .../execution/sample/CircuitBreakerTests.java | 25 +++---- .../sequence/CircuitBreakerTests.java | 70 ++++++++++--------- 4 files changed, 51 insertions(+), 50 deletions(-) diff --git a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/assembler/ImplicitTiebreakerTests.java b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/assembler/ImplicitTiebreakerTests.java index e1a70dffef79a..85a34d7b6a943 100644 --- a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/assembler/ImplicitTiebreakerTests.java +++ b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/assembler/ImplicitTiebreakerTests.java @@ -84,8 +84,7 @@ public void query(QueryRequest r, ActionListener l) { ); SearchHits searchHits = new SearchHits(new SearchHit[] { searchHit }, new TotalHits(1, Relation.EQUAL_TO), 0.0f); SearchResponseSections internal = new SearchResponseSections(searchHits, null, null, false, false, null, 0); - SearchResponse s = new SearchResponse(internal, null, 0, 1, 0, 0, null, Clusters.EMPTY); - l.onResponse(s); + ActionListener.respondAndRelease(l, new SearchResponse(internal, null, 0, 1, 0, 0, null, Clusters.EMPTY)); } @Override diff --git a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/assembler/SequenceSpecTests.java b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/assembler/SequenceSpecTests.java index 87fd105ddf56f..336526a1153a5 100644 --- a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/assembler/SequenceSpecTests.java +++ b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/assembler/SequenceSpecTests.java @@ -222,8 +222,7 @@ public void query(QueryRequest r, ActionListener l) { 0.0f ); SearchResponseSections internal = new SearchResponseSections(searchHits, null, null, false, false, null, 0); - SearchResponse s = new SearchResponse(internal, null, 0, 1, 0, 0, null, Clusters.EMPTY); - l.onResponse(s); + ActionListener.respondAndRelease(l, new SearchResponse(internal, null, 0, 1, 0, 0, null, Clusters.EMPTY)); } @Override diff --git a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sample/CircuitBreakerTests.java b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sample/CircuitBreakerTests.java index bac694996526d..e787505f7dfe3 100644 --- a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sample/CircuitBreakerTests.java +++ b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sample/CircuitBreakerTests.java @@ -224,19 +224,20 @@ void handleSearchRequest(ActionListener l) { ); SearchHits searchHits = new SearchHits(new SearchHit[] { searchHit }, new TotalHits(1, Relation.EQUAL_TO), 0.0f); SearchResponseSections internal = new SearchResponseSections(searchHits, null, null, false, false, null, 0); - SearchResponse s 
= new SearchResponse(internal, null, 0, 1, 0, 0, null, Clusters.EMPTY); - l.onResponse(s); + ActionListener.respondAndRelease(l, new SearchResponse(internal, null, 0, 1, 0, 0, null, Clusters.EMPTY)); } @Override @@ -451,7 +450,7 @@ void handleSearchRequest(ActionListener 0); // at this point the algorithm already started adding up to memory usage } - listener.onResponse((Response) response); + ActionListener.respondAndRelease(listener, (Response) response); } } @@ -479,18 +478,20 @@ void handleSearchRequest(ActionListener 0); // at this point the algorithm already started adding up to memory usage ShardSearchFailure[] failures = new ShardSearchFailure[] { @@ -504,28 +505,29 @@ void handleSearchRequest(ActionListener Date: Wed, 6 Dec 2023 12:18:47 -0600 Subject: [PATCH 11/45] Update IronBank docker image base to ubi:9.3 (#102721) --- distribution/docker/src/docker/Dockerfile | 2 +- .../docker/src/docker/iron_bank/hardening_manifest.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/distribution/docker/src/docker/Dockerfile b/distribution/docker/src/docker/Dockerfile index 8fac93211d82b..32f35b05015b9 100644 --- a/distribution/docker/src/docker/Dockerfile +++ b/distribution/docker/src/docker/Dockerfile @@ -22,7 +22,7 @@ <% if (docker_base == 'iron_bank') { %> ARG BASE_REGISTRY=registry1.dso.mil ARG BASE_IMAGE=ironbank/redhat/ubi/ubi9 -ARG BASE_TAG=9.2 +ARG BASE_TAG=9.3 <% } %> ################################################################################ diff --git a/distribution/docker/src/docker/iron_bank/hardening_manifest.yaml b/distribution/docker/src/docker/iron_bank/hardening_manifest.yaml index 7152f6d18f1d2..38ce16a413af2 100644 --- a/distribution/docker/src/docker/iron_bank/hardening_manifest.yaml +++ b/distribution/docker/src/docker/iron_bank/hardening_manifest.yaml @@ -14,7 +14,7 @@ tags: # Build args passed to Dockerfile ARGs args: BASE_IMAGE: "redhat/ubi/ubi9" - BASE_TAG: "9.2" + BASE_TAG: "9.3" # Docker image labels labels: From 7413e4169d128c3c6004e79ee9dd3bb59acd74f3 Mon Sep 17 00:00:00 2001 From: Bogdan Pintea Date: Wed, 6 Dec 2023 19:33:52 +0100 Subject: [PATCH 12/45] Fix headers check in SingleValueQueryTests (#103074) If the generated docs have no values at all for the first documents which are checked, don't expect Warnings either. Closes #102997 --- .../xpack/esql/querydsl/query/SingleValueQueryTests.java | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/querydsl/query/SingleValueQueryTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/querydsl/query/SingleValueQueryTests.java index 4322e5fbac2ef..f5fc643d98fe6 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/querydsl/query/SingleValueQueryTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/querydsl/query/SingleValueQueryTests.java @@ -156,15 +156,18 @@ private void runCase(List> fieldValues, int count, Integer docsStar int expected = 0; int min = docsStart != null ? docsStart : 0; int max = docsStop != null ? 
docsStop : fieldValues.size(); + int valuesCount = 0; for (int i = min; i < max; i++) { - if (fieldValues.get(i).size() == 1) { + int mvCount = fieldValues.get(i).size(); + if (mvCount == 1) { expected++; } + valuesCount += mvCount; } assertThat(count, equalTo(expected)); // query's count runs against the full set, not just min-to-max - if (fieldValues.stream().anyMatch(x -> x.size() > 1)) { + if (valuesCount > 0 && fieldValues.stream().anyMatch(x -> x.size() > 1)) { assertWarnings( "Line -1:-1: evaluation of [] failed, treating result as null. Only first 20 failures recorded.", "Line -1:-1: java.lang.IllegalArgumentException: single-value function encountered multi-value" From 48144ba1589d0a471dc854d156436255d4843346 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Wed, 6 Dec 2023 20:11:47 +0100 Subject: [PATCH 13/45] Fix SearchResponse reference count leaks in ML module (#103009) Fixing all kinds of leaks in both ml prod and test code. Added a new utility for a very common operation in tests that I'm planning on replacing other use sites with in a follow up. --- .../search/SearchResponseUtils.java | 23 ++++ .../xpack/core/ClientHelperTests.java | 4 +- .../ClassificationHousePricingIT.java | 60 ++++++----- .../ml/integration/DatafeedWithAggsIT.java | 7 +- .../ml/integration/DeleteExpiredDataIT.java | 31 +++--- .../integration/RunDataFrameAnalyticsIT.java | 12 ++- .../license/MachineLearningLicensingIT.java | 2 +- .../BucketCorrelationAggregationIT.java | 64 ++++++----- .../CategorizeTextAggregationIT.java | 100 ++++++++++-------- .../integration/DataFrameAnalyticsCRUDIT.java | 19 ++-- .../xpack/ml/integration/DatafeedCcsIT.java | 6 +- .../xpack/ml/integration/IndexLayoutIT.java | 6 +- .../integration/JobStorageDeletionTaskIT.java | 10 +- .../ml/integration/TrainedModelCRUDIT.java | 11 +- .../DatafeedDelayedDataDetector.java | 20 ++-- .../AbstractAggregationDataExtractor.java | 12 ++- .../CompositeAggregationDataExtractor.java | 34 ++++-- .../chunked/ChunkedDataExtractor.java | 70 +++++++----- .../extractor/scroll/ScrollDataExtractor.java | 60 +++++++---- .../extractor/DataFrameDataExtractor.java | 24 +++-- .../dataframe/inference/InferenceRunner.java | 31 +++--- .../process/AnalyticsProcessManager.java | 6 +- .../process/NativeAnalyticsProcess.java | 14 ++- .../TrainTestSplitterFactory.java | 48 +++++---- .../ChunkedTrainedModelRestorer.java | 84 ++++++++------- .../job/persistence/JobResultsPersister.java | 12 ++- .../job/persistence/JobResultsProvider.java | 70 ++++++------ .../ml/job/persistence/StateStreamer.java | 34 +++--- .../ml/process/IndexingStateProcessor.java | 10 +- .../persistence/BatchedDocumentsIterator.java | 8 +- .../persistence/ResultsPersisterService.java | 5 +- .../SearchAfterDocumentsIterator.java | 10 +- .../input/search/ExecutableSearchInput.java | 58 +++++----- .../search/ExecutableSearchTransform.java | 16 +-- 34 files changed, 574 insertions(+), 407 deletions(-) create mode 100644 test/framework/src/main/java/org/elasticsearch/search/SearchResponseUtils.java diff --git a/test/framework/src/main/java/org/elasticsearch/search/SearchResponseUtils.java b/test/framework/src/main/java/org/elasticsearch/search/SearchResponseUtils.java new file mode 100644 index 0000000000000..e61b89fcff42c --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/search/SearchResponseUtils.java @@ -0,0 +1,23 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ +package org.elasticsearch.search; + +import org.elasticsearch.action.search.SearchRequestBuilder; + +public enum SearchResponseUtils { + ; + + public static long getTotalHitsValue(SearchRequestBuilder request) { + var resp = request.get(); + try { + return resp.getHits().getTotalHits().value; + } finally { + resp.decRef(); + } + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ClientHelperTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ClientHelperTests.java index 0f3a58350c36a..99826b5537258 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ClientHelperTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ClientHelperTests.java @@ -340,7 +340,7 @@ private void assertExecutionWithOrigin(Map storedHeaders, Client assertThat(headers, not(hasEntry(AuthenticationServiceField.RUN_AS_USER_HEADER, "anything"))); return client.search(new SearchRequest()).actionGet(); - }); + }).decRef(); } /** @@ -356,7 +356,7 @@ public void assertRunAsExecution(Map storedHeaders, Consumer destDoc = getDestDoc(config, hit); - assertNotNull(destDoc); - Map resultsObject = getFieldValue(destDoc, "ml"); - assertThat(resultsObject.containsKey(predictionField), is(true)); - String predictionValue = (String) resultsObject.get(predictionField); - assertNotNull(predictionValue); - assertThat(resultsObject.containsKey("feature_importance"), is(true)); - @SuppressWarnings("unchecked") - List> importanceArray = (List>) resultsObject.get("feature_importance"); - assertThat( - Strings.format(str, modelId, numberTrees) + predictionValue + hyperparameters + modelDefinition, - importanceArray, - hasSize(greaterThan(0)) - ); + try { + // obtain addition information for investigation of #90599 + String modelId = getModelId(jobId); + TrainedModelMetadata modelMetadata = getModelMetadata(modelId); + assertThat(modelMetadata.getHyperparameters().size(), greaterThan(0)); + StringBuilder hyperparameters = new StringBuilder(); // used to investigate #90019 + for (Hyperparameters hyperparameter : modelMetadata.getHyperparameters()) { + hyperparameters.append(hyperparameter.hyperparameterName).append(": ").append(hyperparameter.value).append("\n"); + } + TrainedModelDefinition modelDefinition = getModelDefinition(modelId); + Ensemble ensemble = (Ensemble) modelDefinition.getTrainedModel(); + int numberTrees = ensemble.getModels().size(); + String str = "Failure: failed for modelId %s numberTrees %d\n"; + for (SearchHit hit : sourceData.getHits()) { + Map destDoc = getDestDoc(config, hit); + assertNotNull(destDoc); + Map resultsObject = getFieldValue(destDoc, "ml"); + assertThat(resultsObject.containsKey(predictionField), is(true)); + String predictionValue = (String) resultsObject.get(predictionField); + assertNotNull(predictionValue); + assertThat(resultsObject.containsKey("feature_importance"), is(true)); + @SuppressWarnings("unchecked") + List> importanceArray = (List>) resultsObject.get("feature_importance"); + assertThat( + Strings.format(str, modelId, numberTrees) + predictionValue + hyperparameters + modelDefinition, + importanceArray, + hasSize(greaterThan(0)) + ); + } + } finally { + sourceData.decRef(); } - } static void indexData(String sourceIndex) { diff --git 
a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/DatafeedWithAggsIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/DatafeedWithAggsIT.java index 9773a4d3b3d82..b1b645c224e34 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/DatafeedWithAggsIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/DatafeedWithAggsIT.java @@ -35,6 +35,7 @@ import java.util.List; import java.util.concurrent.TimeUnit; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -163,7 +164,7 @@ private void testDfWithAggs(AggregatorFactories.Builder aggs, Detector.Builder d bucket.getEventCount() ); // Confirm that it's possible to search for the same buckets by @timestamp - proves that @timestamp works as a field alias - assertThat( + assertHitCount( prepareSearch(AnomalyDetectorsIndex.jobResultsAliasedName(jobId)).setQuery( QueryBuilders.boolQuery() .filter(QueryBuilders.termQuery("job_id", jobId)) @@ -171,8 +172,8 @@ private void testDfWithAggs(AggregatorFactories.Builder aggs, Detector.Builder d .filter( QueryBuilders.rangeQuery("@timestamp").gte(bucket.getTimestamp().getTime()).lte(bucket.getTimestamp().getTime()) ) - ).setTrackTotalHits(true).get().getHits().getTotalHits().value, - equalTo(1L) + ).setTrackTotalHits(true), + 1 ); } } diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/DeleteExpiredDataIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/DeleteExpiredDataIT.java index cf73b5a4a7544..00fdaa348409a 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/DeleteExpiredDataIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/DeleteExpiredDataIT.java @@ -20,6 +20,7 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.SearchResponseUtils; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; @@ -268,14 +269,13 @@ private void testExpiredDeletion(Float customThrottle, int numUnusedState) throw retainAllSnapshots("snapshots-retention-with-retain"); - long totalModelSizeStatsBeforeDelete = prepareSearch("*").setIndicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN_CLOSED_HIDDEN) - .setQuery(QueryBuilders.termQuery("result_type", "model_size_stats")) - .get() - .getHits() - .getTotalHits().value; - long totalNotificationsCountBeforeDelete = prepareSearch(NotificationsIndex.NOTIFICATIONS_INDEX).get() - .getHits() - .getTotalHits().value; + long totalModelSizeStatsBeforeDelete = SearchResponseUtils.getTotalHitsValue( + prepareSearch("*").setIndicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN_CLOSED_HIDDEN) + .setQuery(QueryBuilders.termQuery("result_type", "model_size_stats")) + ); + long totalNotificationsCountBeforeDelete = SearchResponseUtils.getTotalHitsValue( + prepareSearch(NotificationsIndex.NOTIFICATIONS_INDEX) + ); 
assertThat(totalModelSizeStatsBeforeDelete, greaterThan(0L)); assertThat(totalNotificationsCountBeforeDelete, greaterThan(0L)); @@ -319,14 +319,13 @@ private void testExpiredDeletion(Float customThrottle, int numUnusedState) throw assertThat(getRecords("results-and-snapshots-retention").size(), equalTo(0)); assertThat(getModelSnapshots("results-and-snapshots-retention").size(), equalTo(1)); - long totalModelSizeStatsAfterDelete = prepareSearch("*").setIndicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN_CLOSED_HIDDEN) - .setQuery(QueryBuilders.termQuery("result_type", "model_size_stats")) - .get() - .getHits() - .getTotalHits().value; - long totalNotificationsCountAfterDelete = prepareSearch(NotificationsIndex.NOTIFICATIONS_INDEX).get() - .getHits() - .getTotalHits().value; + long totalModelSizeStatsAfterDelete = SearchResponseUtils.getTotalHitsValue( + prepareSearch("*").setIndicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN_CLOSED_HIDDEN) + .setQuery(QueryBuilders.termQuery("result_type", "model_size_stats")) + ); + long totalNotificationsCountAfterDelete = SearchResponseUtils.getTotalHitsValue( + prepareSearch(NotificationsIndex.NOTIFICATIONS_INDEX) + ); assertThat(totalModelSizeStatsAfterDelete, equalTo(totalModelSizeStatsBeforeDelete)); assertThat(totalNotificationsCountAfterDelete, greaterThanOrEqualTo(totalNotificationsCountBeforeDelete)); diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/RunDataFrameAnalyticsIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/RunDataFrameAnalyticsIT.java index 2ab5ecb00aa00..8fbad7ccd3877 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/RunDataFrameAnalyticsIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/RunDataFrameAnalyticsIT.java @@ -20,6 +20,7 @@ import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.SearchResponseUtils; import org.elasticsearch.xcontent.XContentType; import org.elasticsearch.xpack.core.ml.action.GetDataFrameAnalyticsStatsAction; import org.elasticsearch.xpack.core.ml.action.NodeAcknowledgedResponse; @@ -396,11 +397,12 @@ public void testStopOutlierDetectionWithEnoughDocumentsToScroll() throws Excepti assertResponse(prepareSearch(config.getDest().getIndex()).setTrackTotalHits(true), searchResponse -> { if (searchResponse.getHits().getTotalHits().value == docCount) { - searchResponse = prepareSearch(config.getDest().getIndex()).setTrackTotalHits(true) - .setQuery(QueryBuilders.existsQuery("custom_ml.outlier_score")) - .get(); - logger.debug("We stopped during analysis: [{}] < [{}]", searchResponse.getHits().getTotalHits().value, docCount); - assertThat(searchResponse.getHits().getTotalHits().value, lessThan((long) docCount)); + long seenCount = SearchResponseUtils.getTotalHitsValue( + prepareSearch(config.getDest().getIndex()).setTrackTotalHits(true) + .setQuery(QueryBuilders.existsQuery("custom_ml.outlier_score")) + ); + logger.debug("We stopped during analysis: [{}] < [{}]", seenCount, docCount); + assertThat(seenCount, lessThan((long) docCount)); } else { logger.debug("We stopped during reindexing: [{}] < [{}]", searchResponse.getHits().getTotalHits().value, docCount); } diff --git 
a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/license/MachineLearningLicensingIT.java b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/license/MachineLearningLicensingIT.java index 81ae60ecfa9ae..a98dfa223b8ae 100644 --- a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/license/MachineLearningLicensingIT.java +++ b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/license/MachineLearningLicensingIT.java @@ -756,7 +756,7 @@ public void testInferenceAggRestricted() { SearchRequest search = new SearchRequest(index); search.source().aggregation(termsAgg); - client().search(search).actionGet(); + client().search(search).actionGet().decRef(); // Pick a license that does not allow machine learning License.OperationMode mode = randomInvalidLicenseType(); diff --git a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/BucketCorrelationAggregationIT.java b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/BucketCorrelationAggregationIT.java index fc35c8491094e..c15750de3b336 100644 --- a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/BucketCorrelationAggregationIT.java +++ b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/BucketCorrelationAggregationIT.java @@ -12,7 +12,6 @@ import org.elasticsearch.action.bulk.BulkRequestBuilder; import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.core.Tuple; import org.elasticsearch.search.aggregations.AggregationBuilders; @@ -31,6 +30,7 @@ import java.util.concurrent.atomic.AtomicLong; import java.util.stream.Stream; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.closeTo; public class BucketCorrelationAggregationIT extends MlSingleNodeTestCase { @@ -71,34 +71,42 @@ public void testCountCorrelation() { AtomicLong counter = new AtomicLong(); double[] steps = Stream.generate(() -> counter.getAndAdd(2L)).limit(50).mapToDouble(l -> (double) l).toArray(); - SearchResponse percentilesSearch = client().prepareSearch("data") - .addAggregation(AggregationBuilders.percentiles("percentiles").field("metric").percentiles(steps)) - .setSize(0) - .setTrackTotalHits(true) - .get(); - long totalHits = percentilesSearch.getHits().getTotalHits().value; - Percentiles percentiles = percentilesSearch.getAggregations().get("percentiles"); - Tuple aggs = buildRangeAggAndSetExpectations( - percentiles, - steps, - totalHits, - "metric" + assertResponse( + client().prepareSearch("data") + .addAggregation(AggregationBuilders.percentiles("percentiles").field("metric").percentiles(steps)) + .setSize(0) + .setTrackTotalHits(true), + percentilesSearch -> { + long totalHits = percentilesSearch.getHits().getTotalHits().value; + Percentiles percentiles = percentilesSearch.getAggregations().get("percentiles"); + Tuple aggs = buildRangeAggAndSetExpectations( + percentiles, + steps, + totalHits, + "metric" + ); + + assertResponse( + client().prepareSearch("data") + .setSize(0) + .setTrackTotalHits(false) + .addAggregation( + AggregationBuilders.terms("buckets").field("term").subAggregation(aggs.v1()).subAggregation(aggs.v2()) + ), + countCorrelations -> { + + Terms terms = countCorrelations.getAggregations().get("buckets"); + 
Terms.Bucket catBucket = terms.getBucketByKey("cat"); + Terms.Bucket dogBucket = terms.getBucketByKey("dog"); + NumericMetricsAggregation.SingleValue approxCatCorrelation = catBucket.getAggregations().get("correlates"); + NumericMetricsAggregation.SingleValue approxDogCorrelation = dogBucket.getAggregations().get("correlates"); + + assertThat(approxCatCorrelation.value(), closeTo(catCorrelation, 0.1)); + assertThat(approxDogCorrelation.value(), closeTo(dogCorrelation, 0.1)); + } + ); + } ); - - SearchResponse countCorrelations = client().prepareSearch("data") - .setSize(0) - .setTrackTotalHits(false) - .addAggregation(AggregationBuilders.terms("buckets").field("term").subAggregation(aggs.v1()).subAggregation(aggs.v2())) - .get(); - - Terms terms = countCorrelations.getAggregations().get("buckets"); - Terms.Bucket catBucket = terms.getBucketByKey("cat"); - Terms.Bucket dogBucket = terms.getBucketByKey("dog"); - NumericMetricsAggregation.SingleValue approxCatCorrelation = catBucket.getAggregations().get("correlates"); - NumericMetricsAggregation.SingleValue approxDogCorrelation = dogBucket.getAggregations().get("correlates"); - - assertThat(approxCatCorrelation.value(), closeTo(catCorrelation, 0.1)); - assertThat(approxDogCorrelation.value(), closeTo(dogCorrelation, 0.1)); } private static Tuple buildRangeAggAndSetExpectations( diff --git a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/CategorizeTextAggregationIT.java b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/CategorizeTextAggregationIT.java index d356fe49f9120..d4b29e3c92538 100644 --- a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/CategorizeTextAggregationIT.java +++ b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/CategorizeTextAggregationIT.java @@ -10,7 +10,6 @@ import org.elasticsearch.action.bulk.BulkRequestBuilder; import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.core.TimeValue; import org.elasticsearch.search.aggregations.AggregationBuilders; @@ -21,6 +20,7 @@ import org.elasticsearch.xpack.ml.support.BaseMlIntegTestCase; import org.junit.Before; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertResponse; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; @@ -39,53 +39,69 @@ public void setupCluster() { } public void testAggregation() { - SearchResponse response = prepareSearch(DATA_INDEX).setSize(0) - .setTrackTotalHits(false) - .addAggregation( - new CategorizeTextAggregationBuilder("categorize", "msg").subAggregation(AggregationBuilders.max("max").field("time")) - .subAggregation(AggregationBuilders.min("min").field("time")) - ) - .get(); - - InternalCategorizationAggregation agg = response.getAggregations().get("categorize"); - assertThat(agg.getBuckets(), hasSize(3)); - - assertCategorizationBucket(agg.getBuckets().get(0), "Node started", 3); - assertCategorizationBucket(agg.getBuckets().get(1), "Failed to shutdown error org.aaaa.bbbb.Cccc line caused by foo exception", 2); - assertCategorizationBucket(agg.getBuckets().get(2), "Node stopped", 1); + assertResponse( + prepareSearch(DATA_INDEX).setSize(0) + .setTrackTotalHits(false) + .addAggregation( + new 
CategorizeTextAggregationBuilder("categorize", "msg").subAggregation(AggregationBuilders.max("max").field("time")) + .subAggregation(AggregationBuilders.min("min").field("time")) + ), + response -> { + + InternalCategorizationAggregation agg = response.getAggregations().get("categorize"); + assertThat(agg.getBuckets(), hasSize(3)); + + assertCategorizationBucket(agg.getBuckets().get(0), "Node started", 3); + assertCategorizationBucket( + agg.getBuckets().get(1), + "Failed to shutdown error org.aaaa.bbbb.Cccc line caused by foo exception", + 2 + ); + assertCategorizationBucket(agg.getBuckets().get(2), "Node stopped", 1); + } + ); } public void testAggregationWithOnlyOneBucket() { - SearchResponse response = prepareSearch(DATA_INDEX).setSize(0) - .setTrackTotalHits(false) - .addAggregation( - new CategorizeTextAggregationBuilder("categorize", "msg").size(1) - .subAggregation(AggregationBuilders.max("max").field("time")) - .subAggregation(AggregationBuilders.min("min").field("time")) - ) - .get(); - InternalCategorizationAggregation agg = response.getAggregations().get("categorize"); - assertThat(agg.getBuckets(), hasSize(1)); - - assertCategorizationBucket(agg.getBuckets().get(0), "Node started", 3); + assertResponse( + prepareSearch(DATA_INDEX).setSize(0) + .setTrackTotalHits(false) + .addAggregation( + new CategorizeTextAggregationBuilder("categorize", "msg").size(1) + .subAggregation(AggregationBuilders.max("max").field("time")) + .subAggregation(AggregationBuilders.min("min").field("time")) + ), + response -> { + InternalCategorizationAggregation agg = response.getAggregations().get("categorize"); + assertThat(agg.getBuckets(), hasSize(1)); + assertCategorizationBucket(agg.getBuckets().get(0), "Node started", 3); + } + ); } public void testAggregationWithBroadCategories() { - SearchResponse response = prepareSearch(DATA_INDEX).setSize(0) - .setTrackTotalHits(false) - .addAggregation( - // Overriding the similarity threshold to just 11% (default is 70%) results in the - // "Node started" and "Node stopped" messages being grouped in the same category - new CategorizeTextAggregationBuilder("categorize", "msg").setSimilarityThreshold(11) - .subAggregation(AggregationBuilders.max("max").field("time")) - .subAggregation(AggregationBuilders.min("min").field("time")) - ) - .get(); - InternalCategorizationAggregation agg = response.getAggregations().get("categorize"); - assertThat(agg.getBuckets(), hasSize(2)); - - assertCategorizationBucket(agg.getBuckets().get(0), "Node", 4); - assertCategorizationBucket(agg.getBuckets().get(1), "Failed to shutdown error org.aaaa.bbbb.Cccc line caused by foo exception", 2); + assertResponse( + prepareSearch(DATA_INDEX).setSize(0) + .setTrackTotalHits(false) + .addAggregation( + // Overriding the similarity threshold to just 11% (default is 70%) results in the + // "Node started" and "Node stopped" messages being grouped in the same category + new CategorizeTextAggregationBuilder("categorize", "msg").setSimilarityThreshold(11) + .subAggregation(AggregationBuilders.max("max").field("time")) + .subAggregation(AggregationBuilders.min("min").field("time")) + ), + response -> { + InternalCategorizationAggregation agg = response.getAggregations().get("categorize"); + assertThat(agg.getBuckets(), hasSize(2)); + + assertCategorizationBucket(agg.getBuckets().get(0), "Node", 4); + assertCategorizationBucket( + agg.getBuckets().get(1), + "Failed to shutdown error org.aaaa.bbbb.Cccc line caused by foo exception", + 2 + ); + } + ); } private void 
assertCategorizationBucket(InternalCategorizationAggregation.Bucket bucket, String key, long docCount) { diff --git a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/DataFrameAnalyticsCRUDIT.java b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/DataFrameAnalyticsCRUDIT.java index 12bc1a6019119..a8e97263647ea 100644 --- a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/DataFrameAnalyticsCRUDIT.java +++ b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/DataFrameAnalyticsCRUDIT.java @@ -25,6 +25,7 @@ import java.util.concurrent.atomic.AtomicReference; import static java.util.Collections.emptyMap; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.hamcrest.CoreMatchers.nullValue; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; @@ -96,7 +97,7 @@ public void testDeleteConfigWithStateAndStats() throws InterruptedException { client().execute(DeleteDataFrameAnalyticsAction.INSTANCE, new DeleteDataFrameAnalyticsAction.Request(configId)).actionGet(); - assertThat( + assertHitCount( originSettingClient.prepareSearch(".ml-state-*") .setQuery( QueryBuilders.idsQuery() @@ -105,21 +106,15 @@ public void testDeleteConfigWithStateAndStats() throws InterruptedException { "data_frame_analytics-delete-config-with-state-and-stats-progress" ) ) - .setTrackTotalHits(true) - .get() - .getHits() - .getTotalHits().value, - equalTo(0L) + .setTrackTotalHits(true), + 0 ); - assertThat( + assertHitCount( originSettingClient.prepareSearch(".ml-stats-*") .setQuery(QueryBuilders.idsQuery().addIds("delete-config-with-state-and-stats_1", "delete-config-with-state-and-stats_2")) - .setTrackTotalHits(true) - .get() - .getHits() - .getTotalHits().value, - equalTo(0L) + .setTrackTotalHits(true), + 0 ); } diff --git a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/DatafeedCcsIT.java b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/DatafeedCcsIT.java index a5a4103d0cb7a..b71ecd4858533 100644 --- a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/DatafeedCcsIT.java +++ b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/DatafeedCcsIT.java @@ -191,7 +191,11 @@ private boolean doesLocalAuditMessageExist(String message) { SearchResponse response = client(LOCAL_CLUSTER).prepareSearch(".ml-notifications*") .setQuery(new MatchPhraseQueryBuilder("message", message)) .get(); - return response.getHits().getTotalHits().value > 0; + try { + return response.getHits().getTotalHits().value > 0; + } finally { + response.decRef(); + } } catch (ElasticsearchException e) { return false; } diff --git a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/IndexLayoutIT.java b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/IndexLayoutIT.java index db88cb5dc266e..99052c771fb49 100644 --- a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/IndexLayoutIT.java +++ b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/IndexLayoutIT.java @@ -26,6 +26,7 @@ import java.time.temporal.ChronoUnit; import java.util.Collections; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static 
org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN; import static org.hamcrest.Matchers.arrayContaining; import static org.hamcrest.Matchers.equalTo; @@ -140,10 +141,7 @@ public void testForceCloseDoesNotCreateState() throws Exception { arrayContaining(".ml-state-000001") ); - assertThat( - client.prepareSearch(AnomalyDetectorsIndex.jobStateIndexPattern()).setTrackTotalHits(true).get().getHits().getTotalHits().value, - equalTo(0L) - ); + assertHitCount(client.prepareSearch(AnomalyDetectorsIndex.jobStateIndexPattern()).setTrackTotalHits(true), 0); } } diff --git a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/JobStorageDeletionTaskIT.java b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/JobStorageDeletionTaskIT.java index aa8b29228b790..b7bd8fed3e83c 100644 --- a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/JobStorageDeletionTaskIT.java +++ b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/JobStorageDeletionTaskIT.java @@ -43,6 +43,7 @@ import java.util.HashSet; import java.util.concurrent.atomic.AtomicReference; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.nullValue; @@ -202,7 +203,7 @@ public void testDeleteDedicatedJobWithDataInShared() throws Exception { ); // Make sure all results referencing the dedicated job are gone - assertThat( + assertHitCount( prepareSearch().setIndices(AnomalyDetectorsIndex.jobResultsIndexPrefix() + "*") .setIndicesOptions(IndicesOptions.lenientExpandOpenHidden()) .setTrackTotalHits(true) @@ -210,11 +211,8 @@ public void testDeleteDedicatedJobWithDataInShared() throws Exception { .setSource( SearchSourceBuilder.searchSource() .query(QueryBuilders.boolQuery().filter(QueryBuilders.termQuery(Job.ID.getPreferredName(), jobIdDedicated))) - ) - .get() - .getHits() - .getTotalHits().value, - equalTo(0L) + ), + 0 ); } diff --git a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/TrainedModelCRUDIT.java b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/TrainedModelCRUDIT.java index 9b1f4c856df85..3a08b56ed38a4 100644 --- a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/TrainedModelCRUDIT.java +++ b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/TrainedModelCRUDIT.java @@ -23,6 +23,7 @@ import java.util.Base64; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.isA; @@ -102,15 +103,7 @@ public void testPutTrainedModelAndDefinition() { client().execute(DeleteTrainedModelAction.INSTANCE, new DeleteTrainedModelAction.Request(modelId)).actionGet(); - assertThat( - client().prepareSearch(InferenceIndexConstants.nativeDefinitionStore()) - .setTrackTotalHitsUpTo(1) - .setSize(0) - .get() - .getHits() - .getTotalHits().value, - equalTo(0L) - ); + assertHitCount(client().prepareSearch(InferenceIndexConstants.nativeDefinitionStore()).setTrackTotalHitsUpTo(1).setSize(0), 0); } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/delayeddatacheck/DatafeedDelayedDataDetector.java 
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/delayeddatacheck/DatafeedDelayedDataDetector.java index 341746a097bb2..0374dbf8eb1fe 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/delayeddatacheck/DatafeedDelayedDataDetector.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/delayeddatacheck/DatafeedDelayedDataDetector.java @@ -135,16 +135,20 @@ private Map checkCurrentBucketEventCount(long start, long end) { SearchRequest searchRequest = new SearchRequest(datafeedIndices).source(searchSourceBuilder).indicesOptions(indicesOptions); try (ThreadContext.StoredContext ignore = client.threadPool().getThreadContext().stashWithOrigin(ML_ORIGIN)) { SearchResponse response = client.execute(TransportSearchAction.TYPE, searchRequest).actionGet(); - List buckets = ((Histogram) response.getAggregations().get(DATE_BUCKETS)).getBuckets(); - Map hashMap = Maps.newMapWithExpectedSize(buckets.size()); - for (Histogram.Bucket bucket : buckets) { - long bucketTime = toHistogramKeyToEpoch(bucket.getKey()); - if (bucketTime < 0) { - throw new IllegalStateException("Histogram key [" + bucket.getKey() + "] cannot be converted to a timestamp"); + try { + List buckets = ((Histogram) response.getAggregations().get(DATE_BUCKETS)).getBuckets(); + Map hashMap = Maps.newMapWithExpectedSize(buckets.size()); + for (Histogram.Bucket bucket : buckets) { + long bucketTime = toHistogramKeyToEpoch(bucket.getKey()); + if (bucketTime < 0) { + throw new IllegalStateException("Histogram key [" + bucket.getKey() + "] cannot be converted to a timestamp"); + } + hashMap.put(bucketTime, bucket.getDocCount()); } - hashMap.put(bucketTime, bucket.getDocCount()); + return hashMap; + } finally { + response.decRef(); } - return hashMap; } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AbstractAggregationDataExtractor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AbstractAggregationDataExtractor.java index 421581e2622ab..fd57419abaa83 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AbstractAggregationDataExtractor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AbstractAggregationDataExtractor.java @@ -122,10 +122,14 @@ private Aggregations search() { T searchRequest = buildSearchRequest(buildBaseSearchSource()); assert searchRequest.request().allowPartialSearchResults() == false; SearchResponse searchResponse = executeSearchRequest(searchRequest); - checkForSkippedClusters(searchResponse); - LOGGER.debug("[{}] Search response was obtained", context.jobId); - timingStatsReporter.reportSearchDuration(searchResponse.getTook()); - return validateAggs(searchResponse.getAggregations()); + try { + checkForSkippedClusters(searchResponse); + LOGGER.debug("[{}] Search response was obtained", context.jobId); + timingStatsReporter.reportSearchDuration(searchResponse.getTook()); + return validateAggs(searchResponse.getAggregations()); + } finally { + searchResponse.decRef(); + } } private void initAggregationProcessor(Aggregations aggs) throws IOException { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/CompositeAggregationDataExtractor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/CompositeAggregationDataExtractor.java index 859dd506a7712..d4bd75c92eb18 100644 
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/CompositeAggregationDataExtractor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/CompositeAggregationDataExtractor.java @@ -138,17 +138,21 @@ private Aggregations search() { searchSourceBuilder.aggregation(compositeAggregationBuilder); ActionRequestBuilder searchRequest = requestBuilder.build(searchSourceBuilder); SearchResponse searchResponse = executeSearchRequest(searchRequest); - LOGGER.trace(() -> "[" + context.jobId + "] Search composite response was obtained"); - timingStatsReporter.reportSearchDuration(searchResponse.getTook()); - Aggregations aggregations = searchResponse.getAggregations(); - if (aggregations == null) { - return null; - } - CompositeAggregation compositeAgg = aggregations.get(compositeAggregationBuilder.getName()); - if (compositeAgg == null || compositeAgg.getBuckets().isEmpty()) { - return null; + try { + LOGGER.trace(() -> "[" + context.jobId + "] Search composite response was obtained"); + timingStatsReporter.reportSearchDuration(searchResponse.getTook()); + Aggregations aggregations = searchResponse.getAggregations(); + if (aggregations == null) { + return null; + } + CompositeAggregation compositeAgg = aggregations.get(compositeAggregationBuilder.getName()); + if (compositeAgg == null || compositeAgg.getBuckets().isEmpty()) { + return null; + } + return aggregations; + } finally { + searchResponse.decRef(); } - return aggregations; } protected SearchResponse executeSearchRequest(ActionRequestBuilder searchRequestBuilder) { @@ -158,7 +162,15 @@ protected SearchResponse executeSearchRequest(ActionRequestBuilder 0) { - Aggregations aggregations = searchResponse.getAggregations(); - Min min = aggregations.get(EARLIEST_TIME); - earliestTime = (long) min.value(); - Max max = aggregations.get(LATEST_TIME); - latestTime = (long) max.value(); + try { + LOGGER.debug("[{}] Scrolling Data summary response was obtained", context.jobId); + timingStatsReporter.reportSearchDuration(searchResponse.getTook()); + + long earliestTime = 0; + long latestTime = 0; + long totalHits = searchResponse.getHits().getTotalHits().value; + if (totalHits > 0) { + Aggregations aggregations = searchResponse.getAggregations(); + Min min = aggregations.get(EARLIEST_TIME); + earliestTime = (long) min.value(); + Max max = aggregations.get(LATEST_TIME); + latestTime = (long) max.value(); + } + return new ScrolledDataSummary(earliestTime, latestTime, totalHits); + } finally { + searchResponse.decRef(); } - return new ScrolledDataSummary(earliestTime, latestTime, totalHits); } private DataSummary newAggregatedDataSummary() { @@ -253,20 +265,24 @@ private DataSummary newAggregatedDataSummary() { ActionRequestBuilder searchRequestBuilder = dataExtractorFactory instanceof RollupDataExtractorFactory ? rollupRangeSearchRequest() : rangeSearchRequest(); SearchResponse searchResponse = executeSearchRequest(searchRequestBuilder); - LOGGER.debug("[{}] Aggregating Data summary response was obtained", context.jobId); - timingStatsReporter.reportSearchDuration(searchResponse.getTook()); - - Aggregations aggregations = searchResponse.getAggregations(); - // This can happen if all the indices the datafeed is searching are deleted after it started. - // Note that unlike the scrolled data summary method above we cannot check for this situation - // by checking for zero hits, because aggregations that work on rollups return zero hits even - // when they retrieve data. 
- if (aggregations == null) { - return AggregatedDataSummary.noDataSummary(context.histogramInterval); + try { + LOGGER.debug("[{}] Aggregating Data summary response was obtained", context.jobId); + timingStatsReporter.reportSearchDuration(searchResponse.getTook()); + + Aggregations aggregations = searchResponse.getAggregations(); + // This can happen if all the indices the datafeed is searching are deleted after it started. + // Note that unlike the scrolled data summary method above we cannot check for this situation + // by checking for zero hits, because aggregations that work on rollups return zero hits even + // when they retrieve data. + if (aggregations == null) { + return AggregatedDataSummary.noDataSummary(context.histogramInterval); + } + Min min = aggregations.get(EARLIEST_TIME); + Max max = aggregations.get(LATEST_TIME); + return new AggregatedDataSummary(min.value(), max.value(), context.histogramInterval); + } finally { + searchResponse.decRef(); } - Min min = aggregations.get(EARLIEST_TIME); - Max max = aggregations.get(LATEST_TIME); - return new AggregatedDataSummary(min.value(), max.value(), context.histogramInterval); } private SearchSourceBuilder rangeSearchBuilder() { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractor.java index c721462697b65..4cfcf6509faa0 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractor.java @@ -122,12 +122,15 @@ private Optional tryNextStream() throws IOException { protected InputStream initScroll(long startTimestamp) throws IOException { logger.debug("[{}] Initializing scroll with start time [{}]", context.jobId, startTimestamp); SearchResponse searchResponse = executeSearchRequest(buildSearchRequest(startTimestamp)); - logger.debug("[{}] Search response was obtained", context.jobId); - timingStatsReporter.reportSearchDuration(searchResponse.getTook()); - scrollId = searchResponse.getScrollId(); - SearchHit hits[] = searchResponse.getHits().getHits(); - searchResponse = null; - return processAndConsumeSearchHits(hits); + try { + logger.debug("[{}] Search response was obtained", context.jobId); + timingStatsReporter.reportSearchDuration(searchResponse.getTook()); + scrollId = searchResponse.getScrollId(); + SearchHit hits[] = searchResponse.getHits().getHits(); + return processAndConsumeSearchHits(hits); + } finally { + searchResponse.decRef(); + } } protected SearchResponse executeSearchRequest(SearchRequestBuilder searchRequestBuilder) { @@ -137,11 +140,17 @@ protected SearchResponse executeSearchRequest(SearchRequestBuilder searchRequest client, searchRequestBuilder::get ); + boolean success = false; try { checkForSkippedClusters(searchResponse); + success = true; } catch (ResourceNotFoundException e) { clearScrollLoggingExceptions(searchResponse.getScrollId()); throw e; + } finally { + if (success == false) { + searchResponse.decRef(); + } } return searchResponse; } @@ -213,23 +222,28 @@ private InputStream processAndConsumeSearchHits(SearchHit hits[]) throws IOExcep private InputStream continueScroll() throws IOException { logger.debug("[{}] Continuing scroll with id [{}]", context.jobId, scrollId); - SearchResponse searchResponse; + SearchResponse searchResponse = null; try { - 
searchResponse = executeSearchScrollRequest(scrollId); - } catch (SearchPhaseExecutionException searchExecutionException) { - if (searchHasShardFailure) { - throw searchExecutionException; + try { + searchResponse = executeSearchScrollRequest(scrollId); + } catch (SearchPhaseExecutionException searchExecutionException) { + if (searchHasShardFailure) { + throw searchExecutionException; + } + logger.debug("[{}] search failed due to SearchPhaseExecutionException. Will attempt again with new scroll", context.jobId); + markScrollAsErrored(); + searchResponse = executeSearchRequest(buildSearchRequest(lastTimestamp == null ? context.start : lastTimestamp)); + } + logger.debug("[{}] Search response was obtained", context.jobId); + timingStatsReporter.reportSearchDuration(searchResponse.getTook()); + scrollId = searchResponse.getScrollId(); + SearchHit hits[] = searchResponse.getHits().getHits(); + return processAndConsumeSearchHits(hits); + } finally { + if (searchResponse != null) { + searchResponse.decRef(); } - logger.debug("[{}] search failed due to SearchPhaseExecutionException. Will attempt again with new scroll", context.jobId); - markScrollAsErrored(); - searchResponse = executeSearchRequest(buildSearchRequest(lastTimestamp == null ? context.start : lastTimestamp)); } - logger.debug("[{}] Search response was obtained", context.jobId); - timingStatsReporter.reportSearchDuration(searchResponse.getTook()); - scrollId = searchResponse.getScrollId(); - SearchHit hits[] = searchResponse.getHits().getHits(); - searchResponse = null; - return processAndConsumeSearchHits(hits); } void markScrollAsErrored() { @@ -250,11 +264,17 @@ protected SearchResponse executeSearchScrollRequest(String scrollId) { client, () -> new SearchScrollRequestBuilder(client).setScroll(SCROLL_TIMEOUT).setScrollId(scrollId).get() ); + boolean success = false; try { checkForSkippedClusters(searchResponse); + success = true; } catch (ResourceNotFoundException e) { clearScrollLoggingExceptions(searchResponse.getScrollId()); throw e; + } finally { + if (success == false) { + searchResponse.decRef(); + } } return searchResponse; } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractor.java index ab6ee250df5c6..6c3fb28fe2c83 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/extractor/DataFrameDataExtractor.java @@ -175,14 +175,18 @@ private List tryRequestWithSearchResponse(Supplier request) // We've set allow_partial_search_results to false which means if something // goes wrong the request will throw. 
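+            // A minimal sketch of the release pattern applied throughout this change (names illustrative):
+            //   SearchResponse response = execute(request);
+            //   try {
+            //       // ... consume hits / aggregations ...
+            //   } finally {
+            //       response.decRef(); // release the pooled response on every exit path
+            //   }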
SearchResponse searchResponse = request.get(); - LOGGER.trace(() -> "[" + context.jobId + "] Search response was obtained"); + try { + LOGGER.trace(() -> "[" + context.jobId + "] Search response was obtained"); - List rows = processSearchResponse(searchResponse); + List rows = processSearchResponse(searchResponse); - // Request was successfully executed and processed so we can restore the flag to retry if a future failure occurs - hasPreviousSearchFailed = false; + // Request was successfully executed and processed so we can restore the flag to retry if a future failure occurs + hasPreviousSearchFailed = false; - return rows; + return rows; + } finally { + searchResponse.decRef(); + } } catch (Exception e) { if (hasPreviousSearchFailed) { throw e; @@ -370,9 +374,13 @@ public ExtractedFields getExtractedFields() { public DataSummary collectDataSummary() { SearchRequestBuilder searchRequestBuilder = buildDataSummarySearchRequestBuilder(); SearchResponse searchResponse = executeSearchRequest(searchRequestBuilder); - long rows = searchResponse.getHits().getTotalHits().value; - LOGGER.debug(() -> format("[%s] Data summary rows [%s]", context.jobId, rows)); - return new DataSummary(rows, organicFeatures.length + processedFeatures.length); + try { + long rows = searchResponse.getHits().getTotalHits().value; + LOGGER.debug(() -> format("[%s] Data summary rows [%s]", context.jobId, rows)); + return new DataSummary(rows, organicFeatures.length + processedFeatures.length); + } finally { + searchResponse.decRef(); + } } public void collectDataSummaryAsync(ActionListener dataSummaryActionListener) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/inference/InferenceRunner.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/inference/InferenceRunner.java index cc59903436e2f..c9ce6e0d4e3c7 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/inference/InferenceRunner.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/inference/InferenceRunner.java @@ -155,21 +155,24 @@ private InferenceState restoreInferenceState() { client, () -> client.search(searchRequest).actionGet() ); - - Max maxIncrementalIdAgg = searchResponse.getAggregations().get(DestinationIndex.INCREMENTAL_ID); - long processedTestDocCount = searchResponse.getHits().getTotalHits().value; - Long lastIncrementalId = processedTestDocCount == 0 ? null : (long) maxIncrementalIdAgg.value(); - if (lastIncrementalId != null) { - LOGGER.debug( - () -> format( - "[%s] Resuming inference; last incremental id [%s]; processed test doc count [%s]", - config.getId(), - lastIncrementalId, - processedTestDocCount - ) - ); + try { + Max maxIncrementalIdAgg = searchResponse.getAggregations().get(DestinationIndex.INCREMENTAL_ID); + long processedTestDocCount = searchResponse.getHits().getTotalHits().value; + Long lastIncrementalId = processedTestDocCount == 0 ? 
null : (long) maxIncrementalIdAgg.value(); + if (lastIncrementalId != null) { + LOGGER.debug( + () -> format( + "[%s] Resuming inference; last incremental id [%s]; processed test doc count [%s]", + config.getId(), + lastIncrementalId, + processedTestDocCount + ) + ); + } + return new InferenceState(lastIncrementalId, processedTestDocCount); + } finally { + searchResponse.decRef(); } - return new InferenceState(lastIncrementalId, processedTestDocCount); } // Visible for testing diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/AnalyticsProcessManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/AnalyticsProcessManager.java index de306b2ece1a2..d4c10e25a2ade 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/AnalyticsProcessManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/AnalyticsProcessManager.java @@ -170,7 +170,11 @@ private boolean hasModelState(DataFrameAnalyticsConfig config) { .setFetchSource(false) .setQuery(QueryBuilders.idsQuery().addIds(config.getAnalysis().getStateDocIdPrefix(config.getId()) + "1")) .get(); - return searchResponse.getHits().getHits().length == 1; + try { + return searchResponse.getHits().getHits().length == 1; + } finally { + searchResponse.decRef(); + } } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/NativeAnalyticsProcess.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/NativeAnalyticsProcess.java index a77280d7ba0c8..ed59c7f86fdd9 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/NativeAnalyticsProcess.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/process/NativeAnalyticsProcess.java @@ -85,12 +85,16 @@ public void restoreState(Client client, String stateDocIdPrefix) throws IOExcept .setSize(1) .setQuery(QueryBuilders.idsQuery().addIds(stateDocIdPrefix + ++docNum)) .get(); - if (stateResponse.getHits().getHits().length == 0) { - break; + try { + if (stateResponse.getHits().getHits().length == 0) { + break; + } + SearchHit stateDoc = stateResponse.getHits().getAt(0); + logger.debug(() -> format("[%s] Restoring state document [%s]", config.jobId(), stateDoc.getId())); + StateToProcessWriterHelper.writeStateToStream(stateDoc.getSourceRef(), restoreStream); + } finally { + stateResponse.decRef(); } - SearchHit stateDoc = stateResponse.getHits().getAt(0); - logger.debug(() -> format("[%s] Restoring state document [%s]", config.jobId(), stateDoc.getId())); - StateToProcessWriterHelper.writeStateToStream(stateDoc.getSourceRef(), restoreStream); } } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/traintestsplit/TrainTestSplitterFactory.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/traintestsplit/TrainTestSplitterFactory.java index d3f33b91936d8..ebe4295f8efbf 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/traintestsplit/TrainTestSplitterFactory.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/traintestsplit/TrainTestSplitterFactory.java @@ -64,13 +64,17 @@ private TrainTestSplitter createSingleClassSplitter(Regression regression) { client, searchRequestBuilder::get ); - return new SingleClassReservoirTrainTestSplitter( - fieldNames, - regression.getDependentVariable(), - regression.getTrainingPercent(), - regression.getRandomizeSeed(), 
- searchResponse.getHits().getTotalHits().value - ); + try { + return new SingleClassReservoirTrainTestSplitter( + fieldNames, + regression.getDependentVariable(), + regression.getTrainingPercent(), + regression.getRandomizeSeed(), + searchResponse.getHits().getTotalHits().value + ); + } finally { + searchResponse.decRef(); + } } catch (Exception e) { String msg = "[" + config.getId() + "] Error searching total number of training docs"; LOGGER.error(msg, e); @@ -96,20 +100,24 @@ private TrainTestSplitter createStratifiedSplitter(Classification classification client, searchRequestBuilder::get ); - Aggregations aggs = searchResponse.getAggregations(); - Terms terms = aggs.get(aggName); - Map classCounts = new HashMap<>(); - for (Terms.Bucket bucket : terms.getBuckets()) { - classCounts.put(String.valueOf(bucket.getKey()), bucket.getDocCount()); - } + try { + Aggregations aggs = searchResponse.getAggregations(); + Terms terms = aggs.get(aggName); + Map classCounts = new HashMap<>(); + for (Terms.Bucket bucket : terms.getBuckets()) { + classCounts.put(String.valueOf(bucket.getKey()), bucket.getDocCount()); + } - return new StratifiedTrainTestSplitter( - fieldNames, - classification.getDependentVariable(), - classCounts, - classification.getTrainingPercent(), - classification.getRandomizeSeed() - ); + return new StratifiedTrainTestSplitter( + fieldNames, + classification.getDependentVariable(), + classCounts, + classification.getTrainingPercent(), + classification.getRandomizeSeed() + ); + } finally { + searchResponse.decRef(); + } } catch (Exception e) { String msg = "[" + config.getId() + "] Dependent variable terms search failed"; LOGGER.error(msg, e); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/persistence/ChunkedTrainedModelRestorer.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/persistence/ChunkedTrainedModelRestorer.java index 3ace40e0deb6b..0a34915083982 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/persistence/ChunkedTrainedModelRestorer.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/persistence/ChunkedTrainedModelRestorer.java @@ -158,54 +158,58 @@ private void doSearch( SEARCH_RETRY_LIMIT, SEARCH_FAILURE_RETRY_WAIT_TIME ); - if (searchResponse.getHits().getHits().length == 0) { - errorConsumer.accept(new ResourceNotFoundException(Messages.getMessage(Messages.MODEL_DEFINITION_NOT_FOUND, modelId))); - return; - } + try { + if (searchResponse.getHits().getHits().length == 0) { + errorConsumer.accept(new ResourceNotFoundException(Messages.getMessage(Messages.MODEL_DEFINITION_NOT_FOUND, modelId))); + return; + } - // Set lastNum to a non-zero to prevent an infinite loop of - // search after requests in the absolute worse case where - // it has all gone wrong. - // Docs are numbered 0..N. we must have seen at least - // this many docs so far. - int lastNum = numDocsWritten - 1; - for (SearchHit hit : searchResponse.getHits().getHits()) { - logger.debug(() -> format("[%s] Restoring model definition doc with id [%s]", modelId, hit.getId())); - try { - TrainedModelDefinitionDoc doc = parseModelDefinitionDocLenientlyFromSource( - hit.getSourceRef(), - modelId, - xContentRegistry - ); - lastNum = doc.getDocNum(); + // Set lastNum to a non-zero to prevent an infinite loop of + // search after requests in the absolute worse case where + // it has all gone wrong. + // Docs are numbered 0..N. we must have seen at least + // this many docs so far. 
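+                // lastNum doubles as the search_after tiebreaker when the next page is requested below.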
+ int lastNum = numDocsWritten - 1; + for (SearchHit hit : searchResponse.getHits().getHits()) { + logger.debug(() -> format("[%s] Restoring model definition doc with id [%s]", modelId, hit.getId())); + try { + TrainedModelDefinitionDoc doc = parseModelDefinitionDocLenientlyFromSource( + hit.getSourceRef(), + modelId, + xContentRegistry + ); + lastNum = doc.getDocNum(); - boolean continueSearching = modelConsumer.apply(doc); - if (continueSearching == false) { - // signal the search has finished early - successConsumer.accept(Boolean.FALSE); + boolean continueSearching = modelConsumer.apply(doc); + if (continueSearching == false) { + // signal the search has finished early + successConsumer.accept(Boolean.FALSE); + return; + } + + } catch (IOException e) { + logger.error(() -> "[" + modelId + "] error writing model definition", e); + errorConsumer.accept(e); return; } - - } catch (IOException e) { - logger.error(() -> "[" + modelId + "] error writing model definition", e); - errorConsumer.accept(e); - return; } - } - numDocsWritten += searchResponse.getHits().getHits().length; + numDocsWritten += searchResponse.getHits().getHits().length; - boolean endOfSearch = searchResponse.getHits().getHits().length < searchSize - || searchResponse.getHits().getTotalHits().value == numDocsWritten; + boolean endOfSearch = searchResponse.getHits().getHits().length < searchSize + || searchResponse.getHits().getTotalHits().value == numDocsWritten; - if (endOfSearch) { - successConsumer.accept(Boolean.TRUE); - } else { - // search again with after - SearchHit lastHit = searchResponse.getHits().getAt(searchResponse.getHits().getHits().length - 1); - SearchRequestBuilder searchRequestBuilder = buildSearchBuilder(client, modelId, index, searchSize); - searchRequestBuilder.searchAfter(new Object[] { lastHit.getIndex(), lastNum }); - executorService.execute(() -> doSearch(searchRequestBuilder.request(), modelConsumer, successConsumer, errorConsumer)); + if (endOfSearch) { + successConsumer.accept(Boolean.TRUE); + } else { + // search again with after + SearchHit lastHit = searchResponse.getHits().getAt(searchResponse.getHits().getHits().length - 1); + SearchRequestBuilder searchRequestBuilder = buildSearchBuilder(client, modelId, index, searchSize); + searchRequestBuilder.searchAfter(new Object[] { lastHit.getIndex(), lastNum }); + executorService.execute(() -> doSearch(searchRequestBuilder.request(), modelConsumer, successConsumer, errorConsumer)); + } + } finally { + searchResponse.decRef(); } } catch (Exception e) { if (ExceptionsHelper.unwrapCause(e) instanceof ResourceNotFoundException) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsPersister.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsPersister.java index 761bfa16e66bb..6fbe16192a875 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsPersister.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsPersister.java @@ -328,10 +328,14 @@ public void persistQuantiles(Quantiles quantiles, Supplier shouldRetry) shouldRetry, retryMessage -> logger.debug("[{}] {} {}", jobId, quantilesDocId, retryMessage) ); - String indexOrAlias = searchResponse.getHits().getHits().length > 0 - ? searchResponse.getHits().getHits()[0].getIndex() - : AnomalyDetectorsIndex.jobStateIndexWriteAlias(); - + final String indexOrAlias; + try { + indexOrAlias = searchResponse.getHits().getHits().length > 0 + ? 
searchResponse.getHits().getHits()[0].getIndex() + : AnomalyDetectorsIndex.jobStateIndexWriteAlias(); + } finally { + searchResponse.decRef(); + } Persistable persistable = new Persistable(indexOrAlias, quantiles.getJobId(), quantiles, quantilesDocId); persistable.persist(shouldRetry, AnomalyDetectorsIndex.jobStateIndexWriteAlias().equals(indexOrAlias)); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsProvider.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsProvider.java index d309ee2e5dc95..7b41f3e055874 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsProvider.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsProvider.java @@ -1422,24 +1422,27 @@ public QueryPage modelPlot(String jobId, int from, int size) { .setTrackTotalHits(true) .get(); } - - List results = new ArrayList<>(); - - for (SearchHit hit : searchResponse.getHits().getHits()) { - BytesReference source = hit.getSourceRef(); - try ( - InputStream stream = source.streamInput(); - XContentParser parser = XContentFactory.xContent(XContentType.JSON) - .createParser(XContentParserConfiguration.EMPTY.withDeprecationHandler(LoggingDeprecationHandler.INSTANCE), stream) - ) { - ModelPlot modelPlot = ModelPlot.LENIENT_PARSER.apply(parser, null); - results.add(modelPlot); - } catch (IOException e) { - throw new ElasticsearchParseException("failed to parse modelPlot", e); + try { + List results = new ArrayList<>(); + + for (SearchHit hit : searchResponse.getHits().getHits()) { + BytesReference source = hit.getSourceRef(); + try ( + InputStream stream = source.streamInput(); + XContentParser parser = XContentFactory.xContent(XContentType.JSON) + .createParser(XContentParserConfiguration.EMPTY.withDeprecationHandler(LoggingDeprecationHandler.INSTANCE), stream) + ) { + ModelPlot modelPlot = ModelPlot.LENIENT_PARSER.apply(parser, null); + results.add(modelPlot); + } catch (IOException e) { + throw new ElasticsearchParseException("failed to parse modelPlot", e); + } } - } - return new QueryPage<>(results, searchResponse.getHits().getTotalHits().value, ModelPlot.RESULTS_FIELD); + return new QueryPage<>(results, searchResponse.getHits().getTotalHits().value, ModelPlot.RESULTS_FIELD); + } finally { + searchResponse.decRef(); + } } public QueryPage categorizerStats(String jobId, int from, int size) { @@ -1456,24 +1459,27 @@ public QueryPage categorizerStats(String jobId, int from, int .setTrackTotalHits(true) .get(); } - - List results = new ArrayList<>(); - - for (SearchHit hit : searchResponse.getHits().getHits()) { - BytesReference source = hit.getSourceRef(); - try ( - InputStream stream = source.streamInput(); - XContentParser parser = XContentFactory.xContent(XContentType.JSON) - .createParser(XContentParserConfiguration.EMPTY.withDeprecationHandler(LoggingDeprecationHandler.INSTANCE), stream) - ) { - CategorizerStats categorizerStats = CategorizerStats.LENIENT_PARSER.apply(parser, null).build(); - results.add(categorizerStats); - } catch (IOException e) { - throw new ElasticsearchParseException("failed to parse categorizerStats", e); + try { + List results = new ArrayList<>(); + + for (SearchHit hit : searchResponse.getHits().getHits()) { + BytesReference source = hit.getSourceRef(); + try ( + InputStream stream = source.streamInput(); + XContentParser parser = XContentFactory.xContent(XContentType.JSON) + 
.createParser(XContentParserConfiguration.EMPTY.withDeprecationHandler(LoggingDeprecationHandler.INSTANCE), stream) + ) { + CategorizerStats categorizerStats = CategorizerStats.LENIENT_PARSER.apply(parser, null).build(); + results.add(categorizerStats); + } catch (IOException e) { + throw new ElasticsearchParseException("failed to parse categorizerStats", e); + } } - } - return new QueryPage<>(results, searchResponse.getHits().getTotalHits().value, ModelPlot.RESULTS_FIELD); + return new QueryPage<>(results, searchResponse.getHits().getTotalHits().value, ModelPlot.RESULTS_FIELD); + } finally { + searchResponse.decRef(); + } } /** diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/StateStreamer.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/StateStreamer.java index 20d29c1f0a2de..d97f564e0d50a 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/StateStreamer.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/StateStreamer.java @@ -76,17 +76,21 @@ public void restoreStateToStream(String jobId, ModelSnapshot modelSnapshot, Outp .setSize(1) .setQuery(QueryBuilders.idsQuery().addIds(stateDocId)) .get(); - if (stateResponse.getHits().getHits().length == 0) { - LOGGER.error( - "Expected {} documents for model state for {} snapshot {} but failed to find {}", - modelSnapshot.getSnapshotDocCount(), - jobId, - modelSnapshot.getSnapshotId(), - stateDocId - ); - break; + try { + if (stateResponse.getHits().getHits().length == 0) { + LOGGER.error( + "Expected {} documents for model state for {} snapshot {} but failed to find {}", + modelSnapshot.getSnapshotDocCount(), + jobId, + modelSnapshot.getSnapshotId(), + stateDocId + ); + break; + } + writeStateToStream(stateResponse.getHits().getAt(0).getSourceRef(), restoreStream); + } finally { + stateResponse.decRef(); } - writeStateToStream(stateResponse.getHits().getAt(0).getSourceRef(), restoreStream); } } @@ -108,10 +112,14 @@ public void restoreStateToStream(String jobId, ModelSnapshot modelSnapshot, Outp .setSize(1) .setQuery(QueryBuilders.idsQuery().addIds(docId)) .get(); - if (stateResponse.getHits().getHits().length == 0) { - break; + try { + if (stateResponse.getHits().getHits().length == 0) { + break; + } + writeStateToStream(stateResponse.getHits().getAt(0).getSourceRef(), restoreStream); + } finally { + stateResponse.decRef(); } - writeStateToStream(stateResponse.getHits().getAt(0).getSourceRef(), restoreStream); } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/IndexingStateProcessor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/IndexingStateProcessor.java index cb911c56ece40..56b0483e07c78 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/IndexingStateProcessor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/IndexingStateProcessor.java @@ -228,8 +228,12 @@ private String getConcreteIndexOrWriteAlias(String documentId) { () -> true, retryMessage -> LOGGER.debug("[{}] {} {}", jobId, documentId, retryMessage) ); - return searchResponse.getHits().getHits().length > 0 - ? searchResponse.getHits().getHits()[0].getIndex() - : AnomalyDetectorsIndex.jobStateIndexWriteAlias(); + try { + return searchResponse.getHits().getHits().length > 0 + ? 
searchResponse.getHits().getHits()[0].getIndex() + : AnomalyDetectorsIndex.jobStateIndexWriteAlias(); + } finally { + searchResponse.decRef(); + } } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/persistence/BatchedDocumentsIterator.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/persistence/BatchedDocumentsIterator.java index 0223898444115..c1e600aa66ba5 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/persistence/BatchedDocumentsIterator.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/persistence/BatchedDocumentsIterator.java @@ -85,8 +85,12 @@ public Deque next() { SearchScrollRequest searchScrollRequest = new SearchScrollRequest(scrollId).scroll(CONTEXT_ALIVE_DURATION); searchResponse = client.searchScroll(searchScrollRequest).actionGet(); } - scrollId = searchResponse.getScrollId(); - return mapHits(searchResponse); + try { + scrollId = searchResponse.getScrollId(); + return mapHits(searchResponse); + } finally { + searchResponse.decRef(); + } } private SearchResponse initScroll() { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/persistence/ResultsPersisterService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/persistence/ResultsPersisterService.java index e87fbf48ca421..5630f16e63351 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/persistence/ResultsPersisterService.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/persistence/ResultsPersisterService.java @@ -294,7 +294,10 @@ public SearchResponse searchWithRetry( client, () -> (isShutdown == false) && shouldRetry.get(), retryMsgHandler, - removeListener + removeListener.delegateFailure((l, r) -> { + r.mustIncRef(); + l.onResponse(r); + }) ); onGoingRetryableSearchActions.put(key, mlRetryableAction); mlRetryableAction.run(); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/persistence/SearchAfterDocumentsIterator.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/persistence/SearchAfterDocumentsIterator.java index 0d892209c8eda..f63f6e0549179 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/persistence/SearchAfterDocumentsIterator.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/persistence/SearchAfterDocumentsIterator.java @@ -108,10 +108,14 @@ public Deque next() { } SearchResponse searchResponse = doSearch(searchAfterFields()); - if (trackTotalHits && totalHits.get() == 0) { - totalHits.set(searchResponse.getHits().getTotalHits().value); + try { + if (trackTotalHits && totalHits.get() == 0) { + totalHits.set(searchResponse.getHits().getTotalHits().value); + } + return mapHits(searchResponse); + } finally { + searchResponse.decRef(); } - return mapHits(searchResponse); } private SearchResponse doSearch(Object[] searchAfterValues) { diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/search/ExecutableSearchInput.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/search/ExecutableSearchInput.java index 6dbcef08481d1..9d6186e9c1c48 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/search/ExecutableSearchInput.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/search/ExecutableSearchInput.java @@ -88,36 +88,40 @@ SearchInput.Result doExecute(WatchExecutionContext ctx, 
WatcherSearchTemplateReq client, () -> client.search(searchRequest).actionGet(timeout) ); + try { - if (logger.isDebugEnabled()) { - logger.debug("[{}] found [{}] hits", ctx.id(), response.getHits().getTotalHits().value); - } + if (logger.isDebugEnabled()) { + logger.debug("[{}] found [{}] hits", ctx.id(), response.getHits().getTotalHits().value); + } - final Payload payload; - final Params params; - if (request.isRestTotalHitsAsint()) { - params = new MapParams(Collections.singletonMap("rest_total_hits_as_int", "true")); - } else { - params = EMPTY_PARAMS; - } - if (input.getExtractKeys() != null) { - BytesReference bytes = XContentHelper.toXContent(response, XContentType.SMILE, params, false); - // EMPTY is safe here because we never use namedObject - try ( - XContentParser parser = XContentHelper.createParser( - NamedXContentRegistry.EMPTY, - LoggingDeprecationHandler.INSTANCE, - bytes, - XContentType.SMILE - ) - ) { - Map filteredKeys = XContentFilterKeysUtils.filterMapOrdered(input.getExtractKeys(), parser); - payload = new Payload.Simple(filteredKeys); + final Payload payload; + final Params params; + if (request.isRestTotalHitsAsint()) { + params = new MapParams(Collections.singletonMap("rest_total_hits_as_int", "true")); + } else { + params = EMPTY_PARAMS; + } + if (input.getExtractKeys() != null) { + BytesReference bytes = XContentHelper.toXContent(response, XContentType.SMILE, params, false); + // EMPTY is safe here because we never use namedObject + try ( + XContentParser parser = XContentHelper.createParser( + NamedXContentRegistry.EMPTY, + LoggingDeprecationHandler.INSTANCE, + bytes, + XContentType.SMILE + ) + ) { + Map filteredKeys = XContentFilterKeysUtils.filterMapOrdered(input.getExtractKeys(), parser); + payload = new Payload.Simple(filteredKeys); + } + } else { + payload = new Payload.XContent(response, params); } - } else { - payload = new Payload.XContent(response, params); - } - return new SearchInput.Result(request, payload); + return new SearchInput.Result(request, payload); + } finally { + response.decRef(); + } } } diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transform/search/ExecutableSearchTransform.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transform/search/ExecutableSearchTransform.java index 2498fb6b4e50b..2a67d48c98f4e 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transform/search/ExecutableSearchTransform.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transform/search/ExecutableSearchTransform.java @@ -62,13 +62,17 @@ public SearchTransform.Result execute(WatchExecutionContext ctx, Payload payload client, () -> client.search(searchRequest).actionGet(timeout) ); - final Params params; - if (request.isRestTotalHitsAsint()) { - params = new MapParams(Collections.singletonMap("rest_total_hits_as_int", "true")); - } else { - params = EMPTY_PARAMS; + try { + final Params params; + if (request.isRestTotalHitsAsint()) { + params = new MapParams(Collections.singletonMap("rest_total_hits_as_int", "true")); + } else { + params = EMPTY_PARAMS; + } + return new SearchTransform.Result(request, new Payload.XContent(resp, params)); + } finally { + resp.decRef(); } - return new SearchTransform.Result(request, new Payload.XContent(resp, params)); } catch (Exception e) { logger.error(() -> format("failed to execute [%s] transform for [%s]", TYPE, ctx.id()), e); return new SearchTransform.Result(request, e); From 
d6e8217b0090730a91eadd09f736bce0b93d14c4 Mon Sep 17 00:00:00 2001 From: Benjamin Trent Date: Wed, 6 Dec 2023 14:33:05 -0500 Subject: [PATCH 14/45] Ensure dynamicMapping updates are handled in insertion order (#103047) The switch to holding dynamic fields in a hashmap effectively randomizes their iteration order. This can be troublesome when building the mapping update required by these updates. When iterating in an unknown order, recursing to the leaf mapper can occur many times `O(n^2)`. However, starting with insertion order, it will occur only `O(n)` times. closes: https://github.com/elastic/elasticsearch/issues/103011 --- docs/changelog/103047.yaml | 5 +++++ .../elasticsearch/index/mapper/DocumentParserContext.java | 6 +++--- 2 files changed, 8 insertions(+), 3 deletions(-) create mode 100644 docs/changelog/103047.yaml diff --git a/docs/changelog/103047.yaml b/docs/changelog/103047.yaml new file mode 100644 index 0000000000000..59f86d679b55f --- /dev/null +++ b/docs/changelog/103047.yaml @@ -0,0 +1,5 @@ +pr: 103047 +summary: Ensure `dynamicMapping` updates are handled in insertion order +area: Mapping +type: bug +issues: [] diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java index f47b392115f81..9d5cb374a9a89 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java @@ -22,8 +22,8 @@ import java.util.ArrayList; import java.util.Collection; import java.util.Collections; -import java.util.HashMap; import java.util.HashSet; +import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.Set; @@ -166,9 +166,9 @@ protected DocumentParserContext( mappingParserContext, source, new HashSet<>(), - new HashMap<>(), + new LinkedHashMap<>(), new HashSet<>(), - new HashMap<>(), + new LinkedHashMap<>(), new ArrayList<>(), null, null, From 4dd9e2a7722580f158de6a22b41d1e5517210c9b Mon Sep 17 00:00:00 2001 From: Kathleen DeRusso Date: Wed, 6 Dec 2023 17:16:56 -0500 Subject: [PATCH 15/45] [Query Rules] Add some usability clarifications to docs (#102990) * [Query Rules] Add some usability clarifications to docs * Fix typo --- docs/reference/query-dsl/rule-query.asciidoc | 1 + .../reference/query-rules/apis/put-query-ruleset.asciidoc | 3 +++ .../search-your-data/search-using-query-rules.asciidoc | 8 ++++++++ 3 files changed, 12 insertions(+) diff --git a/docs/reference/query-dsl/rule-query.asciidoc b/docs/reference/query-dsl/rule-query.asciidoc index cf79a564f81a3..f92a9e67b5344 100644 --- a/docs/reference/query-dsl/rule-query.asciidoc +++ b/docs/reference/query-dsl/rule-query.asciidoc @@ -10,6 +10,7 @@ preview::[] Applies <> to the query before returning results. This feature is used to promote documents in the manner of a <> based on matching defined rules. If no matching query rules are defined, the "organic" matches for the query are returned. +All matching rules are applied in the order in which they appear in the query ruleset. [NOTE] ==== diff --git a/docs/reference/query-rules/apis/put-query-ruleset.asciidoc b/docs/reference/query-rules/apis/put-query-ruleset.asciidoc index 6805201ce9d7c..0d41496a505da 100644 --- a/docs/reference/query-rules/apis/put-query-ruleset.asciidoc +++ b/docs/reference/query-rules/apis/put-query-ruleset.asciidoc @@ -29,6 +29,9 @@ Requires the `manage_search_query_rules` privilege. 
`rules`:: (Required, array of objects) The specific rules included in this query ruleset. +There is a limit of 100 rules per ruleset. +This can be increased up to 1000 using the `xpack.applications.rules.max_rules_per_ruleset` cluster setting. + Each rule must have the following information: - `rule_id` (Required, string) A unique identifier for this rule. diff --git a/docs/reference/search/search-your-data/search-using-query-rules.asciidoc b/docs/reference/search/search-your-data/search-using-query-rules.asciidoc index ccd06b6681aad..5f61865f8ad67 100644 --- a/docs/reference/search/search-your-data/search-using-query-rules.asciidoc +++ b/docs/reference/search/search-your-data/search-using-query-rules.asciidoc @@ -174,6 +174,9 @@ PUT /_query_rules/my-ruleset The API response returns a results of `created` or `updated` depending on whether this was a new or edited ruleset. +NOTE: There is a limit of 100 rules per ruleset. +This can be increased up to 1000 using the `xpack.applications.rules.max_rules_per_ruleset` cluster setting. + [source,console-result] ---- { @@ -217,3 +220,8 @@ GET /my-index-000001/_search This rule query will match against `rule1` in the defined query ruleset, and will convert the organic query into a pinned query with `id1` and `id2` pinned as the top hits. Any other matches from the organic query will be returned below the pinned results. + +It's possible to have multiple rules in a ruleset match a single `rule_query`. In this case, the pinned documents are returned in the following order: + +- Where the matching rule appears in the ruleset +- If multiple documents are specified in a single rule, in the order they are specified From 5856539f4d34741b2daa1042f54651511ed31768 Mon Sep 17 00:00:00 2001 From: Brian Seeders Date: Wed, 6 Dec 2023 18:30:34 -0500 Subject: [PATCH 16/45] Bump to version 8.13.0 --- .backportrc.json | 4 ++-- .buildkite/pipelines/intake.yml | 2 +- .buildkite/pipelines/periodic-packaging.yml | 16 +++++++++++++++ .buildkite/pipelines/periodic.yml | 12 ++++++++++- .ci/bwcVersions | 1 + .ci/snapshotBwcVersions | 1 + build-tools-internal/version.properties | 2 +- docs/reference/migration/index.asciidoc | 2 ++ .../reference/migration/migrate_8_13.asciidoc | 20 +++++++++++++++++++ docs/reference/release-notes.asciidoc | 2 ++ docs/reference/release-notes/8.13.0.asciidoc | 8 ++++++++ .../release-notes/highlights.asciidoc | 3 ++- .../main/java/org/elasticsearch/Version.java | 3 ++- 13 files changed, 69 insertions(+), 7 deletions(-) create mode 100644 docs/reference/migration/migrate_8_13.asciidoc create mode 100644 docs/reference/release-notes/8.13.0.asciidoc diff --git a/.backportrc.json b/.backportrc.json index 9e654b51c7673..c0d0dbc15a821 100644 --- a/.backportrc.json +++ b/.backportrc.json @@ -1,9 +1,9 @@ { "upstream" : "elastic/elasticsearch", - "targetBranchChoices" : [ "main", "8.11", "8.10", "8.9", "8.8", "8.7", "8.6", "8.5", "8.4", "8.3", "8.2", "8.1", "8.0", "7.17", "6.8" ], + "targetBranchChoices" : [ "main", "8.12", "8.11", "8.10", "8.9", "8.8", "8.7", "8.6", "8.5", "8.4", "8.3", "8.2", "8.1", "8.0", "7.17", "6.8" ], "targetPRLabels" : [ "backport" ], "branchLabelMapping" : { - "^v8.12.0$" : "main", + "^v8.13.0$" : "main", "^v(\\d+).(\\d+).\\d+(?:-(?:alpha|beta|rc)\\d+)?$" : "$1.$2" } } \ No newline at end of file diff --git a/.buildkite/pipelines/intake.yml b/.buildkite/pipelines/intake.yml index 49c2d34df7e31..b6bbc62e6bc0e 100644 --- a/.buildkite/pipelines/intake.yml +++ b/.buildkite/pipelines/intake.yml @@ -48,7 +48,7 @@ steps: 
timeout_in_minutes: 300 matrix: setup: - BWC_VERSION: ["7.17.16", "8.11.2", "8.12.0"] + BWC_VERSION: ["7.17.16", "8.11.2", "8.12.0", "8.13.0"] agents: provider: gcp image: family/elasticsearch-ubuntu-2004 diff --git a/.buildkite/pipelines/periodic-packaging.yml b/.buildkite/pipelines/periodic-packaging.yml index fab90c8ed6d17..c0e51b609faee 100644 --- a/.buildkite/pipelines/periodic-packaging.yml +++ b/.buildkite/pipelines/periodic-packaging.yml @@ -1761,6 +1761,22 @@ steps: env: BWC_VERSION: 8.12.0 + - label: "{{matrix.image}} / 8.13.0 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.13.0 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.13.0 + - group: packaging-tests-windows steps: - label: "{{matrix.image}} / packaging-tests-windows" diff --git a/.buildkite/pipelines/periodic.yml b/.buildkite/pipelines/periodic.yml index 88738c88ef5a0..9fb66a8062ab2 100644 --- a/.buildkite/pipelines/periodic.yml +++ b/.buildkite/pipelines/periodic.yml @@ -658,7 +658,7 @@ steps: agents: provider: gcp image: family/elasticsearch-ubuntu-2004 - machineType: custom-32-98304 + machineType: n1-standard-32 buildDirectory: /dev/shm/bk env: BWC_VERSION: 7.17.16 @@ -1082,6 +1082,16 @@ steps: buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.12.0 + - label: 8.13.0 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.13.0#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: n1-standard-32 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.13.0 - label: concurrent-search-tests command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dtests.jvm.argline=-Des.concurrent_search=true -Des.concurrent_search=true check timeout_in_minutes: 420 diff --git a/.ci/bwcVersions b/.ci/bwcVersions index 581ec2f1565b6..0c29d210149bc 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -107,3 +107,4 @@ BWC_VERSION: - "8.11.1" - "8.11.2" - "8.12.0" + - "8.13.0" diff --git a/.ci/snapshotBwcVersions b/.ci/snapshotBwcVersions index 7970d655f4014..6fbe04325c898 100644 --- a/.ci/snapshotBwcVersions +++ b/.ci/snapshotBwcVersions @@ -2,3 +2,4 @@ BWC_VERSION: - "7.17.16" - "8.11.2" - "8.12.0" + - "8.13.0" diff --git a/build-tools-internal/version.properties b/build-tools-internal/version.properties index f0e599a9c0e87..adf33dd070a22 100644 --- a/build-tools-internal/version.properties +++ b/build-tools-internal/version.properties @@ -1,4 +1,4 @@ -elasticsearch = 8.12.0 +elasticsearch = 8.13.0 lucene = 9.9.0 bundled_jdk_vendor = openjdk diff --git a/docs/reference/migration/index.asciidoc b/docs/reference/migration/index.asciidoc index d4f38f1b5d9ff..02fa88f409f27 100644 --- a/docs/reference/migration/index.asciidoc +++ b/docs/reference/migration/index.asciidoc @@ -1,5 +1,6 @@ include::migration_intro.asciidoc[] +* <> * <> * <> * <> @@ -14,6 +15,7 @@ include::migration_intro.asciidoc[] * <> * <> +include::migrate_8_13.asciidoc[] include::migrate_8_12.asciidoc[] include::migrate_8_11.asciidoc[] include::migrate_8_10.asciidoc[] diff --git a/docs/reference/migration/migrate_8_13.asciidoc b/docs/reference/migration/migrate_8_13.asciidoc new file mode 100644 index 0000000000000..c2f431da388f1 --- /dev/null +++ b/docs/reference/migration/migrate_8_13.asciidoc @@ -0,0 +1,20 @@ +[[migrating-8.13]] 
+== Migrating to 8.13 +++++ +8.13 +++++ + +This section discusses the changes that you need to be aware of when migrating +your application to {es} 8.13. + +See also <> and <>. + +coming::[8.13.0] + + +[discrete] +[[breaking-changes-8.13]] +=== Breaking changes + +There are no breaking changes in {es} 8.13. + diff --git a/docs/reference/release-notes.asciidoc b/docs/reference/release-notes.asciidoc index 011c44216cc0c..038ae06480e48 100644 --- a/docs/reference/release-notes.asciidoc +++ b/docs/reference/release-notes.asciidoc @@ -6,6 +6,7 @@ This section summarizes the changes in each release. +* <> * <> * <> * <> @@ -55,6 +56,7 @@ This section summarizes the changes in each release. -- +include::release-notes/8.13.0.asciidoc[] include::release-notes/8.12.0.asciidoc[] include::release-notes/8.11.1.asciidoc[] include::release-notes/8.11.0.asciidoc[] diff --git a/docs/reference/release-notes/8.13.0.asciidoc b/docs/reference/release-notes/8.13.0.asciidoc new file mode 100644 index 0000000000000..5b7d4f90f98de --- /dev/null +++ b/docs/reference/release-notes/8.13.0.asciidoc @@ -0,0 +1,8 @@ +[[release-notes-8.13.0]] +== {es} version 8.13.0 + +coming[8.13.0] + +Also see <>. + + diff --git a/docs/reference/release-notes/highlights.asciidoc b/docs/reference/release-notes/highlights.asciidoc index 5b43f6940cbfa..f5252ae6a884f 100644 --- a/docs/reference/release-notes/highlights.asciidoc +++ b/docs/reference/release-notes/highlights.asciidoc @@ -11,7 +11,8 @@ For detailed information about this release, see the <> and // Add previous release to the list Other versions: -{ref-bare}/8.11/release-highlights.html[8.11] +{ref-bare}/8.12/release-highlights.html[8.12] +| {ref-bare}/8.11/release-highlights.html[8.11] | {ref-bare}/8.10/release-highlights.html[8.10] | {ref-bare}/8.9/release-highlights.html[8.9] | {ref-bare}/8.8/release-highlights.html[8.8] diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index 5dd9a3a055043..2ecc9703b2398 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -158,7 +158,8 @@ public class Version implements VersionId, ToXContentFragment { public static final Version V_8_11_1 = new Version(8_11_01_99); public static final Version V_8_11_2 = new Version(8_11_02_99); public static final Version V_8_12_0 = new Version(8_12_00_99); - public static final Version CURRENT = V_8_12_0; + public static final Version V_8_13_0 = new Version(8_13_00_99); + public static final Version CURRENT = V_8_13_0; private static final NavigableMap VERSION_IDS; private static final Map VERSION_STRINGS; From 6219ccdcf247e350d0db12025a54db310bd27d9a Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Wed, 6 Dec 2023 15:49:00 -0800 Subject: [PATCH 17/45] Add 8.12 branch --- branches.json | 3 +++ 1 file changed, 3 insertions(+) diff --git a/branches.json b/branches.json index c76417a198c57..b33bb30e77cc4 100644 --- a/branches.json +++ b/branches.json @@ -4,6 +4,9 @@ { "branch": "main" }, + { + "branch": "8.12" + }, { "branch": "8.11" }, From 04cc3353ec3720517924bd851b893bbc7b35e1f2 Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Wed, 6 Dec 2023 15:57:25 -0800 Subject: [PATCH 18/45] Mute LicensesMetadataSerializationTests.testLicenseTombstoneWithUsedTrialFromXContext --- .../license/LicensesMetadataSerializationTests.java | 1 + 1 file changed, 1 insertion(+) diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicensesMetadataSerializationTests.java 
b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicensesMetadataSerializationTests.java index 4ed306bf734fc..be43705984435 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicensesMetadataSerializationTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicensesMetadataSerializationTests.java @@ -121,6 +121,7 @@ public void testLicenseTombstoneFromXContext() throws Exception { assertThat(metadataFromXContent.getLicense(), equalTo(LicensesMetadata.LICENSE_TOMBSTONE)); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/103093") public void testLicenseTombstoneWithUsedTrialFromXContext() throws Exception { final XContentBuilder builder = XContentFactory.jsonBuilder(); builder.startObject(); From 99a840e5daa4024161a318b6d4f7d83f8104b11d Mon Sep 17 00:00:00 2001 From: Mark Vieira Date: Wed, 6 Dec 2023 16:41:12 -0800 Subject: [PATCH 19/45] Mute CcrRollingUpgradeIT.testCannotFollowLeaderInUpgradedCluster --- .../java/org/elasticsearch/upgrades/CcrRollingUpgradeIT.java | 1 + 1 file changed, 1 insertion(+) diff --git a/x-pack/qa/rolling-upgrade-multi-cluster/src/test/java/org/elasticsearch/upgrades/CcrRollingUpgradeIT.java b/x-pack/qa/rolling-upgrade-multi-cluster/src/test/java/org/elasticsearch/upgrades/CcrRollingUpgradeIT.java index 27250dd4e3367..703b9e608db17 100644 --- a/x-pack/qa/rolling-upgrade-multi-cluster/src/test/java/org/elasticsearch/upgrades/CcrRollingUpgradeIT.java +++ b/x-pack/qa/rolling-upgrade-multi-cluster/src/test/java/org/elasticsearch/upgrades/CcrRollingUpgradeIT.java @@ -206,6 +206,7 @@ public void testAutoFollowing() throws Exception { } } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/103094") public void testCannotFollowLeaderInUpgradedCluster() throws Exception { if (upgradeState != UpgradeState.ALL) { return; From 72efee244bb79a6bdf6cd9cd300377e3b9d1a39b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lorenzo=20Dematt=C3=A9?= Date: Thu, 7 Dec 2023 01:45:46 +0100 Subject: [PATCH 20/45] Fix FullClusterRestartIT BwC tests (#103092) --- .../cluster/metadata/DataStreamTestHelper.java | 2 -- .../test/rest/RestTestLegacyFeatures.java | 8 +------- .../xpack/restart/FullClusterRestartIT.java | 18 ++++++++++-------- 3 files changed, 11 insertions(+), 17 deletions(-) diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java b/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java index e3e11907534e2..db4b1ec0a99c8 100644 --- a/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java +++ b/test/framework/src/main/java/org/elasticsearch/cluster/metadata/DataStreamTestHelper.java @@ -8,7 +8,6 @@ package org.elasticsearch.cluster.metadata; import org.apache.lucene.tests.util.LuceneTestCase; -import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.rollover.MetadataRolloverService; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; @@ -82,7 +81,6 @@ public final class DataStreamTestHelper { - private static final Version DATE_IN_BACKING_INDEX_VERSION = Version.V_7_11_0; private static final Settings.Builder SETTINGS = ESTestCase.settings(IndexVersion.current()).put("index.hidden", true); private static final int NUMBER_OF_SHARDS = 1; private static final int NUMBER_OF_REPLICAS = 1; diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/RestTestLegacyFeatures.java 
b/test/framework/src/main/java/org/elasticsearch/test/rest/RestTestLegacyFeatures.java index aedd916c0a0f3..a58810e91e186 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/RestTestLegacyFeatures.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/RestTestLegacyFeatures.java @@ -54,13 +54,9 @@ public class RestTestLegacyFeatures implements FeatureSpecification { public static final NodeFeature TRANSFORM_NEW_API_ENDPOINT = new NodeFeature("transform.new_api_endpoint"); // Ref: https://github.com/elastic/elasticsearch/pull/65205 @UpdateForV9 - public static final NodeFeature DATA_STREAMS_DATE_IN_INDEX_NAME = new NodeFeature("data-streams.date_in_index_name"); - @UpdateForV9 public static final NodeFeature ML_INDICES_HIDDEN = new NodeFeature("ml.indices_hidden"); @UpdateForV9 public static final NodeFeature ML_ANALYTICS_MAPPINGS = new NodeFeature("ml.analytics_mappings"); - @UpdateForV9 - public static final NodeFeature SLM_SUPPORTED = new NodeFeature("slm.supported"); @Override public Map getHistoricalFeatures() { @@ -78,10 +74,8 @@ public Map getHistoricalFeatures() { entry(SECURITY_ROLE_DESCRIPTORS_OPTIONAL, Version.V_7_3_0), entry(SEARCH_AGGREGATIONS_FORCE_INTERVAL_SELECTION_DATE_HISTOGRAM, Version.V_7_2_0), entry(TRANSFORM_NEW_API_ENDPOINT, Version.V_7_5_0), - entry(DATA_STREAMS_DATE_IN_INDEX_NAME, Version.V_7_11_0), entry(ML_INDICES_HIDDEN, Version.V_7_7_0), - entry(ML_ANALYTICS_MAPPINGS, Version.V_7_3_0), - entry(SLM_SUPPORTED, Version.V_7_4_0) + entry(ML_ANALYTICS_MAPPINGS, Version.V_7_3_0) ); } } diff --git a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java index bd422c0c578d8..3bff0027f8752 100644 --- a/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java +++ b/x-pack/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java @@ -586,6 +586,9 @@ public void testTransformLegacyTemplateCleanup() throws Exception { } public void testSlmPolicyAndStats() throws IOException { + @UpdateForV9 + var originalClusterSupportsSlm = parseLegacyVersion(getOldClusterVersion()).map(v -> v.onOrAfter(Version.V_7_4_0)).orElse(true); + SnapshotLifecyclePolicy slmPolicy = new SnapshotLifecyclePolicy( "test-policy", "test-policy", @@ -594,7 +597,7 @@ public void testSlmPolicyAndStats() throws IOException { Collections.singletonMap("indices", Collections.singletonList("*")), null ); - if (isRunningAgainstOldCluster() && clusterHasFeature(RestTestLegacyFeatures.SLM_SUPPORTED)) { + if (isRunningAgainstOldCluster() && originalClusterSupportsSlm) { Request createRepoRequest = new Request("PUT", "_snapshot/test-repo"); String repoCreateJson = "{" + " \"type\": \"fs\"," + " \"settings\": {" + " \"location\": \"test-repo\"" + " }" + "}"; createRepoRequest.setJsonEntity(repoCreateJson); @@ -608,7 +611,7 @@ public void testSlmPolicyAndStats() throws IOException { client().performRequest(createSlmPolicyRequest); } - if (isRunningAgainstOldCluster() == false && clusterHasFeature(RestTestLegacyFeatures.SLM_SUPPORTED)) { + if (isRunningAgainstOldCluster() == false && originalClusterSupportsSlm) { Request getSlmPolicyRequest = new Request("GET", "_slm/policy/test-policy"); Response response = client().performRequest(getSlmPolicyRequest); Map responseMap = entityAsMap(response); @@ -940,6 +943,10 @@ public void 
testDataStreams() throws Exception { var originalClusterSupportsDataStreams = parseLegacyVersion(getOldClusterVersion()).map(v -> v.onOrAfter(Version.V_7_9_0)) .orElse(true); + @UpdateForV9 + var originalClusterDataStreamHasDateInIndexName = parseLegacyVersion(getOldClusterVersion()).map(v -> v.onOrAfter(Version.V_7_11_0)) + .orElse(true); + assumeTrue("no data streams in versions before 7.9.0", originalClusterSupportsDataStreams); if (isRunningAgainstOldCluster()) { createComposableTemplate(client(), "dst", "ds"); @@ -977,12 +984,7 @@ public void testDataStreams() throws Exception { assertEquals("ds", ds.get("name")); assertEquals(1, indices.size()); assertEquals( - DataStreamTestHelper.getLegacyDefaultBackingIndexName( - "ds", - 1, - timestamp, - clusterHasFeature(RestTestLegacyFeatures.DATA_STREAMS_DATE_IN_INDEX_NAME) - ), + DataStreamTestHelper.getLegacyDefaultBackingIndexName("ds", 1, timestamp, originalClusterDataStreamHasDateInIndexName), indices.get(0).get("index_name") ); assertNumHits("ds", 1, 1); From 084bd5b53981cf1907a102a34ba21df8f0ad2030 Mon Sep 17 00:00:00 2001 From: Andrew Wilkins Date: Thu, 7 Dec 2023 10:11:43 +0800 Subject: [PATCH 21/45] x-pack/plugin/core: rename double_metrics template (#103033) * x-pack/plugin/core: rename double_metrics template Rename double_metrics to float_metrics, and create a new double_metrics (with lower priority) that has no match_mapping_type. The new one is intended to be used with the dynamic_templates request parameter. --- docs/changelog/103033.yaml | 5 +++++ .../resources/rest-api-spec/test/10_apm.yml | 2 +- .../src/main/resources/metrics@mappings.json | 10 +++++++++- 3 files changed, 15 insertions(+), 2 deletions(-) create mode 100644 docs/changelog/103033.yaml diff --git a/docs/changelog/103033.yaml b/docs/changelog/103033.yaml new file mode 100644 index 0000000000000..30f8e182b9998 --- /dev/null +++ b/docs/changelog/103033.yaml @@ -0,0 +1,5 @@ +pr: 103033 +summary: "X-pack/plugin/core: rename `double_metrics` template" +area: Data streams +type: enhancement +issues: [] diff --git a/x-pack/plugin/apm-data/src/yamlRestTest/resources/rest-api-spec/test/10_apm.yml b/x-pack/plugin/apm-data/src/yamlRestTest/resources/rest-api-spec/test/10_apm.yml index 0030040b572c9..b8fdebf9a938b 100644 --- a/x-pack/plugin/apm-data/src/yamlRestTest/resources/rest-api-spec/test/10_apm.yml +++ b/x-pack/plugin/apm-data/src/yamlRestTest/resources/rest-api-spec/test/10_apm.yml @@ -175,7 +175,7 @@ setup: full_name: double_metric mapping: double_metric: - type: float + type: double index: false summary_metric: full_name: summary_metric diff --git a/x-pack/plugin/core/template-resources/src/main/resources/metrics@mappings.json b/x-pack/plugin/core/template-resources/src/main/resources/metrics@mappings.json index 4e48f6b7adaed..b4aa999697632 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/metrics@mappings.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/metrics@mappings.json @@ -12,7 +12,7 @@ } }, { - "double_metrics": { + "float_metrics": { "match_mapping_type": "double", "mapping": { "type": "float", @@ -20,6 +20,14 @@ } } }, + { + "double_metrics": { + "mapping": { + "type": "double", + "index": false + } + } + }, { "histogram_metrics": { "mapping": { From d7c6c2253d5c9c7a21befc4f7199dbde42f2977a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tim=20R=C3=BChsen?= Date: Thu, 7 Dec 2023 07:40:45 +0100 Subject: [PATCH 22/45] [Profiling] Improve parameterized request test (#102972) * [Profiling] Add co2/cost params to stacktrace 
request test * Extract class GetStackTracesResponseBuilder.java * Cover GetStackTracesResponseBuilder in tests * Assert-test single response fields --------- Co-authored-by: Elastic Machine --- .../GetStackTracesResponseBuilder.java | 155 ++++++++++++++++ .../TransportGetStackTracesAction.java | 166 ++---------------- .../RestGetStackTracesActionTests.java | 38 +++- 3 files changed, 195 insertions(+), 164 deletions(-) create mode 100644 x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStackTracesResponseBuilder.java diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStackTracesResponseBuilder.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStackTracesResponseBuilder.java new file mode 100644 index 0000000000000..ccafe99c31d9a --- /dev/null +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStackTracesResponseBuilder.java @@ -0,0 +1,155 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.profiling; + +import java.time.Instant; +import java.util.List; +import java.util.Map; + +class GetStackTracesResponseBuilder { + private Map stackTraces; + private Instant start; + private Instant end; + private int totalFrames; + private Map stackFrames; + private Map executables; + private Map stackTraceEvents; + private List hostEventCounts; + private double samplingRate; + private long totalSamples; + private Double requestedDuration; + private final Double awsCostFactor; + private final Double customCO2PerKWH; + private final Double customDatacenterPUE; + private final Double customPerCoreWattX86; + private final Double customPerCoreWattARM64; + private final Double customCostPerCoreHour; + + public void setStackTraces(Map stackTraces) { + this.stackTraces = stackTraces; + } + + public Instant getStart() { + return start; + } + + public void setStart(Instant start) { + this.start = start; + } + + public Instant getEnd() { + return end; + } + + public void setEnd(Instant end) { + this.end = end; + } + + public void setTotalFrames(int totalFrames) { + this.totalFrames = totalFrames; + } + + public void setStackFrames(Map stackFrames) { + this.stackFrames = stackFrames; + } + + public void setExecutables(Map executables) { + this.executables = executables; + } + + public void setStackTraceEvents(Map stackTraceEvents) { + this.stackTraceEvents = stackTraceEvents; + } + + public void setHostEventCounts(List hostEventCounts) { + this.hostEventCounts = hostEventCounts; + } + + public List getHostEventCounts() { + return hostEventCounts; + } + + public Map getStackTraceEvents() { + return stackTraceEvents; + } + + public void setSamplingRate(double rate) { + this.samplingRate = rate; + } + + public double getSamplingRate() { + return samplingRate; + } + + public void setRequestedDuration(Double requestedDuration) { + this.requestedDuration = requestedDuration; + } + + public double getRequestedDuration() { + if (requestedDuration != null) { + return requestedDuration; + } + // If "requested_duration" wasn't specified, we use the time range from the query response. 
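+        // The duration is in seconds; it is later used to scale the CO2 and cost estimates.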
+ return end.getEpochSecond() - start.getEpochSecond(); + } + + public Double getAWSCostFactor() { + return awsCostFactor; + } + + public Double getCustomCO2PerKWH() { + return customCO2PerKWH; + } + + public Double getCustomDatacenterPUE() { + return customDatacenterPUE; + } + + public Double getCustomPerCoreWattX86() { + return customPerCoreWattX86; + } + + public Double getCustomPerCoreWattARM64() { + return customPerCoreWattARM64; + } + + public Double getCustomCostPerCoreHour() { + return customCostPerCoreHour; + } + + public void setTotalSamples(long totalSamples) { + this.totalSamples = totalSamples; + } + + GetStackTracesResponseBuilder(GetStackTracesRequest request) { + this.requestedDuration = request.getRequestedDuration(); + this.awsCostFactor = request.getAwsCostFactor(); + this.customCO2PerKWH = request.getCustomCO2PerKWH(); + this.customDatacenterPUE = request.getCustomDatacenterPUE(); + this.customPerCoreWattX86 = request.getCustomPerCoreWattX86(); + this.customPerCoreWattARM64 = request.getCustomPerCoreWattARM64(); + this.customCostPerCoreHour = request.getCustomCostPerCoreHour(); + } + + public GetStackTracesResponse build() { + // Merge the TraceEvent data into the StackTraces. + if (stackTraces != null) { + for (Map.Entry entry : stackTraces.entrySet()) { + String stacktraceID = entry.getKey(); + TraceEvent event = stackTraceEvents.get(stacktraceID); + if (event != null) { + StackTrace stackTrace = entry.getValue(); + stackTrace.count = event.count; + stackTrace.annualCO2Tons = event.annualCO2Tons; + stackTrace.annualCostsUSD = event.annualCostsUSD; + } + } + } + return new GetStackTracesResponse(stackTraces, stackFrames, executables, stackTraceEvents, totalFrames, samplingRate, totalSamples); + } +} diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetStackTracesAction.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetStackTracesAction.java index a51d8b509003a..27feb8cc9e22a 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetStackTracesAction.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetStackTracesAction.java @@ -149,14 +149,7 @@ public TransportGetStackTracesAction( @Override protected void doExecute(Task submitTask, GetStackTracesRequest request, ActionListener submitListener) { licenseChecker.requireSupportedLicense(); - GetStackTracesResponseBuilder responseBuilder = new GetStackTracesResponseBuilder(); - responseBuilder.setRequestedDuration(request.getRequestedDuration()); - responseBuilder.setAwsCostFactor(request.getAwsCostFactor()); - responseBuilder.setCustomCO2PerKWH(request.getCustomCO2PerKWH()); - responseBuilder.setCustomDatacenterPUE(request.getCustomDatacenterPUE()); - responseBuilder.setCustomPerCoreWattX86(request.getCustomPerCoreWattX86()); - responseBuilder.setCustomPerCoreWattARM64(request.getCustomPerCoreWattARM64()); - responseBuilder.setCustomCostPerCoreHour(request.getCustomCostPerCoreHour()); + GetStackTracesResponseBuilder responseBuilder = new GetStackTracesResponseBuilder(request); Client client = new ParentTaskAssigningClient(this.nodeClient, transportService.getLocalNode(), submitTask); if (request.getIndices() == null) { searchProfilingEvents(submitTask, client, request, submitListener, responseBuilder); @@ -413,8 +406,8 @@ private void retrieveStackTraces( List indices = resolver.resolve(clusterState, "profiling-stacktraces", responseBuilder.getStart(), 
responseBuilder.getEnd()); // Build a set of unique host IDs. - Set uniqueHostIDs = new HashSet<>(responseBuilder.hostEventCounts.size()); - for (HostEventCount hec : responseBuilder.hostEventCounts) { + Set uniqueHostIDs = new HashSet<>(responseBuilder.getHostEventCounts().size()); + for (HostEventCount hec : responseBuilder.getHostEventCounts()) { uniqueHostIDs.add(hec.hostID); } @@ -557,21 +550,21 @@ public void calculateCO2AndCosts() { instanceTypeService, hostMetadata, responseBuilder.getRequestedDuration(), - responseBuilder.customCO2PerKWH, - responseBuilder.customDatacenterPUE, - responseBuilder.customPerCoreWattX86, - responseBuilder.customPerCoreWattARM64 + responseBuilder.getCustomCO2PerKWH(), + responseBuilder.getCustomDatacenterPUE(), + responseBuilder.getCustomPerCoreWattX86(), + responseBuilder.getCustomPerCoreWattARM64() ); CostCalculator costCalculator = new CostCalculator( instanceTypeService, hostMetadata, responseBuilder.getRequestedDuration(), - responseBuilder.awsCostFactor, - responseBuilder.customCostPerCoreHour + responseBuilder.getAWSCostFactor(), + responseBuilder.getCustomCostPerCoreHour() ); - Map events = responseBuilder.stackTraceEvents; + Map events = responseBuilder.getStackTraceEvents(); List missingStackTraces = new ArrayList<>(); - for (HostEventCount hec : responseBuilder.hostEventCounts) { + for (HostEventCount hec : responseBuilder.getHostEventCounts()) { TraceEvent event = events.get(hec.stacktraceID); if (event == null) { // If this happens, hostEventsCounts and events are out of sync, which indicates a bug. @@ -768,142 +761,5 @@ private void mget(Client client, List indices, List slice, Action } } - private static class GetStackTracesResponseBuilder { - private Map stackTraces; - private Instant start; - private Instant end; - private int totalFrames; - private Map stackFrames; - private Map executables; - private Map stackTraceEvents; - private List hostEventCounts; - private double samplingRate; - private long totalSamples; - private Double requestedDuration; - private Double awsCostFactor; - private Double customCO2PerKWH; - private Double customDatacenterPUE; - private Double customPerCoreWattX86; - private Double customPerCoreWattARM64; - private Double customCostPerCoreHour; - - public void setStackTraces(Map stackTraces) { - this.stackTraces = stackTraces; - } - - public Instant getStart() { - return start; - } - - public void setStart(Instant start) { - this.start = start; - } - - public Instant getEnd() { - return end; - } - - public void setEnd(Instant end) { - this.end = end; - } - - public void setTotalFrames(int totalFrames) { - this.totalFrames = totalFrames; - } - - public void setStackFrames(Map stackFrames) { - this.stackFrames = stackFrames; - } - - public void setExecutables(Map executables) { - this.executables = executables; - } - - public void setStackTraceEvents(Map stackTraceEvents) { - this.stackTraceEvents = stackTraceEvents; - } - - public void setHostEventCounts(List hostEventCounts) { - this.hostEventCounts = hostEventCounts; - } - - public Map getStackTraceEvents() { - return stackTraceEvents; - } - - public void setSamplingRate(double rate) { - this.samplingRate = rate; - } - - public double getSamplingRate() { - return samplingRate; - } - - public void setRequestedDuration(Double requestedDuration) { - this.requestedDuration = requestedDuration; - } - - public double getRequestedDuration() { - if (requestedDuration != null) { - return requestedDuration; - } - // If "requested_duration" wasn't specified, we use the 
time range from the query response. - return end.getEpochSecond() - start.getEpochSecond(); - } - - public void setAwsCostFactor(Double awsCostFactor) { - this.awsCostFactor = awsCostFactor; - } - - public void setCustomCO2PerKWH(Double customCO2PerKWH) { - this.customCO2PerKWH = customCO2PerKWH; - } - - public void setCustomDatacenterPUE(Double customDatacenterPUE) { - this.customDatacenterPUE = customDatacenterPUE; - } - - public void setCustomPerCoreWattX86(Double customPerCoreWattX86) { - this.customPerCoreWattX86 = customPerCoreWattX86; - } - - public void setCustomPerCoreWattARM64(Double customPerCoreWattARM64) { - this.customPerCoreWattARM64 = customPerCoreWattARM64; - } - - public void setCustomCostPerCoreHour(Double customCostPerCoreHour) { - this.customCostPerCoreHour = customCostPerCoreHour; - } - - public void setTotalSamples(long totalSamples) { - this.totalSamples = totalSamples; - } - - public GetStackTracesResponse build() { - // Merge the TraceEvent data into the StackTraces. - if (stackTraces != null) { - for (Map.Entry entry : stackTraces.entrySet()) { - String stacktraceID = entry.getKey(); - TraceEvent event = stackTraceEvents.get(stacktraceID); - if (event != null) { - StackTrace stackTrace = entry.getValue(); - stackTrace.count = event.count; - stackTrace.annualCO2Tons = event.annualCO2Tons; - stackTrace.annualCostsUSD = event.annualCostsUSD; - } - } - } - return new GetStackTracesResponse( - stackTraces, - stackFrames, - executables, - stackTraceEvents, - totalFrames, - samplingRate, - totalSamples - ); - } - } - record HostEventCount(String hostID, String stacktraceID, int count) {} } diff --git a/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/RestGetStackTracesActionTests.java b/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/RestGetStackTracesActionTests.java index 171b43e4be4d5..af9b28b5a1831 100644 --- a/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/RestGetStackTracesActionTests.java +++ b/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/RestGetStackTracesActionTests.java @@ -66,17 +66,31 @@ public void testPrepareParameterizedRequest() { assertThat(request, instanceOf(GetStackTracesRequest.class)); GetStackTracesRequest getStackTracesRequest = (GetStackTracesRequest) request; assertThat(getStackTracesRequest.getSampleSize(), is(10_000)); + assertThat(getStackTracesRequest.getRequestedDuration(), is(3_600.0d)); + assertThat(getStackTracesRequest.getAwsCostFactor(), is(1.0d)); + assertThat(getStackTracesRequest.getCustomCO2PerKWH(), is(0.005d)); + assertThat(getStackTracesRequest.getCustomDatacenterPUE(), is(1.5d)); + assertThat(getStackTracesRequest.getCustomPerCoreWattX86(), is(7.5d)); + assertThat(getStackTracesRequest.getCustomPerCoreWattARM64(), is(2.0d)); + assertThat(getStackTracesRequest.getCustomCostPerCoreHour(), is(0.083d)); assertThat(getStackTracesRequest.getQuery(), notNullValue(QueryBuilder.class)); executeCalled.set(true); - return new GetStackTracesResponse( - Collections.emptyMap(), - Collections.emptyMap(), - Collections.emptyMap(), - Collections.emptyMap(), - 0, - 0.0d, - 0L - ); + + GetStackTracesResponseBuilder responseBuilder = new GetStackTracesResponseBuilder(getStackTracesRequest); + responseBuilder.setSamplingRate(0.04d); + responseBuilder.setTotalFrames(523); + responseBuilder.setTotalSamples(3L); + + GetStackTracesResponse response = responseBuilder.build(); + assertNull(response.getStackTraces()); + 
assertNull(response.getStackFrames()); + assertNull(response.getExecutables()); + assertNull(response.getStackTraceEvents()); + assertEquals(response.getSamplingRate(), 0.04d, 0.0001d); + assertEquals(response.getTotalFrames(), 523); + assertEquals(response.getTotalSamples(), 3L); + + return response; }); RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withMethod(RestRequest.Method.POST) .withPath("/_profiling/stacktraces") @@ -84,6 +98,12 @@ public void testPrepareParameterizedRequest() { { "sample_size": 10000, "requested_duration": 3600, + "aws_cost_factor": 1.0, + "co2_per_kwh": 0.005, + "datacenter_pue": 1.5, + "per_core_watt_x86": 7.5, + "per_core_watt_arm64": 2.0, + "cost_per_core_hour": 0.083, "query": { "bool": { "filter": [ From 077b47db3703323fe4cdfa07ba7dbfda3380c491 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20Fred=C3=A9n?= <109296772+jfreden@users.noreply.github.com> Date: Thu, 7 Dec 2023 08:55:46 +0100 Subject: [PATCH 23/45] Add support for splitting saml groups by delimiter (#102769) * Add support for splitting saml groups by delimiter --- .../settings/security-settings.asciidoc | 14 ++ .../authc/saml/SamlRealmSettings.java | 39 +++- .../xpack/security/authc/saml/SamlRealm.java | 62 +++++- .../security/authc/saml/SamlRealmTests.java | 183 ++++++++++++++++-- 4 files changed, 284 insertions(+), 14 deletions(-) diff --git a/docs/reference/settings/security-settings.asciidoc b/docs/reference/settings/security-settings.asciidoc index 5cf55b8434f9d..5729a31b6c728 100644 --- a/docs/reference/settings/security-settings.asciidoc +++ b/docs/reference/settings/security-settings.asciidoc @@ -1253,6 +1253,20 @@ As per `attribute_patterns.principal`, but for the _mail_ property. As per `attribute_patterns.principal`, but for the _dn_ property. // end::saml-attributes-patterns-dn-tag[] +// tag::saml-attributes-delimiters-groups-tag[] +`attribute_delimiters.groups` {ess-icon}:: +(<>) +A plain string that is used as a delimiter to split a single-valued SAML +attribute specified by attributes.groups before it is applied to the user's +groups property. For example, splitting the SAML attribute value +engineering,elasticsearch-admins,employees on a delimiter value of , will +result in engineering, elasticsearch-admins, and employees as the list of +groups for the user. The delimiter will always be split on, regardless of +escaping in the input string. This setting does not support multi-valued SAML +attributes. It cannot be used together with the attribute_patterns setting. +You can only configure this setting for the groups attribute. 
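For concreteness, a minimal sketch of how the two settings are paired (the realm name `saml1` and the attribute name `department` are illustrative assumptions, not part of this patch; the setting keys themselves are the ones introduced here):

```java
import org.elasticsearch.common.settings.Settings;

// attributes.groups names the single-valued SAML attribute holding all groups;
// attribute_delimiters.groups supplies the delimiter its value is split on.
Settings settings = Settings.builder()
    .put("xpack.security.authc.realms.saml.saml1.attributes.groups", "department")
    .put("xpack.security.authc.realms.saml.saml1.attribute_delimiters.groups", ",")
    .build();
```

With such a configuration, a single attribute value of `engineering,elasticsearch-admins,employees` yields the three groups `engineering`, `elasticsearch-admins` and `employees`.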
+// end::saml-attributes-delimiters-groups-tag[] + // tag::saml-nameid-format-tag[] `nameid_format` {ess-icon}:: (<>) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/saml/SamlRealmSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/saml/SamlRealmSettings.java index ea3cd08dbc4ff..83d197e78f583 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/saml/SamlRealmSettings.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/saml/SamlRealmSettings.java @@ -15,6 +15,7 @@ import org.elasticsearch.xpack.core.ssl.SSLConfigurationSettings; import org.elasticsearch.xpack.core.ssl.X509KeyPairSettings; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.List; @@ -102,7 +103,7 @@ public class SamlRealmSettings { ); public static final AttributeSetting PRINCIPAL_ATTRIBUTE = new AttributeSetting("principal"); - public static final AttributeSetting GROUPS_ATTRIBUTE = new AttributeSetting("groups"); + public static final AttributeSettingWithDelimiter GROUPS_ATTRIBUTE = new AttributeSettingWithDelimiter("groups"); public static final AttributeSetting DN_ATTRIBUTE = new AttributeSetting("dn"); public static final AttributeSetting NAME_ATTRIBUTE = new AttributeSetting("name"); public static final AttributeSetting MAIL_ATTRIBUTE = new AttributeSetting("mail"); @@ -221,4 +222,40 @@ public Setting.AffixSetting getPattern() { return pattern; } } + + /** + * The SAML realm offers a setting where a multivalued attribute can be configured to have a delimiter for its values, for the case + * when all values are provided in a single string item, separated by a delimiter. + * As in {@link AttributeSetting} there are two settings: + *
+     * <ul>
+     *     <li>The name of the SAML attribute to use</li>
+     *     <li>A delimiter to apply to that attribute value in order to extract the substrings that should be used.</li>
+     * </ul>
+ * For example, the Elasticsearch Group could be configured to come from the SAML "department" attribute, where all groups are provided + * as a csv value in a single list item. + */ + public static final class AttributeSettingWithDelimiter { + public static final String ATTRIBUTE_DELIMITERS_PREFIX = "attribute_delimiters."; + private final Setting.AffixSetting delimiter; + private final AttributeSetting attributeSetting; + + public AttributeSetting getAttributeSetting() { + return attributeSetting; + } + + public AttributeSettingWithDelimiter(String name) { + this.attributeSetting = new AttributeSetting(name); + this.delimiter = RealmSettings.simpleString(TYPE, ATTRIBUTE_DELIMITERS_PREFIX + name, Setting.Property.NodeScope); + } + + public Setting.AffixSetting getDelimiter() { + return this.delimiter; + } + + public Collection> settings() { + List> settings = new ArrayList<>(attributeSetting.settings()); + settings.add(getDelimiter()); + return settings; + } + } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlRealm.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlRealm.java index 856fdc4ba9555..56446907ad8f6 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlRealm.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlRealm.java @@ -278,7 +278,7 @@ public SpConfiguration getServiceProvider() { this.populateUserMetadata = config.getSetting(POPULATE_USER_METADATA); this.principalAttribute = AttributeParser.forSetting(logger, PRINCIPAL_ATTRIBUTE, config, true); - this.groupsAttribute = AttributeParser.forSetting(logger, GROUPS_ATTRIBUTE, config, false); + this.groupsAttribute = AttributeParser.forSetting(logger, GROUPS_ATTRIBUTE, config); this.dnAttribute = AttributeParser.forSetting(logger, DN_ATTRIBUTE, config, false); this.nameAttribute = AttributeParser.forSetting(logger, NAME_ATTRIBUTE, config, false); this.mailAttribute = AttributeParser.forSetting(logger, MAIL_ATTRIBUTE, config, false); @@ -1004,6 +1004,66 @@ public String toString() { return name; } + static AttributeParser forSetting(Logger logger, SamlRealmSettings.AttributeSettingWithDelimiter setting, RealmConfig realmConfig) { + SamlRealmSettings.AttributeSetting attributeSetting = setting.getAttributeSetting(); + if (realmConfig.hasSetting(setting.getDelimiter())) { + if (realmConfig.hasSetting(attributeSetting.getAttribute()) == false) { + throw new SettingsException( + "Setting [" + + RealmSettings.getFullSettingKey(realmConfig, setting.getDelimiter()) + + "] cannot be set unless [" + + RealmSettings.getFullSettingKey(realmConfig, attributeSetting.getAttribute()) + + "] is also set" + ); + } + if (realmConfig.hasSetting(attributeSetting.getPattern())) { + throw new SettingsException( + "Setting [" + + RealmSettings.getFullSettingKey(realmConfig, attributeSetting.getPattern()) + + "] can not be set when [" + + RealmSettings.getFullSettingKey(realmConfig, setting.getDelimiter()) + + "] is set" + ); + } + + String attributeName = realmConfig.getSetting(attributeSetting.getAttribute()); + String delimiter = realmConfig.getSetting(setting.getDelimiter()); + return new AttributeParser( + "SAML Attribute [" + + attributeName + + "] with delimiter [" + + delimiter + + "] for [" + + attributeSetting.name(realmConfig) + + "]", + attributes -> { + List attributeValues = attributes.getAttributeValues(attributeName); + if (attributeValues.size() > 1) { + 
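+                        // A delimiter only applies to a single concatenated value, so a genuinely
+                        // multi-valued attribute cannot be split and is rejected outright.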
throw SamlUtils.samlException( + "Expected single string value for attribute: [" + + attributeName + + "], but got list with " + + attributeValues.size() + + " values" + ); + } + return attributeValues.stream() + .map(s -> s.split(Pattern.quote(delimiter))) + .flatMap(Arrays::stream) + .filter(attribute -> { + if (Strings.isNullOrEmpty(attribute)) { + logger.debug("Attribute [{}] has empty components when using delimiter [{}]", attributeName, delimiter); + return false; + } + return true; + }) + .collect(Collectors.toList()); + } + ); + } + return AttributeParser.forSetting(logger, attributeSetting, realmConfig, false); + } + static AttributeParser forSetting( Logger logger, SamlRealmSettings.AttributeSetting setting, diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlRealmTests.java index f46015106d204..3ba9b18b24036 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlRealmTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/saml/SamlRealmTests.java @@ -352,14 +352,30 @@ public void testAuthenticateWithRoleMapping() throws Exception { final boolean principalIsEmailAddress = randomBoolean(); final Boolean populateUserMetadata = randomFrom(Boolean.TRUE, Boolean.FALSE, null); final String authenticatingRealm = randomBoolean() ? REALM_NAME : null; - AuthenticationResult result = performAuthentication( - roleMapper, - useNameId, - principalIsEmailAddress, - populateUserMetadata, - false, - authenticatingRealm - ); + final boolean testWithDelimiter = randomBoolean(); + final AuthenticationResult result; + + if (testWithDelimiter) { + result = performAuthentication( + roleMapper, + useNameId, + principalIsEmailAddress, + populateUserMetadata, + false, + authenticatingRealm, + List.of("STRIKE Team: Delta$shield"), + "$" + ); + } else { + result = performAuthentication( + roleMapper, + useNameId, + principalIsEmailAddress, + populateUserMetadata, + false, + authenticatingRealm + ); + } assertThat(result, notNullValue()); assertThat(result.getStatus(), equalTo(AuthenticationResult.Status.SUCCESS)); assertThat(result.getValue().principal(), equalTo(useNameId ? "clint.barton" : "cbarton")); @@ -377,7 +393,11 @@ public void testAuthenticateWithRoleMapping() throws Exception { } assertThat(userData.get().getUsername(), equalTo(useNameId ? 
"clint.barton" : "cbarton")); - assertThat(userData.get().getGroups(), containsInAnyOrder("avengers", "shield")); + if (testWithDelimiter) { + assertThat(userData.get().getGroups(), containsInAnyOrder("STRIKE Team: Delta", "shield")); + } else { + assertThat(userData.get().getGroups(), containsInAnyOrder("avengers", "shield")); + } } public void testAuthenticateWithAuthorizingRealm() throws Exception { @@ -431,6 +451,28 @@ private AuthenticationResult performAuthentication( Boolean populateUserMetadata, boolean useAuthorizingRealm, String authenticatingRealm + ) throws Exception { + return performAuthentication( + roleMapper, + useNameId, + principalIsEmailAddress, + populateUserMetadata, + useAuthorizingRealm, + authenticatingRealm, + Arrays.asList("avengers", "shield"), + null + ); + } + + private AuthenticationResult performAuthentication( + UserRoleMapper roleMapper, + boolean useNameId, + boolean principalIsEmailAddress, + Boolean populateUserMetadata, + boolean useAuthorizingRealm, + String authenticatingRealm, + List groups, + String groupsDelimiter ) throws Exception { final EntityDescriptor idp = mockIdp(); final SpConfiguration sp = new SpConfiguration("", "https://saml/", null, null, null, Collections.emptyList()); @@ -453,8 +495,12 @@ private AuthenticationResult performAuthentication( final Settings.Builder settingsBuilder = Settings.builder() .put(getFullSettingKey(REALM_NAME, SamlRealmSettings.PRINCIPAL_ATTRIBUTE.getAttribute()), useNameId ? "nameid" : "uid") - .put(getFullSettingKey(REALM_NAME, SamlRealmSettings.GROUPS_ATTRIBUTE.getAttribute()), "groups") + .put(getFullSettingKey(REALM_NAME, SamlRealmSettings.GROUPS_ATTRIBUTE.getAttributeSetting().getAttribute()), "groups") .put(getFullSettingKey(REALM_NAME, SamlRealmSettings.MAIL_ATTRIBUTE.getAttribute()), "mail"); + + if (groupsDelimiter != null) { + settingsBuilder.put(getFullSettingKey(REALM_NAME, SamlRealmSettings.GROUPS_ATTRIBUTE.getDelimiter()), groupsDelimiter); + } if (principalIsEmailAddress) { final boolean anchoredMatch = randomBoolean(); settingsBuilder.put( @@ -497,7 +543,7 @@ private AuthenticationResult performAuthentication( randomAlphaOfLength(16), Arrays.asList( new SamlAttributes.SamlAttribute("urn:oid:0.9.2342.19200300.100.1.1", "uid", Collections.singletonList(uidValue)), - new SamlAttributes.SamlAttribute("urn:oid:1.3.6.1.4.1.5923.1.5.1.1", "groups", Arrays.asList("avengers", "shield")), + new SamlAttributes.SamlAttribute("urn:oid:1.3.6.1.4.1.5923.1.5.1.1", "groups", groups), new SamlAttributes.SamlAttribute("urn:oid:0.9.2342.19200300.100.1.3", "mail", Arrays.asList("cbarton@shield.gov")) ) ); @@ -534,7 +580,120 @@ public SamlRealm buildRealm( } } - public void testAttributeSelectionWithRegex() throws Exception { + public void testAttributeSelectionWithSplit() { + List strings = performAttributeSelectionWithSplit(",", "departments", "engineering", "elasticsearch-admins", "employees"); + assertThat("For attributes: " + strings, strings, contains("engineering", "elasticsearch-admins", "employees")); + } + + public void testAttributeSelectionWithSplitEmptyInput() { + List strings = performAttributeSelectionWithSplit(",", "departments"); + assertThat("For attributes: " + strings, strings, is(empty())); + } + + public void testAttributeSelectionWithSplitJustDelimiter() { + List strings = performAttributeSelectionWithSplit(",", ","); + assertThat("For attributes: " + strings, strings, is(empty())); + } + + public void testAttributeSelectionWithSplitNoDelimiter() { + List strings = 
performAttributeSelectionWithSplit(",", "departments", "elasticsearch-team"); + assertThat("For attributes: " + strings, strings, contains("elasticsearch-team")); + } + + private List performAttributeSelectionWithSplit(String delimiter, String groupAttributeName, String... returnedGroups) { + final Settings settings = Settings.builder() + .put(REALM_SETTINGS_PREFIX + ".attributes.groups", groupAttributeName) + .put(REALM_SETTINGS_PREFIX + ".attribute_delimiters.groups", delimiter) + .build(); + + final RealmConfig config = buildConfig(settings); + + final SamlRealmSettings.AttributeSettingWithDelimiter groupSetting = new SamlRealmSettings.AttributeSettingWithDelimiter("groups"); + final SamlRealm.AttributeParser parser = SamlRealm.AttributeParser.forSetting(logger, groupSetting, config); + + final SamlAttributes attributes = new SamlAttributes( + new SamlNameId(NameIDType.TRANSIENT, randomAlphaOfLength(24), null, null, null), + randomAlphaOfLength(16), + Collections.singletonList( + new SamlAttributes.SamlAttribute( + "departments", + "departments", + Collections.singletonList(String.join(delimiter, returnedGroups)) + ) + ) + ); + return parser.getAttribute(attributes); + } + + public void testAttributeSelectionWithDelimiterAndPatternThrowsSettingsException() throws Exception { + final Settings settings = Settings.builder() + .put(REALM_SETTINGS_PREFIX + ".attributes.groups", "departments") + .put(REALM_SETTINGS_PREFIX + ".attribute_delimiters.groups", ",") + .put(REALM_SETTINGS_PREFIX + ".attribute_patterns.groups", "^(.+)@\\w+.example.com$") + .build(); + + final RealmConfig config = buildConfig(settings); + + final SamlRealmSettings.AttributeSettingWithDelimiter groupSetting = new SamlRealmSettings.AttributeSettingWithDelimiter("groups"); + + final SettingsException settingsException = expectThrows( + SettingsException.class, + () -> SamlRealm.AttributeParser.forSetting(logger, groupSetting, config) + ); + + assertThat(settingsException.getMessage(), containsString(REALM_SETTINGS_PREFIX + ".attribute_delimiters.groups")); + assertThat(settingsException.getMessage(), containsString(REALM_SETTINGS_PREFIX + ".attribute_patterns.groups")); + } + + public void testAttributeSelectionNoGroupsConfiguredThrowsSettingsException() { + String delimiter = ","; + final Settings settings = Settings.builder().put(REALM_SETTINGS_PREFIX + ".attribute_delimiters.groups", delimiter).build(); + final RealmConfig config = buildConfig(settings); + final SamlRealmSettings.AttributeSettingWithDelimiter groupSetting = new SamlRealmSettings.AttributeSettingWithDelimiter("groups"); + + final SettingsException settingsException = expectThrows( + SettingsException.class, + () -> SamlRealm.AttributeParser.forSetting(logger, groupSetting, config) + ); + + assertThat(settingsException.getMessage(), containsString(REALM_SETTINGS_PREFIX + ".attribute_delimiters.groups")); + assertThat(settingsException.getMessage(), containsString(REALM_SETTINGS_PREFIX + ".attributes.groups")); + } + + public void testAttributeSelectionWithSplitAndListThrowsSecurityException() { + String delimiter = ","; + + final Settings settings = Settings.builder() + .put(REALM_SETTINGS_PREFIX + ".attributes.groups", "departments") + .put(REALM_SETTINGS_PREFIX + ".attribute_delimiters.groups", delimiter) + .build(); + + final RealmConfig config = buildConfig(settings); + + final SamlRealmSettings.AttributeSettingWithDelimiter groupSetting = new SamlRealmSettings.AttributeSettingWithDelimiter("groups"); + final SamlRealm.AttributeParser parser = 
SamlRealm.AttributeParser.forSetting(logger, groupSetting, config); + + final SamlAttributes attributes = new SamlAttributes( + new SamlNameId(NameIDType.TRANSIENT, randomAlphaOfLength(24), null, null, null), + randomAlphaOfLength(16), + Collections.singletonList( + new SamlAttributes.SamlAttribute( + "departments", + "departments", + List.of("engineering", String.join(delimiter, "elasticsearch-admins", "employees")) + ) + ) + ); + + ElasticsearchSecurityException securityException = expectThrows( + ElasticsearchSecurityException.class, + () -> parser.getAttribute(attributes) + ); + + assertThat(securityException.getMessage(), containsString("departments")); + } + + public void testAttributeSelectionWithRegex() { final boolean useFriendlyName = randomBoolean(); final Settings settings = Settings.builder() .put(REALM_SETTINGS_PREFIX + ".attributes.principal", useFriendlyName ? "mail" : "urn:oid:0.9.2342.19200300.100.1.3") From 37d6690d9b6556cceef005465d810c1325786945 Mon Sep 17 00:00:00 2001 From: Daniel Mitterdorfer Date: Thu, 7 Dec 2023 10:07:37 +0100 Subject: [PATCH 24/45] Always allow sample size for stacktraces (#103101) The profiling get stacktraces API can be used in two different places: 1. In the native profiling UI 2. From within an APM context In the latter case we disallowed the `sample_size` request body parameter because the implementation always considers all samples. However, as this will change in the future and we need to accept the `sample_size` parameter this will lead to rejected requests during upgrades. With this commit we always accept the `sample_size` request body parameter to avoid such cases. --- .../xpack/profiling/GetStackTracesRequest.java | 9 +-------- .../profiling/GetStackTracesRequestTests.java | 17 +++++++++++++++++ 2 files changed, 18 insertions(+), 8 deletions(-) diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStackTracesRequest.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStackTracesRequest.java index 3ab797e4b16ad..f81b5f01caae3 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStackTracesRequest.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStackTracesRequest.java @@ -247,13 +247,6 @@ public ActionRequestValidationException validate() { validationException ); } - // we don't do downsampling when a custom index is provided - if (sampleSize != null) { - validationException = addValidationError( - "[" + SAMPLE_SIZE_FIELD.getPreferredName() + "] must not be set", - validationException - ); - } } else { if (stackTraceIds != null) { validationException = addValidationError( @@ -261,8 +254,8 @@ public ActionRequestValidationException validate() { validationException ); } - validationException = requirePositive(SAMPLE_SIZE_FIELD, sampleSize, validationException); } + validationException = requirePositive(SAMPLE_SIZE_FIELD, sampleSize, validationException); validationException = requirePositive(REQUESTED_DURATION_FIELD, requestedDuration, validationException); validationException = requirePositive(AWS_COST_FACTOR_FIELD, awsCostFactor, validationException); validationException = requirePositive(CUSTOM_CO2_PER_KWH, customCO2PerKWH, validationException); diff --git a/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/GetStackTracesRequestTests.java b/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/GetStackTracesRequestTests.java index 5b6befbe5a2c2..8bf4598cf75f7 
100644 --- a/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/GetStackTracesRequestTests.java +++ b/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/GetStackTracesRequestTests.java @@ -170,6 +170,23 @@ public void testValidateWrongSampleSize() { assertTrue(validationErrors.get(0).contains("[sample_size] must be greater than 0,")); } + public void testValidateSampleSizeIsValidWithCustomIndices() { + GetStackTracesRequest request = new GetStackTracesRequest( + 10, + 1.0d, + 1.0d, + null, + randomAlphaOfLength(7), + randomAlphaOfLength(3), + null, + null, + null, + null, + null + ); + assertNull("Expecting no validation errors", request.validate()); + } + public void testValidateStacktraceWithoutIndices() { GetStackTracesRequest request = new GetStackTracesRequest( 1, From a245fdcd8a7b4ec8b2a403a32b843ea005f2df12 Mon Sep 17 00:00:00 2001 From: Yang Wang Date: Thu, 7 Dec 2023 20:39:36 +1100 Subject: [PATCH 25/45] Assert IO is still owned by the region after read and write (#103100) When the code path goes to CacheFile#populateAndRead, both the read and write are performed when holding the reference of the file region (#102843). Therefore we should be able to assert that the region ownership throughout the processes. --- .../blobcache/shared/SharedBlobCacheService.java | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java index 9867c81808d24..be95f5c883de8 100644 --- a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java +++ b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java @@ -917,6 +917,8 @@ private RangeMissingHandler writerWithOffset(RangeMissingHandler writer, CacheFi return (channel, channelPos, relativePos, len, progressUpdater) -> { assert assertValidRegionAndLength(fileRegion, channelPos, len); adjustedWriter.fillCacheRange(channel, channelPos, relativePos, len, progressUpdater); + assert regionOwners.get(fileRegion.io) == fileRegion + : "File chunk [" + fileRegion.regionKey + "] no longer owns IO [" + fileRegion.io + "]"; }; } return adjustedWriter; @@ -932,7 +934,10 @@ private RangeAvailableHandler readerWithOffset(RangeAvailableHandler reader, Cac if (Assertions.ENABLED) { return (channel, channelPos, relativePos, len) -> { assert assertValidRegionAndLength(fileRegion, channelPos, len); - return adjustedReader.onRangeAvailable(channel, channelPos, relativePos, len); + final int bytesRead = adjustedReader.onRangeAvailable(channel, channelPos, relativePos, len); + assert regionOwners.get(fileRegion.io) == fileRegion + : "File chunk [" + fileRegion.regionKey + "] no longer owns IO [" + fileRegion.io + "]"; + return bytesRead; }; } return adjustedReader; From 468c3dafdba2b7e9e04487410512409d8d4db46b Mon Sep 17 00:00:00 2001 From: Luigi Dell'Aquila Date: Thu, 7 Dec 2023 11:43:31 +0100 Subject: [PATCH 26/45] Fix ToDatetimeTests (#103075) --- .../function/scalar/convert/ToDatetimeTests.java | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDatetimeTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDatetimeTests.java index 
c92c8712d1697..2b3c9d166946a 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDatetimeTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDatetimeTests.java @@ -113,7 +113,7 @@ public static Iterable parameters() { new TestCaseSupplier.TypedDataSupplier( "", // millis past "0001-01-01T00:00:00.000Z" to match the default formatter - () -> new BytesRef(Instant.ofEpochMilli(randomLongBetween(-62135596800000L, Long.MAX_VALUE)).toString()), + () -> new BytesRef(randomDateString(-62135596800000L, Long.MAX_VALUE)), DataTypes.KEYWORD ) ), @@ -128,7 +128,7 @@ public static Iterable parameters() { new TestCaseSupplier.TypedDataSupplier( "", // millis before "0001-01-01T00:00:00.000Z" - () -> new BytesRef(Instant.ofEpochMilli(randomLongBetween(Long.MIN_VALUE, -62135596800001L)).toString()), + () -> new BytesRef(randomDateString(Long.MIN_VALUE, -62135596800001L)), DataTypes.KEYWORD ) ), @@ -145,6 +145,15 @@ public static Iterable parameters() { return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers))); } + private static String randomDateString(long from, long to) { + String result = Instant.ofEpochMilli(randomLongBetween(from, to)).toString(); + if (result.matches(".*:..Z")) { + // it's a zero millisecond date string, Instant.toString() will strip the milliseconds (and the parsing will fail) + return result.replace("Z", ".000Z"); + } + return result; + } + @Override protected Expression build(Source source, List args) { return new ToDatetime(source, args.get(0)); From e1e101e46d8243f41e2cde99c0c7748ef0b7509b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Przemys=C5=82aw=20Witek?= Date: Thu, 7 Dec 2023 12:22:09 +0100 Subject: [PATCH 27/45] [Transform] Implement integration test for transform's deduce_mappings setting (#103052) --- .../test/rest/ESRestTestCase.java | 4 +- .../integration/TransformDestIndexIT.java | 115 ++++++++++++++++++ .../integration/TransformRestTestCase.java | 2 +- .../pivot/AggregationResultUtils.java | 42 +++---- .../transform/transforms/pivot/Pivot.java | 6 +- .../transforms/pivot/SchemaUtil.java | 11 ++ .../pivot/AggregationResultUtilsTests.java | 2 + .../transforms/pivot/SchemaUtilTests.java | 22 ++++ 8 files changed, 174 insertions(+), 30 deletions(-) diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java index 97f0b45fae462..6dc1f57030140 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java @@ -1744,8 +1744,8 @@ protected static Map getIndexMapping(String index) throws IOExce @SuppressWarnings("unchecked") protected Map getIndexMappingAsMap(String index) throws IOException { - Map indexSettings = getIndexMapping(index); - return (Map) ((Map) indexSettings.get(index)).get("mappings"); + Map indexMapping = getIndexMapping(index); + return (Map) ((Map) indexMapping.get(index)).get("mappings"); } protected static boolean indexExists(String index) throws IOException { diff --git a/x-pack/plugin/transform/qa/single-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TransformDestIndexIT.java b/x-pack/plugin/transform/qa/single-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TransformDestIndexIT.java index 
5e3ec897f72b7..49ee0f8bbd9a9 100644 --- a/x-pack/plugin/transform/qa/single-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TransformDestIndexIT.java +++ b/x-pack/plugin/transform/qa/single-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TransformDestIndexIT.java @@ -9,7 +9,9 @@ import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.xcontent.support.XContentMapValues; +import org.elasticsearch.rest.action.admin.indices.RestPutIndexTemplateAction; import org.elasticsearch.xpack.core.transform.TransformConfigVersion; import org.elasticsearch.xpack.core.transform.transforms.DestAlias; import org.elasticsearch.xpack.core.transform.transforms.SettingsConfig; @@ -21,6 +23,9 @@ import java.util.Map; import java.util.stream.Collectors; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; + public class TransformDestIndexIT extends TransformRestTestCase { private static boolean indicesCreated = false; @@ -140,6 +145,116 @@ public void testTransformDestIndexCreatedDuringUpdate() throws Exception { assertTrue(indexExists(destIndex)); } + public void testTransformDestIndexMappings_DeduceMappings() throws Exception { + testTransformDestIndexMappings("test_dest_index_mappings_deduce", true); + } + + public void testTransformDestIndexMappings_NoDeduceMappings() throws Exception { + testTransformDestIndexMappings("test_dest_index_mappings_no_deduce", false); + } + + private void testTransformDestIndexMappings(String transformId, boolean deduceMappings) throws Exception { + String destIndex = transformId + "-dest"; + + { + String destIndexTemplate = Strings.format(""" + { + "index_patterns": [ "%s*" ], + "mappings": { + "properties": { + "timestamp": { + "type": "date" + }, + "reviewer": { + "type": "keyword" + }, + "avg_rating": { + "type": "double" + } + } + } + }""", destIndex); + Request createIndexTemplateRequest = new Request("PUT", "_template/test_dest_index_no_deduce_template"); + createIndexTemplateRequest.setJsonEntity(destIndexTemplate); + createIndexTemplateRequest.setOptions(expectWarnings(RestPutIndexTemplateAction.DEPRECATION_WARNING)); + Map createIndexTemplateResponse = entityAsMap(client().performRequest(createIndexTemplateRequest)); + assertThat(createIndexTemplateResponse.get("acknowledged"), equalTo(Boolean.TRUE)); + } + + // Verify that the destination index does not exist yet, even though the template already exists + assertFalse(indexExists(destIndex)); + + { + String config = Strings.format(""" + { + "dest": { + "index": "%s" + }, + "source": { + "index": "%s" + }, + "sync": { + "time": { + "field": "timestamp", + "delay": "15m" + } + }, + "frequency": "1s", + "pivot": { + "group_by": { + "timestamp": { + "date_histogram": { + "field": "timestamp", + "fixed_interval": "10s" + } + }, + "reviewer": { + "terms": { + "field": "user_id" + } + } + }, + "aggregations": { + "avg_rating": { + "avg": { + "field": "stars" + } + } + } + }, + "settings": { + "unattended": true, + "deduce_mappings": %s + } + }""", destIndex, REVIEWS_INDEX_NAME, deduceMappings); + createReviewsTransform(transformId, null, null, config); + + startTransform(transformId); + waitForTransformCheckpoint(transformId, 1); + } + + // Verify that the destination index now exists and has correct mappings from the template + assertTrue(indexExists(destIndex)); + assertThat( + getIndexMappingAsMap(destIndex), + is( + equalTo( + 
Map.of( + "properties", + Map.of( + "avg_rating", + Map.of("type", "double"), + "reviewer", + Map.of("type", "keyword"), + "timestamp", + Map.of("type", "date") + ) + ) + ) + ) + ); + } + private static void assertAliases(String index, String... aliases) throws IOException { Map> expectedAliases = Arrays.stream(aliases).collect(Collectors.toMap(a -> a, a -> Map.of())); Response aliasesResponse = client().performRequest(new Request("GET", index + "/_alias")); diff --git a/x-pack/plugin/transform/qa/single-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TransformRestTestCase.java b/x-pack/plugin/transform/qa/single-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TransformRestTestCase.java index c616c1c238171..6d14c74f8fea9 100644 --- a/x-pack/plugin/transform/qa/single-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TransformRestTestCase.java +++ b/x-pack/plugin/transform/qa/single-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/transform/integration/TransformRestTestCase.java @@ -380,7 +380,7 @@ protected void createLatestReviewsTransform(String transformId, String transform createReviewsTransform(transformId, null, null, config); } - private void createReviewsTransform(String transformId, String authHeader, String secondaryAuthHeader, String config) + protected void createReviewsTransform(String transformId, String authHeader, String secondaryAuthHeader, String config) throws IOException { final Request createTransformRequest = createRequestWithSecondaryAuth( "PUT", diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/pivot/AggregationResultUtils.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/pivot/AggregationResultUtils.java index ce8a3d33ce42a..1c6c411020d49 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/pivot/AggregationResultUtils.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/pivot/AggregationResultUtils.java @@ -49,6 +49,7 @@ import java.util.stream.Stream; import static org.elasticsearch.xpack.transform.transforms.pivot.SchemaUtil.dropFloatingPointComponentIfTypeRequiresIt; +import static org.elasticsearch.xpack.transform.transforms.pivot.SchemaUtil.isDateType; import static org.elasticsearch.xpack.transform.transforms.pivot.SchemaUtil.isNumericType; public final class AggregationResultUtils { @@ -70,16 +71,9 @@ public final class AggregationResultUtils { TYPE_VALUE_EXTRACTOR_MAP = Collections.unmodifiableMap(tempMap); } - private static final Map BUCKET_KEY_EXTRACTOR_MAP; private static final BucketKeyExtractor DEFAULT_BUCKET_KEY_EXTRACTOR = new DefaultBucketKeyExtractor(); private static final BucketKeyExtractor DATES_AS_EPOCH_BUCKET_KEY_EXTRACTOR = new DatesAsEpochBucketKeyExtractor(); - - static { - Map tempMap = new HashMap<>(); - tempMap.put(GeoTileGroupSource.class.getName(), new GeoTileBucketKeyExtractor()); - - BUCKET_KEY_EXTRACTOR_MAP = Collections.unmodifiableMap(tempMap); - } + private static final BucketKeyExtractor GEO_TILE_BUCKET_KEY_EXTRACTOR = new GeoTileBucketKeyExtractor(); private static final String FIELD_TYPE = "type"; private static final String FIELD_COORDINATES = "coordinates"; @@ -150,10 +144,13 @@ public static Stream> extractCompositeAggregationResults( } static BucketKeyExtractor getBucketKeyExtractor(SingleGroupSource groupSource, boolean datesAsEpoch) { - return 
BUCKET_KEY_EXTRACTOR_MAP.getOrDefault( - groupSource.getClass().getName(), - datesAsEpoch ? DATES_AS_EPOCH_BUCKET_KEY_EXTRACTOR : DEFAULT_BUCKET_KEY_EXTRACTOR - ); + if (groupSource instanceof GeoTileGroupSource) { + return GEO_TILE_BUCKET_KEY_EXTRACTOR; + } else if (datesAsEpoch) { + return DATES_AS_EPOCH_BUCKET_KEY_EXTRACTOR; + } else { + return DEFAULT_BUCKET_KEY_EXTRACTOR; + } } static AggValueExtractor getExtractor(Aggregation aggregation) { @@ -514,7 +511,6 @@ public Object value(Object key, String type) { ); return geoShape; } - } static class DefaultBucketKeyExtractor implements BucketKeyExtractor { @@ -523,16 +519,14 @@ static class DefaultBucketKeyExtractor implements BucketKeyExtractor { public Object value(Object key, String type) { if (isNumericType(type) && key instanceof Double) { return dropFloatingPointComponentIfTypeRequiresIt(type, (Double) key); - } else if ((DateFieldMapper.CONTENT_TYPE.equals(type) || DateFieldMapper.DATE_NANOS_CONTENT_TYPE.equals(type)) - && key instanceof Long) { - // date_histogram return bucket keys with milliseconds since epoch precision, therefore we don't need a - // nanosecond formatter, for the parser on indexing side, time is optional (only the date part is mandatory) - return DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.formatMillis((Long) key); - } - - return key; + } else if (isDateType(type) && key instanceof Long) { + // date_histogram return bucket keys with milliseconds since epoch precision, therefore we don't need a + // nanosecond formatter, for the parser on indexing side, time is optional (only the date part is mandatory) + return DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.formatMillis((Long) key); + } else { + return key; + } } - } static class DatesAsEpochBucketKeyExtractor implements BucketKeyExtractor { @@ -541,9 +535,9 @@ static class DatesAsEpochBucketKeyExtractor implements BucketKeyExtractor { public Object value(Object key, String type) { if (isNumericType(type) && key instanceof Double) { return dropFloatingPointComponentIfTypeRequiresIt(type, (Double) key); + } else { + return key; } - return key; } - } } diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/pivot/Pivot.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/pivot/Pivot.java index 3edc0b281fa41..9cb96507be0dd 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/pivot/Pivot.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/pivot/Pivot.java @@ -148,9 +148,9 @@ protected Stream> extractResults( // < 7.11 as epoch millis // >= 7.11 as string // note: it depends on the version when the transform has been created, not the version of the code - boolean datesAsEpoch = settings.getDatesAsEpochMillis() != null ? settings.getDatesAsEpochMillis() - : version.onOrAfter(TransformConfigVersion.V_7_11_0) ? false - : true; + boolean datesAsEpoch = settings.getDatesAsEpochMillis() != null + ? 
settings.getDatesAsEpochMillis() + : version.before(TransformConfigVersion.V_7_11_0); return AggregationResultUtils.extractCompositeAggregationResults( agg, diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/pivot/SchemaUtil.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/pivot/SchemaUtil.java index 5cacee644fe3c..3b6ea7758947a 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/pivot/SchemaUtil.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/pivot/SchemaUtil.java @@ -15,6 +15,7 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.internal.Client; import org.elasticsearch.core.Tuple; +import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.mapper.KeywordFieldMapper; import org.elasticsearch.index.mapper.NumberFieldMapper; import org.elasticsearch.index.query.QueryBuilder; @@ -28,6 +29,7 @@ import java.util.HashMap; import java.util.Map; import java.util.Objects; +import java.util.Set; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -39,6 +41,11 @@ public final class SchemaUtil { // Full collection of numeric field type strings and whether they are floating point or not private static final Map NUMERIC_FIELD_MAPPER_TYPES; + // Full collection of date field type strings + private static final Set DATE_FIELD_MAPPER_TYPES = Set.of( + DateFieldMapper.CONTENT_TYPE, + DateFieldMapper.DATE_NANOS_CONTENT_TYPE + ); static { Map types = Stream.of(NumberFieldMapper.NumberType.values()) .collect(Collectors.toMap(t -> t.typeName(), t -> t.numericType().isFloatingPoint())); @@ -55,6 +62,10 @@ public static boolean isNumericType(String type) { return type != null && NUMERIC_FIELD_MAPPER_TYPES.containsKey(type); } + public static boolean isDateType(String type) { + return type != null && DATE_FIELD_MAPPER_TYPES.contains(type); + } + /** * Convert a numeric value to a whole number if it's not a floating point number. 
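     * For example, 42.0 for a "long" field is returned as the whole number 42, while 42.5 for a "double" field is returned unchanged.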
* diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/AggregationResultUtilsTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/AggregationResultUtilsTests.java index 7dc441141793d..945161e548b75 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/AggregationResultUtilsTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/AggregationResultUtilsTests.java @@ -1081,6 +1081,7 @@ public void testDefaultBucketKeyExtractor() { assertThat(extractor.value(1577836800000L, "date"), equalTo("2020-01-01T00:00:00.000Z")); assertThat(extractor.value(1577836800000L, "date_nanos"), equalTo("2020-01-01T00:00:00.000Z")); assertThat(extractor.value(1577836800000L, "long"), equalTo(1577836800000L)); + assertThat(extractor.value(1577836800000L, null), equalTo(1577836800000L)); } public void testDatesAsEpochBucketKeyExtractor() { @@ -1091,6 +1092,7 @@ public void testDatesAsEpochBucketKeyExtractor() { assertThat(extractor.value(1577836800000L, "date"), equalTo(1577836800000L)); assertThat(extractor.value(1577836800000L, "date_nanos"), equalTo(1577836800000L)); assertThat(extractor.value(1577836800000L, "long"), equalTo(1577836800000L)); + assertThat(extractor.value(1577836800000L, null), equalTo(1577836800000L)); } private void executeTest( diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/SchemaUtilTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/SchemaUtilTests.java index 881d578cb4536..525f97af356da 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/SchemaUtilTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/SchemaUtilTests.java @@ -218,6 +218,28 @@ public void testGetSourceFieldMappingsWithRuntimeMappings() throws InterruptedEx } } + public void testIsNumericType() { + assertFalse(SchemaUtil.isNumericType(null)); + assertFalse(SchemaUtil.isNumericType("non-existing")); + assertTrue(SchemaUtil.isNumericType("double")); + assertTrue(SchemaUtil.isNumericType("integer")); + assertTrue(SchemaUtil.isNumericType("long")); + assertFalse(SchemaUtil.isNumericType("date")); + assertFalse(SchemaUtil.isNumericType("date_nanos")); + assertFalse(SchemaUtil.isNumericType("keyword")); + } + + public void testIsDateType() { + assertFalse(SchemaUtil.isDateType(null)); + assertFalse(SchemaUtil.isDateType("non-existing")); + assertFalse(SchemaUtil.isDateType("double")); + assertFalse(SchemaUtil.isDateType("integer")); + assertFalse(SchemaUtil.isDateType("long")); + assertTrue(SchemaUtil.isDateType("date")); + assertTrue(SchemaUtil.isDateType("date_nanos")); + assertFalse(SchemaUtil.isDateType("keyword")); + } + private static class FieldCapsMockClient extends NoOpClient { FieldCapsMockClient(ThreadPool threadPool) { super(threadPool); From cb407bd85e17374e08da762018ba738e6a60c409 Mon Sep 17 00:00:00 2001 From: Tim Grein Date: Thu, 7 Dec 2023 13:07:24 +0100 Subject: [PATCH 28/45] [Connectors API] Add error and status field checks to set sync job error integration tests. 
(#103065) Improve set sync job error integration tests --- .../test/entsearch/450_connector_sync_job_error.yml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/450_connector_sync_job_error.yml b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/450_connector_sync_job_error.yml index 1ba3cf1c50b7c..6f525a2ac2883 100644 --- a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/450_connector_sync_job_error.yml +++ b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/450_connector_sync_job_error.yml @@ -20,7 +20,9 @@ setup: id: test-connector job_type: full trigger_method: on_demand + - set: { id: id } + - do: connector_sync_job.error: connector_sync_job_id: $id @@ -29,6 +31,13 @@ setup: - match: { acknowledged: true } + - do: + connector_sync_job.get: + connector_sync_job_id: $id + + - match: { error: error } + - match: { status: error } + --- "Set an error for a Connector Sync Job - Connector Sync Job does not exist": From ffc657edea752548b616bcb4e052a3d0e0e7240b Mon Sep 17 00:00:00 2001 From: David Roberts Date: Thu, 7 Dec 2023 12:53:20 +0000 Subject: [PATCH 29/45] [ML] Fix `frequent_item_sets` aggregation on empty index (#103116) Previously the `frequent_item_sets` aggregation would fail with an internal server error if run against an empty index. This change makes it return empty output, as expected. Fixes #103067 --- docs/changelog/103116.yaml | 6 +++ .../mr/ItemSetMapReduceAggregator.java | 25 ++++++----- .../test/ml/frequent_item_sets_agg.yml | 45 +++++++++++++++++++ 3 files changed, 65 insertions(+), 11 deletions(-) create mode 100644 docs/changelog/103116.yaml diff --git a/docs/changelog/103116.yaml b/docs/changelog/103116.yaml new file mode 100644 index 0000000000000..402c83e16ec37 --- /dev/null +++ b/docs/changelog/103116.yaml @@ -0,0 +1,6 @@ +pr: 103116 +summary: Fix `frequent_item_sets` aggregation on empty index +area: Machine Learning +type: bug +issues: + - 103067 diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/mr/ItemSetMapReduceAggregator.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/mr/ItemSetMapReduceAggregator.java index 7afe6265f61d7..72bfb6f1f0394 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/mr/ItemSetMapReduceAggregator.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/mr/ItemSetMapReduceAggregator.java @@ -42,6 +42,7 @@ import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.function.BiConsumer; public abstract class ItemSetMapReduceAggregator< @@ -76,7 +77,7 @@ protected ItemSetMapReduceAggregator( List valueSources = new ArrayList<>(); List fields = new ArrayList<>(); IndexSearcher contextSearcher = context.searcher(); - LeafReaderContext ctx = getLeafReaderForOrdinals(context); + Optional ctx = getLeafReaderForOrdinals(context); int id = 0; this.weightDocumentFilter = documentFilter != null @@ -85,15 +86,17 @@ protected ItemSetMapReduceAggregator( boolean rewriteBasedOnOrdinals = false; - for (var c : configsAndValueFilters) { - ItemSetMapReduceValueSource e = context.getValuesSourceRegistry() - .getAggregator(registryKey, c.v1()) - .build(c.v1(), id++, c.v2(), ordinalOptimization, ctx); - if (e.getField().getName() != 
null) { - fields.add(e.getField()); - valueSources.add(e); + if (ctx.isPresent()) { + for (var c : configsAndValueFilters) { + ItemSetMapReduceValueSource e = context.getValuesSourceRegistry() + .getAggregator(registryKey, c.v1()) + .build(c.v1(), id++, c.v2(), ordinalOptimization, ctx.get()); + if (e.getField().getName() != null) { + fields.add(e.getField()); + valueSources.add(e); + } + rewriteBasedOnOrdinals |= e.usesOrdinals(); } - rewriteBasedOnOrdinals |= e.usesOrdinals(); } this.rewriteBasedOnOrdinals = rewriteBasedOnOrdinals; @@ -220,8 +223,8 @@ private InternalAggregation buildAggregation(long owningBucketOrdinal) throws IO return new InternalItemSetMapReduceAggregation<>(name, metadata(), mapReducer, context, null, fields, profiling); } - private static LeafReaderContext getLeafReaderForOrdinals(AggregationContext context) { + private static Optional getLeafReaderForOrdinals(AggregationContext context) { IndexReader reader = context.searcher().getIndexReader(); - return reader.leaves().get(0); + return reader.leaves().stream().findFirst(); } } diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/frequent_item_sets_agg.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/frequent_item_sets_agg.yml index f5244d271abed..4a88762ddb9ea 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/frequent_item_sets_agg.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/frequent_item_sets_agg.yml @@ -540,3 +540,48 @@ setup: - match: { aggregations.fi.buckets.1.doc_count: 4 } - match: { aggregations.fi.buckets.1.support: 0.4 } - match: { aggregations.fi.buckets.1.key.error_message: ["engine overheated"] } + +--- +"Test frequent items on empty index": + - skip: + features: headers + + - do: + headers: + Authorization: "Basic eF9wYWNrX3Jlc3RfdXNlcjp4LXBhY2stdGVzdC1wYXNzd29yZA==" # run as x_pack_rest_user, i.e. the test setup superuser + indices.create: + index: unavailable-data + body: + mappings: + properties: + features: + type: keyword + error_message: + type: keyword + timestamp: + type: date + geo_point: + type: geo_point + histogram: + type: histogram + + - do: + search: + index: unavailable-data + body: > + { + "size": 0, + "aggs": { + "fi": { + "frequent_item_sets": { + "minimum_set_size": 3, + "minimum_support": 0.3, + "fields": [ + {"field": "features"}, + {"field": "error_message"} + ] + } + } + } + } + - length: { aggregations.fi.buckets: 0 } From 54ffd820056220de6f574facce4ad20b62d3eecb Mon Sep 17 00:00:00 2001 From: David Turner Date: Thu, 7 Dec 2023 13:02:18 +0000 Subject: [PATCH 30/45] Reduce usage of Engine#writeLock (#102445) `Engine#writeLock` is mostly used to avoid accepting operations while the engine is closing, but is also used in a couple of other places. This looks to be for historical reasons that no longer apply, so this commit downgrades them to using `Engine#readLock`. 
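For reference, the invariant this relies on, reduced to a self-contained sketch: the write lock is only needed for exclusivity (closing the engine), while an operation merely needs to prevent the engine from closing underneath it, which the shared read lock already guarantees. This sketch uses a plain ReentrantReadWriteLock rather than the ReleasableLock wrapper in InternalEngine, and the class and method bodies are illustrative only, not the actual engine code.

    import java.util.concurrent.locks.ReentrantReadWriteLock;

    public class EngineLockSketch {
        private final ReentrantReadWriteLock rwl = new ReentrantReadWriteLock();
        private volatile boolean closed = false;

        // Closing needs exclusivity: taking the write lock waits for all
        // in-flight operations to drain before resources are released.
        public void close() {
            rwl.writeLock().lock();
            try {
                closed = true; // release engine resources here
            } finally {
                rwl.writeLock().unlock();
            }
        }

        // An operation only needs to ensure the engine does not close while
        // it runs. The shared read lock gives that guarantee and, unlike the
        // write lock, lets many operations proceed concurrently.
        public void fillSeqNoGaps() {
            rwl.readLock().lock();
            try {
                if (closed) {
                    throw new IllegalStateException("engine is closed");
                }
                // ... add no-ops for gaps in the local sequence number history ...
            } finally {
                rwl.readLock().unlock();
            }
        }
    }

Since any number of readers may hold the read lock at once, downgrading these call sites removes needless serialization without weakening the close-safety guarantee.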
--- .../elasticsearch/gateway/ReplicaShardAllocatorSyncIdIT.java | 2 +- .../java/org/elasticsearch/index/engine/InternalEngine.java | 4 ++-- .../org/elasticsearch/index/engine/InternalEngineTests.java | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/gateway/ReplicaShardAllocatorSyncIdIT.java b/server/src/internalClusterTest/java/org/elasticsearch/gateway/ReplicaShardAllocatorSyncIdIT.java index fffa0ad05496b..116a53f5dbfae 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/gateway/ReplicaShardAllocatorSyncIdIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/gateway/ReplicaShardAllocatorSyncIdIT.java @@ -97,7 +97,7 @@ void syncFlush(String syncId) throws IOException { // make sure that background merges won't happen; otherwise, IndexWriter#hasUncommittedChanges can become true again forceMerge(false, 1, false, UUIDs.randomBase64UUID()); assertNotNull(indexWriter); - try (ReleasableLock ignored = writeLock.acquire()) { + try (ReleasableLock ignored = readLock.acquire()) { assertThat(getTranslogStats().getUncommittedOperations(), equalTo(0)); Map userData = new HashMap<>(getLastCommittedSegmentInfos().userData); SequenceNumbers.CommitInfo commitInfo = SequenceNumbers.loadSeqNoInfoFromLuceneCommit(userData.entrySet()); diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 6cdd86ce6c9a7..4994eccb31d04 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -518,7 +518,7 @@ public int restoreLocalHistoryFromTranslog(TranslogRecoveryRunner translogRecove @Override public int fillSeqNoGaps(long primaryTerm) throws IOException { - try (ReleasableLock ignored = writeLock.acquire()) { + try (ReleasableLock ignored = readLock.acquire()) { ensureOpen(); final long localCheckpoint = localCheckpointTracker.getProcessedCheckpoint(); final long maxSeqNo = localCheckpointTracker.getMaxSeqNo(); @@ -1941,7 +1941,7 @@ public NoOpResult noOp(final NoOp noOp) throws IOException { } private NoOpResult innerNoOp(final NoOp noOp) throws IOException { - assert readLock.isHeldByCurrentThread() || writeLock.isHeldByCurrentThread(); + assert readLock.isHeldByCurrentThread(); assert noOp.seqNo() > SequenceNumbers.NO_OPS_PERFORMED; final long seqNo = noOp.seqNo(); try (Releasable ignored = noOpKeyedLock.acquire(seqNo)) { diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index ca3ee07de9192..25d3298f82bd7 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -1347,7 +1347,7 @@ public void testSyncedFlushVanishesOnReplay() throws IOException { } void syncFlush(IndexWriter writer, InternalEngine engine, String syncId) throws IOException { - try (ReleasableLock ignored = engine.writeLock.acquire()) { + try (ReleasableLock ignored = engine.readLock.acquire()) { Map userData = new HashMap<>(); writer.getLiveCommitData().forEach(e -> userData.put(e.getKey(), e.getValue())); userData.put(Engine.SYNC_COMMIT_ID, syncId); From 6b4d82a2b0b89bbf42244654c1fad4e692689c12 Mon Sep 17 00:00:00 2001 From: Jaime Pan <33685703+NEUpanning@users.noreply.github.com> Date: Thu, 7 Dec 
2023 21:06:40 +0800 Subject: [PATCH 31/45] Prune unnecessary information from TransportNodesStatsAction.NodeStatsRequest (#102559) Relates #100878 --- docs/changelog/102559.yaml | 5 ++ .../org/elasticsearch/TransportVersions.java | 1 + .../cluster/node/stats/NodesStatsRequest.java | 9 +++- .../node/stats/TransportNodesStatsAction.java | 46 +++++++++++++++---- 4 files changed, 50 insertions(+), 11 deletions(-) create mode 100644 docs/changelog/102559.yaml diff --git a/docs/changelog/102559.yaml b/docs/changelog/102559.yaml new file mode 100644 index 0000000000000..ad0867ab087b9 --- /dev/null +++ b/docs/changelog/102559.yaml @@ -0,0 +1,5 @@ +pr: 102559 +summary: "Prune unnecessary information from TransportNodesStatsAction.NodeStatsRequest" +area: Network +type: enhancement +issues: [100878] diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index 5c19edc14075b..ad29384b16f45 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -191,6 +191,7 @@ static TransportVersion def(int id) { public static final TransportVersion MISSED_INDICES_UPDATE_EXCEPTION_ADDED = def(8_558_00_0); public static final TransportVersion INFERENCE_SERVICE_EMBEDDING_SIZE_ADDED = def(8_559_00_0); public static final TransportVersion ENRICH_ELASTICSEARCH_VERSION_REMOVED = def(8_560_00_0); + public static final TransportVersion NODE_STATS_REQUEST_SIMPLIFIED = def(8_561_00_0); /* * STOP! READ THIS FIRST! No, really, diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequest.java index c19ff7ea3e46e..71c9e79ef9eed 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequest.java @@ -47,8 +47,12 @@ public NodesStatsRequest(StreamInput in) throws IOException { * for all nodes will be returned. */ public NodesStatsRequest(String... nodesIds) { + this(new NodesStatsRequestParameters(), nodesIds); + } + + public NodesStatsRequest(NodesStatsRequestParameters nodesStatsRequestParameters, String... 
nodesIds) { super(nodesIds); - nodesStatsRequestParameters = new NodesStatsRequestParameters(); + this.nodesStatsRequestParameters = nodesStatsRequestParameters; } /** @@ -180,4 +184,7 @@ public void writeTo(StreamOutput out) throws IOException { nodesStatsRequestParameters.writeTo(out); } + public NodesStatsRequestParameters getNodesStatsRequestParameters() { + return nodesStatsRequestParameters; + } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java index 96fc30f93c890..106bad68e482e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java @@ -14,6 +14,7 @@ import org.elasticsearch.action.support.nodes.TransportNodesAction; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -27,10 +28,13 @@ import org.elasticsearch.transport.Transports; import java.io.IOException; +import java.util.Arrays; import java.util.List; import java.util.Map; import java.util.Set; +import static org.elasticsearch.TransportVersions.NODE_STATS_REQUEST_SIMPLIFIED; + public class TransportNodesStatsAction extends TransportNodesAction< NodesStatsRequest, NodesStatsResponse, @@ -79,11 +83,11 @@ protected NodeStats newNodeResponse(StreamInput in, DiscoveryNode node) throws I protected NodeStats nodeOperation(NodeStatsRequest nodeStatsRequest, Task task) { assert task instanceof CancellableTask; - NodesStatsRequest request = nodeStatsRequest.request; - Set metrics = request.requestedMetrics(); + final NodesStatsRequestParameters nodesStatsRequestParameters = nodeStatsRequest.getNodesStatsRequestParameters(); + Set metrics = nodesStatsRequestParameters.requestedMetrics(); return nodeService.stats( - request.indices(), - request.includeShardsStats(), + nodesStatsRequestParameters.indices(), + nodesStatsRequestParameters.includeShardsStats(), NodesStatsRequestParameters.Metric.OS.containedIn(metrics), NodesStatsRequestParameters.Metric.PROCESS.containedIn(metrics), NodesStatsRequestParameters.Metric.JVM.containedIn(metrics), @@ -104,16 +108,24 @@ protected NodeStats nodeOperation(NodeStatsRequest nodeStatsRequest, Task task) public static class NodeStatsRequest extends TransportRequest { - // TODO don't wrap the whole top-level request, it contains heavy and irrelevant DiscoveryNode things; see #100878 - NodesStatsRequest request; + private NodesStatsRequestParameters nodesStatsRequestParameters; + private String[] nodesIds; public NodeStatsRequest(StreamInput in) throws IOException { super(in); - request = new NodesStatsRequest(in); + if (in.getTransportVersion().onOrAfter(NODE_STATS_REQUEST_SIMPLIFIED)) { + this.nodesStatsRequestParameters = new NodesStatsRequestParameters(in); + this.nodesIds = in.readStringArray(); + } else { + final NodesStatsRequest nodesStatsRequest = new NodesStatsRequest(in); + this.nodesStatsRequestParameters = nodesStatsRequest.getNodesStatsRequestParameters(); + this.nodesIds = nodesStatsRequest.nodesIds(); + } } NodeStatsRequest(NodesStatsRequest request) { - this.request = request; + 
this.nodesStatsRequestParameters = request.getNodesStatsRequestParameters(); + this.nodesIds = request.nodesIds(); } @Override @@ -121,7 +133,12 @@ public Task createTask(long id, String type, String action, TaskId parentTaskId, return new CancellableTask(id, type, action, "", parentTaskId, headers) { @Override public String getDescription() { - return request.getDescription(); + return Strings.format( + "nodes=%s, metrics=%s, flags=%s", + Arrays.toString(nodesIds), + nodesStatsRequestParameters.requestedMetrics().toString(), + Arrays.toString(nodesStatsRequestParameters.indices().getFlags()) + ); } }; } @@ -129,7 +146,16 @@ public String getDescription() { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - request.writeTo(out); + if (out.getTransportVersion().onOrAfter(NODE_STATS_REQUEST_SIMPLIFIED)) { + this.nodesStatsRequestParameters.writeTo(out); + out.writeStringArrayNullable(nodesIds); + } else { + new NodesStatsRequest(nodesStatsRequestParameters, this.nodesIds).writeTo(out); + } + } + + public NodesStatsRequestParameters getNodesStatsRequestParameters() { + return nodesStatsRequestParameters; } } } From 6313275a8bcbd23344f057d09e322711092e0408 Mon Sep 17 00:00:00 2001 From: Jedr Blaszyk Date: Thu, 7 Dec 2023 14:33:34 +0100 Subject: [PATCH 32/45] [Connector API] QA adapt handling null values in parser/toXContent (#103107) --- .../application/connector/Connector.java | 136 ++++++------------ .../connector/action/PostConnectorAction.java | 12 +- .../connector/action/PutConnectorAction.java | 9 +- .../connector/ConnectorTestUtils.java | 6 +- .../application/connector/ConnectorTests.java | 58 ++++++++ 5 files changed, 109 insertions(+), 112 deletions(-) diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/Connector.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/Connector.java index bcb182774e758..11c5a44d45977 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/Connector.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/Connector.java @@ -39,7 +39,7 @@ * and synchronizing external data sources with Elasticsearch. Each Connector instance encapsulates * various settings and state information, including: *
    - *
- *     <li>A unique identifier for distinguishing different connectors.</li>
+ *     <li>A doc _id of the connector document.</li>
  *     <li>API key for authenticating with Elasticsearch, ensuring secure access.</li>
  *     <li>A configuration mapping which holds specific settings and parameters for the connector's operation.</li>
  *     <li>A {@link ConnectorCustomSchedule} object that defines custom scheduling.</li>
  • @@ -65,12 +65,11 @@ public class Connector implements NamedWriteable, ToXContentObject { public static final String NAME = Connector.class.getName().toUpperCase(Locale.ROOT); + @Nullable private final String connectorId; @Nullable private final String apiKeyId; - @Nullable private final Map configuration; - @Nullable private final Map customScheduling; @Nullable private final String description; @@ -78,11 +77,9 @@ public class Connector implements NamedWriteable, ToXContentObject { private final String error; @Nullable private final ConnectorFeatures features; - @Nullable private final List filtering; @Nullable private final String indexName; - private final boolean isNative; @Nullable private final String language; @@ -94,7 +91,6 @@ public class Connector implements NamedWriteable, ToXContentObject { private final String name; @Nullable private final ConnectorIngestPipeline pipeline; - @Nullable private final ConnectorScheduling scheduling; @Nullable private final String serviceType; @@ -151,22 +147,22 @@ private Connector( ) { this.connectorId = connectorId; this.apiKeyId = apiKeyId; - this.configuration = configuration; - this.customScheduling = customScheduling; + this.configuration = Objects.requireNonNull(configuration, "[configuration] cannot be null"); + this.customScheduling = Objects.requireNonNull(customScheduling, "[custom_scheduling] cannot be null"); this.description = description; this.error = error; this.features = features; - this.filtering = filtering; - this.indexName = indexName; + this.filtering = Objects.requireNonNull(filtering, "[filtering] cannot be null"); + this.indexName = Objects.requireNonNull(indexName, "[index_name] cannot be null"); this.isNative = isNative; this.language = language; this.lastSeen = lastSeen; this.syncInfo = syncInfo; this.name = name; this.pipeline = pipeline; - this.scheduling = scheduling; + this.scheduling = Objects.requireNonNull(scheduling, "[scheduling] cannot be null"); this.serviceType = serviceType; - this.status = Objects.requireNonNull(status, "connector status cannot be null"); + this.status = Objects.requireNonNull(status, "[status] cannot be null"); this.syncCursor = syncCursor; this.syncNow = syncNow; } @@ -257,31 +253,24 @@ public Connector(StreamInput in) throws IOException { ); static { - PARSER.declareString(optionalConstructorArg(), API_KEY_ID_FIELD); - PARSER.declareField( + PARSER.declareStringOrNull(optionalConstructorArg(), API_KEY_ID_FIELD); + PARSER.declareObject( optionalConstructorArg(), (p, c) -> p.map(HashMap::new, ConnectorConfiguration::fromXContent), - CONFIGURATION_FIELD, - ObjectParser.ValueType.OBJECT + CONFIGURATION_FIELD ); - PARSER.declareField( + PARSER.declareObject( optionalConstructorArg(), (p, c) -> p.map(HashMap::new, ConnectorCustomSchedule::fromXContent), - CUSTOM_SCHEDULING_FIELD, - ObjectParser.ValueType.OBJECT - ); - PARSER.declareString(optionalConstructorArg(), DESCRIPTION_FIELD); - PARSER.declareString(optionalConstructorArg(), ERROR_FIELD); - PARSER.declareField( - optionalConstructorArg(), - (p, c) -> ConnectorFeatures.fromXContent(p), - FEATURES_FIELD, - ObjectParser.ValueType.OBJECT + CUSTOM_SCHEDULING_FIELD ); + PARSER.declareStringOrNull(optionalConstructorArg(), DESCRIPTION_FIELD); + PARSER.declareStringOrNull(optionalConstructorArg(), ERROR_FIELD); + PARSER.declareObjectOrNull(optionalConstructorArg(), (p, c) -> ConnectorFeatures.fromXContent(p), null, FEATURES_FIELD); PARSER.declareObjectArray(optionalConstructorArg(), (p, c) -> ConnectorFiltering.fromXContent(p), 
FILTERING_FIELD); - PARSER.declareString(optionalConstructorArg(), INDEX_NAME_FIELD); + PARSER.declareStringOrNull(optionalConstructorArg(), INDEX_NAME_FIELD); PARSER.declareBoolean(optionalConstructorArg(), IS_NATIVE_FIELD); - PARSER.declareString(optionalConstructorArg(), LANGUAGE_FIELD); + PARSER.declareStringOrNull(optionalConstructorArg(), LANGUAGE_FIELD); PARSER.declareField( optionalConstructorArg(), (p, c) -> p.currentToken() == XContentParser.Token.VALUE_NULL ? null : Instant.parse(p.text()), @@ -330,32 +319,17 @@ public Connector(StreamInput in) throws IOException { ObjectParser.ValueType.STRING_OR_NULL ); - PARSER.declareString(optionalConstructorArg(), NAME_FIELD); - PARSER.declareField( - optionalConstructorArg(), - (p, c) -> ConnectorIngestPipeline.fromXContent(p), - PIPELINE_FIELD, - ObjectParser.ValueType.OBJECT - ); - PARSER.declareField( - optionalConstructorArg(), - (p, c) -> ConnectorScheduling.fromXContent(p), - SCHEDULING_FIELD, - ObjectParser.ValueType.OBJECT - ); - PARSER.declareString(optionalConstructorArg(), SERVICE_TYPE_FIELD); + PARSER.declareStringOrNull(optionalConstructorArg(), NAME_FIELD); + PARSER.declareObjectOrNull(optionalConstructorArg(), (p, c) -> ConnectorIngestPipeline.fromXContent(p), null, PIPELINE_FIELD); + PARSER.declareObject(optionalConstructorArg(), (p, c) -> ConnectorScheduling.fromXContent(p), SCHEDULING_FIELD); + PARSER.declareStringOrNull(optionalConstructorArg(), SERVICE_TYPE_FIELD); PARSER.declareField( optionalConstructorArg(), (p, c) -> ConnectorStatus.connectorStatus(p.text()), STATUS_FIELD, ObjectParser.ValueType.STRING ); - PARSER.declareField( - optionalConstructorArg(), - (parser, context) -> parser.map(), - SYNC_CURSOR_FIELD, - ObjectParser.ValueType.OBJECT_OR_NULL - ); + PARSER.declareObjectOrNull(optionalConstructorArg(), (p, c) -> p.map(), null, SYNC_CURSOR_FIELD); PARSER.declareBoolean(optionalConstructorArg(), SYNC_NOW_FIELD); } @@ -375,59 +349,30 @@ public static Connector fromXContent(XContentParser parser, String docId) throws public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); { + // The "id": connectorId is included in GET and LIST responses to provide the connector's docID. + // Note: This ID is not written to the Elasticsearch index; it's only for API response purposes. 
if (connectorId != null) { builder.field(ID_FIELD.getPreferredName(), connectorId); } - if (apiKeyId != null) { - builder.field(API_KEY_ID_FIELD.getPreferredName(), apiKeyId); - } - if (configuration != null) { - builder.xContentValuesMap(CONFIGURATION_FIELD.getPreferredName(), configuration); - } - if (customScheduling != null) { - builder.xContentValuesMap(CUSTOM_SCHEDULING_FIELD.getPreferredName(), customScheduling); - } - if (description != null) { - builder.field(DESCRIPTION_FIELD.getPreferredName(), description); - } - if (error != null) { - builder.field(ERROR_FIELD.getPreferredName(), error); - } - if (features != null) { - builder.field(FEATURES_FIELD.getPreferredName(), features); - } - if (filtering != null) { - builder.xContentList(FILTERING_FIELD.getPreferredName(), filtering); - } - if (indexName != null) { - builder.field(INDEX_NAME_FIELD.getPreferredName(), indexName); - } + builder.field(API_KEY_ID_FIELD.getPreferredName(), apiKeyId); + builder.xContentValuesMap(CONFIGURATION_FIELD.getPreferredName(), configuration); + builder.xContentValuesMap(CUSTOM_SCHEDULING_FIELD.getPreferredName(), customScheduling); + builder.field(DESCRIPTION_FIELD.getPreferredName(), description); + builder.field(ERROR_FIELD.getPreferredName(), error); + builder.field(FEATURES_FIELD.getPreferredName(), features); + builder.xContentList(FILTERING_FIELD.getPreferredName(), filtering); + builder.field(INDEX_NAME_FIELD.getPreferredName(), indexName); builder.field(IS_NATIVE_FIELD.getPreferredName(), isNative); - if (language != null) { - builder.field(LANGUAGE_FIELD.getPreferredName(), language); - } + builder.field(LANGUAGE_FIELD.getPreferredName(), language); builder.field(LAST_SEEN_FIELD.getPreferredName(), lastSeen); - if (syncInfo != null) { - syncInfo.toXContent(builder, params); - } - if (name != null) { - builder.field(NAME_FIELD.getPreferredName(), name); - } - if (pipeline != null) { - builder.field(PIPELINE_FIELD.getPreferredName(), pipeline); - } - if (scheduling != null) { - builder.field(SCHEDULING_FIELD.getPreferredName(), scheduling); - } - if (serviceType != null) { - builder.field(SERVICE_TYPE_FIELD.getPreferredName(), serviceType); - } - if (syncCursor != null) { - builder.field(SYNC_CURSOR_FIELD.getPreferredName(), syncCursor); - } + syncInfo.toXContent(builder, params); + builder.field(NAME_FIELD.getPreferredName(), name); + builder.field(PIPELINE_FIELD.getPreferredName(), pipeline); + builder.field(SCHEDULING_FIELD.getPreferredName(), scheduling); + builder.field(SERVICE_TYPE_FIELD.getPreferredName(), serviceType); + builder.field(SYNC_CURSOR_FIELD.getPreferredName(), syncCursor); builder.field(STATUS_FIELD.getPreferredName(), status.toString()); builder.field(SYNC_NOW_FIELD.getPreferredName(), syncNow); - } builder.endObject(); return builder; @@ -608,7 +553,6 @@ public static class Builder { private String indexName; private boolean isNative = false; private String language; - private Instant lastSeen; private ConnectorSyncInfo syncInfo = new ConnectorSyncInfo.Builder().build(); private String name; diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/PostConnectorAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/PostConnectorAction.java index 6570b111d8a0e..947c2f63d4950 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/PostConnectorAction.java +++ 
b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/PostConnectorAction.java @@ -29,6 +29,7 @@ import java.io.IOException; import java.util.Objects; +import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; public class PostConnectorAction extends ActionType { @@ -44,7 +45,6 @@ public static class Request extends ActionRequest implements ToXContentObject { @Nullable private final String description; - @Nullable private final String indexName; @Nullable private final Boolean isNative; @@ -67,7 +67,7 @@ public Request(String description, String indexName, Boolean isNative, String la public Request(StreamInput in) throws IOException { super(in); this.description = in.readOptionalString(); - this.indexName = in.readOptionalString(); + this.indexName = in.readString(); this.isNative = in.readOptionalBoolean(); this.language = in.readOptionalString(); this.name = in.readOptionalString(); @@ -89,7 +89,7 @@ public Request(StreamInput in) throws IOException { static { PARSER.declareString(optionalConstructorArg(), new ParseField("description")); - PARSER.declareString(optionalConstructorArg(), new ParseField("index_name")); + PARSER.declareString(constructorArg(), new ParseField("index_name")); PARSER.declareBoolean(optionalConstructorArg(), new ParseField("is_native")); PARSER.declareString(optionalConstructorArg(), new ParseField("language")); PARSER.declareString(optionalConstructorArg(), new ParseField("name")); @@ -115,9 +115,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (description != null) { builder.field("description", description); } - if (indexName != null) { - builder.field("index_name", indexName); - } + builder.field("index_name", indexName); if (isNative != null) { builder.field("is_native", isNative); } @@ -144,7 +142,7 @@ public ActionRequestValidationException validate() { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeOptionalString(description); - out.writeOptionalString(indexName); + out.writeString(indexName); out.writeOptionalBoolean(isNative); out.writeOptionalString(language); out.writeOptionalString(name); diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/PutConnectorAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/PutConnectorAction.java index 6abb5ef548be5..592be3a6b37ab 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/PutConnectorAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/connector/action/PutConnectorAction.java @@ -49,7 +49,6 @@ public static class Request extends ActionRequest implements ToXContentObject { @Nullable private final String description; - @Nullable private final String indexName; @Nullable private final Boolean isNative; @@ -82,7 +81,7 @@ public Request(StreamInput in) throws IOException { super(in); this.connectorId = in.readString(); this.description = in.readOptionalString(); - this.indexName = in.readOptionalString(); + this.indexName = in.readString(); this.isNative = in.readOptionalBoolean(); this.language = in.readOptionalString(); this.name = in.readOptionalString(); @@ -131,9 +130,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws if (description != null) { 
builder.field("description", description); } - if (indexName != null) { - builder.field("index_name", indexName); - } + builder.field("index_name", indexName); if (isNative != null) { builder.field("is_native", isNative); } @@ -168,7 +165,7 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeString(connectorId); out.writeOptionalString(description); - out.writeOptionalString(indexName); + out.writeString(indexName); out.writeOptionalBoolean(isNative); out.writeOptionalString(language); out.writeOptionalString(name); diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorTestUtils.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorTestUtils.java index 200b14109059b..6a16e6f183383 100644 --- a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorTestUtils.java +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorTestUtils.java @@ -245,15 +245,15 @@ public static Connector getRandomConnector() { .setDescription(randomFrom(new String[] { null, randomAlphaOfLength(10) })) .setError(randomFrom(new String[] { null, randomAlphaOfLength(10) })) .setFeatures(randomBoolean() ? getRandomConnectorFeatures() : null) - .setFiltering(randomBoolean() ? List.of(getRandomConnectorFiltering()) : null) - .setIndexName(randomFrom(new String[] { null, randomAlphaOfLength(10) })) + .setFiltering(List.of(getRandomConnectorFiltering())) + .setIndexName(randomAlphaOfLength(10)) .setIsNative(randomBoolean()) .setLanguage(randomFrom(new String[] { null, randomAlphaOfLength(10) })) .setLastSeen(randomFrom(new Instant[] { null, Instant.ofEpochMilli(randomLong()) })) .setSyncInfo(getRandomConnectorSyncInfo()) .setName(randomFrom(new String[] { null, randomAlphaOfLength(10) })) .setPipeline(randomBoolean() ? getRandomConnectorIngestPipeline() : null) - .setScheduling(randomBoolean() ? getRandomConnectorScheduling() : null) + .setScheduling(getRandomConnectorScheduling()) .setStatus(getRandomConnectorStatus()) .setSyncCursor(randomBoolean() ? 
Map.of(randomAlphaOfLengthBetween(5, 10), randomAlphaOfLengthBetween(5, 10)) : null) .setSyncNow(randomBoolean()) diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorTests.java index c08cd37218aeb..9401a2a58403e 100644 --- a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorTests.java +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorTests.java @@ -216,6 +216,64 @@ public void testToXContent() throws IOException { assertToXContentEquivalent(originalBytes, toXContent(parsed, XContentType.JSON, humanReadable), XContentType.JSON); } + public void testToContent_WithNullValues() throws IOException { + String connectorId = "test-connector"; + String content = XContentHelper.stripWhitespace(""" + { + "api_key_id": null, + "custom_scheduling":{}, + "configuration":{}, + "description": null, + "features": null, + "filtering":[], + "index_name": "search-test", + "is_native": false, + "language": null, + "last_access_control_sync_error": null, + "last_access_control_sync_scheduled_at": null, + "last_access_control_sync_status": null, + "last_incremental_sync_scheduled_at": null, + "last_seen": null, + "last_sync_error": null, + "last_sync_scheduled_at": null, + "last_sync_status": null, + "last_synced": null, + "name": null, + "pipeline":{ + "extract_binary_content":true, + "name":"ent-search-generic-ingestion", + "reduce_whitespace":true, + "run_ml_inference":false + }, + "scheduling":{ + "access_control":{ + "enabled":false, + "interval":"0 0 0 * * ?" + }, + "full":{ + "enabled":false, + "interval":"0 0 0 * * ?" + }, + "incremental":{ + "enabled":false, + "interval":"0 0 0 * * ?" + } + }, + "service_type": null, + "status": "needs_configuration", + "sync_now":false + }"""); + + Connector connector = Connector.fromXContentBytes(new BytesArray(content), connectorId, XContentType.JSON); + boolean humanReadable = true; + BytesReference originalBytes = toShuffledXContent(connector, XContentType.JSON, ToXContent.EMPTY_PARAMS, humanReadable); + Connector parsed; + try (XContentParser parser = createParser(XContentType.JSON.xContent(), originalBytes)) { + parsed = Connector.fromXContent(parser, connectorId); + } + assertToXContentEquivalent(originalBytes, toXContent(parsed, XContentType.JSON, humanReadable), XContentType.JSON); + } + private void assertTransportSerialization(Connector testInstance) throws IOException { Connector deserializedInstance = copyInstance(testInstance); assertNotSame(testInstance, deserializedInstance); From b16ba56b322b47edf17cf677515d1da51ee5d986 Mon Sep 17 00:00:00 2001 From: David Turner Date: Thu, 7 Dec 2023 14:13:01 +0000 Subject: [PATCH 33/45] Streamline listeners in ApiKeyUserRoleDescriptorResolver#resolveUserRoleDescriptors (#103121) There's no need for two layers of wrapping, and `delegateFailureAndWrap` should be more efficient here. Also since the listener is wrapped there's no need to catch specific exceptions. Also there's no need to go via `roleDescriptorsListener` if we're returning an empty set. 
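The mechanics behind delegateFailureAndWrap, in a self-contained sketch: the wrapper forwards failures to the delegate, and any exception thrown while handling a response is routed to the delegate's onFailure, which is exactly why the explicit try/catch around the validation call can be dropped. This is a pared-down model, not the real org.elasticsearch.action.ActionListener (whose variant takes a CheckedBiConsumer); the names here are illustrative.

    import java.util.function.BiConsumer;

    interface Listener<T> {
        void onResponse(T result);

        void onFailure(Exception e);

        // Returns a listener for an intermediate value. Failures, and any
        // exception the consumer throws, are routed to this delegate.
        default <U> Listener<U> delegateFailureAndWrap(BiConsumer<Listener<T>, U> consumer) {
            Listener<T> delegate = this;
            return new Listener<U>() {
                @Override
                public void onResponse(U value) {
                    try {
                        consumer.accept(delegate, value);
                    } catch (Exception e) {
                        delegate.onFailure(e); // validation errors end up here
                    }
                }

                @Override
                public void onFailure(Exception e) {
                    delegate.onFailure(e);
                }
            };
        }
    }

With this shape, rolesStore.getRoleDescriptorsList(subject, listener.delegateFailureAndWrap(...)) needs no inner ActionListener.wrap layer: a validation exception thrown inside the handler completes the outer listener's failure path on its own.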
--- .../ApiKeyUserRoleDescriptorResolver.java | 32 +++++++++---------- 1 file changed, 15 insertions(+), 17 deletions(-) diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/ApiKeyUserRoleDescriptorResolver.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/ApiKeyUserRoleDescriptorResolver.java index 9eb82bc97270a..17c35ecca5f13 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/ApiKeyUserRoleDescriptorResolver.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/ApiKeyUserRoleDescriptorResolver.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.security.authc.support; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xpack.core.security.authc.Authentication; @@ -16,6 +15,7 @@ import org.elasticsearch.xpack.core.security.authz.support.DLSRoleQueryValidator; import org.elasticsearch.xpack.security.authz.store.CompositeRolesStore; +import java.util.Collection; import java.util.Set; public class ApiKeyUserRoleDescriptorResolver { @@ -28,29 +28,27 @@ public ApiKeyUserRoleDescriptorResolver(CompositeRolesStore rolesStore, NamedXCo } public void resolveUserRoleDescriptors(final Authentication authentication, final ActionListener> listener) { - final ActionListener> roleDescriptorsListener = ActionListener.wrap(roleDescriptors -> { - for (RoleDescriptor rd : roleDescriptors) { - try { - DLSRoleQueryValidator.validateQueryField(rd.getIndicesPrivileges(), xContentRegistry); - } catch (ElasticsearchException | IllegalArgumentException e) { - listener.onFailure(e); - return; - } - } - listener.onResponse(roleDescriptors); - }, listener::onFailure); final Subject effectiveSubject = authentication.getEffectiveSubject(); // Retain current behaviour that User of an API key authentication has no roles if (effectiveSubject.getType() == Subject.Type.API_KEY) { - roleDescriptorsListener.onResponse(Set.of()); + listener.onResponse(Set.of()); return; } - rolesStore.getRoleDescriptorsList(effectiveSubject, ActionListener.wrap(roleDescriptorsList -> { - assert roleDescriptorsList.size() == 1; - roleDescriptorsListener.onResponse(roleDescriptorsList.iterator().next()); - }, roleDescriptorsListener::onFailure)); + rolesStore.getRoleDescriptorsList(effectiveSubject, listener.delegateFailureAndWrap(this::handleRoleDescriptorsList)); + } + + private void handleRoleDescriptorsList( + ActionListener> listener, + Collection> roleDescriptorsList + ) { + assert roleDescriptorsList.size() == 1; + final var roleDescriptors = roleDescriptorsList.iterator().next(); + for (RoleDescriptor rd : roleDescriptors) { + DLSRoleQueryValidator.validateQueryField(rd.getIndicesPrivileges(), xContentRegistry); + } + listener.onResponse(roleDescriptors); } } From 6d37f756f1652302219670cbebf3b0f3b9f3011a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tim=20R=C3=BChsen?= Date: Thu, 7 Dec 2023 15:33:18 +0100 Subject: [PATCH 34/45] [Profiling] Remove superfluous flamegraph response fields (#102849) * [Profiling] Remove superfluous flamegraph response fields The removed fields are redundant, have not yet been released and the Kibana side does not use these values. 
* Rebase on main --------- Co-authored-by: Elastic Machine --- .../profiling/GetFlameGraphActionIT.java | 2 - .../profiling/GetFlamegraphResponse.java | 40 ------------------- .../TransportGetFlamegraphAction.java | 14 ------- 3 files changed, 56 deletions(-) diff --git a/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetFlameGraphActionIT.java b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetFlameGraphActionIT.java index b55ea03557cf3..4ce441285d432 100644 --- a/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetFlameGraphActionIT.java +++ b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetFlameGraphActionIT.java @@ -16,8 +16,6 @@ public void testGetStackTracesUnfiltered() throws Exception { assertEquals(1.0d, response.getSamplingRate(), 0.001d); assertEquals(44, response.getSelfCPU()); assertEquals(1865, response.getTotalCPU()); - assertEquals(1.3651d, response.getSelfAnnualCostsUSD(), 0.0001d); - assertEquals(0.000144890d, response.getSelfAnnualCO2Tons(), 0.000000001d); assertEquals(44, response.getTotalSamples()); } } diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetFlamegraphResponse.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetFlamegraphResponse.java index 0e24d4754e2ce..211b6d76f8caa 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetFlamegraphResponse.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetFlamegraphResponse.java @@ -25,10 +25,6 @@ public class GetFlamegraphResponse extends ActionResponse implements ChunkedToXC private final double samplingRate; private final long selfCPU; private final long totalCPU; - private final double selfAnnualCO2Tons; - private final double totalAnnualCO2Tons; - private final double selfAnnualCostsUSD; - private final double totalAnnualCostsUSD; private final long totalSamples; private final List> edges; private final List fileIds; @@ -68,10 +64,6 @@ public GetFlamegraphResponse(StreamInput in) throws IOException { this.annualCostsUSDExclusive = in.readCollectionAsList(StreamInput::readDouble); this.selfCPU = in.readLong(); this.totalCPU = in.readLong(); - this.selfAnnualCO2Tons = in.readDouble(); - this.totalAnnualCO2Tons = in.readDouble(); - this.selfAnnualCostsUSD = in.readDouble(); - this.totalAnnualCostsUSD = in.readDouble(); this.totalSamples = in.readLong(); } @@ -96,10 +88,6 @@ public GetFlamegraphResponse( List annualCostsUSDExclusive, long selfCPU, long totalCPU, - double selfAnnualCO2Tons, - double totalAnnualCO2Tons, - double selfAnnualCostsUSD, - double totalAnnualCostsUSD, long totalSamples ) { this.size = size; @@ -122,10 +110,6 @@ public GetFlamegraphResponse( this.annualCostsUSDExclusive = annualCostsUSDExclusive; this.selfCPU = selfCPU; this.totalCPU = totalCPU; - this.selfAnnualCO2Tons = selfAnnualCO2Tons; - this.totalAnnualCO2Tons = totalAnnualCO2Tons; - this.selfAnnualCostsUSD = selfAnnualCostsUSD; - this.totalAnnualCostsUSD = totalAnnualCostsUSD; this.totalSamples = totalSamples; } @@ -151,10 +135,6 @@ public void writeTo(StreamOutput out) throws IOException { out.writeCollection(this.annualCostsUSDExclusive, StreamOutput::writeDouble); out.writeLong(this.selfCPU); out.writeLong(this.totalCPU); - out.writeDouble(this.selfAnnualCO2Tons); - out.writeDouble(this.totalAnnualCO2Tons); - 
out.writeDouble(this.selfAnnualCostsUSD); - out.writeDouble(this.totalAnnualCostsUSD); out.writeLong(this.totalSamples); } @@ -222,22 +202,6 @@ public long getTotalCPU() { return totalCPU; } - public double getSelfAnnualCostsUSD() { - return selfAnnualCostsUSD; - } - - public double getTotalAnnualCostsUSD() { - return totalAnnualCostsUSD; - } - - public double getSelfAnnualCO2Tons() { - return selfAnnualCO2Tons; - } - - public double getTotalAnnualCO2Tons() { - return totalAnnualCO2Tons; - } - public long getTotalSamples() { return totalSamples; } @@ -288,10 +252,6 @@ public Iterator toXContentChunked(ToXContent.Params params Iterators.single((b, p) -> b.field("SamplingRate", samplingRate)), Iterators.single((b, p) -> b.field("SelfCPU", selfCPU)), Iterators.single((b, p) -> b.field("TotalCPU", totalCPU)), - Iterators.single((b, p) -> b.field("SelfAnnualCO2Tons", selfAnnualCO2Tons)), - Iterators.single((b, p) -> b.field("TotalAnnualCO2Tons", totalAnnualCO2Tons)), - Iterators.single((b, p) -> b.field("SelfAnnualCostsUSD", selfAnnualCostsUSD)), - Iterators.single((b, p) -> b.field("TotalAnnualCostsUSD", totalAnnualCostsUSD)), Iterators.single((b, p) -> b.field("TotalSamples", totalSamples)), ChunkedToXContentHelper.endObject() ); diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetFlamegraphAction.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetFlamegraphAction.java index 5f8457e6c3b24..3cd9ded3005a2 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetFlamegraphAction.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetFlamegraphAction.java @@ -147,10 +147,6 @@ private static class FlamegraphBuilder { private int size = 0; private long selfCPU; private long totalCPU; - private double selfAnnualCO2Tons; - private double totalAnnualCO2Tons; - private double selfAnnualCostsUSD; - private double totalAnnualCostsUSD; // totalSamples is the total number of samples in the stacktraces private final long totalSamples; // Map: FrameGroupId -> NodeId @@ -229,10 +225,8 @@ public int addNode( this.totalCPU += samples; this.countExclusive.add(0L); this.annualCO2TonsInclusive.add(annualCO2Tons); - this.totalAnnualCO2Tons += annualCO2Tons; this.annualCO2TonsExclusive.add(0.0); this.annualCostsUSDInclusive.add(annualCostsUSD); - this.totalAnnualCostsUSD += annualCostsUSD; this.annualCostsUSDExclusive.add(0.0); if (frameGroupId != null) { this.edges.get(currentNode).put(frameGroupId, node); @@ -268,25 +262,21 @@ public void addSamplesExclusive(int nodeId, long sampleCount) { public void addAnnualCO2TonsInclusive(int nodeId, double annualCO2Tons) { Double priorAnnualCO2Tons = this.annualCO2TonsInclusive.get(nodeId); this.annualCO2TonsInclusive.set(nodeId, priorAnnualCO2Tons + annualCO2Tons); - this.totalAnnualCO2Tons += annualCO2Tons; } public void addAnnualCO2TonsExclusive(int nodeId, double annualCO2Tons) { Double priorAnnualCO2Tons = this.annualCO2TonsExclusive.get(nodeId); this.annualCO2TonsExclusive.set(nodeId, priorAnnualCO2Tons + annualCO2Tons); - this.selfAnnualCO2Tons += annualCO2Tons; } public void addAnnualCostsUSDInclusive(int nodeId, double annualCostsUSD) { Double priorAnnualCostsUSD = this.annualCostsUSDInclusive.get(nodeId); this.annualCostsUSDInclusive.set(nodeId, priorAnnualCostsUSD + annualCostsUSD); - this.totalAnnualCostsUSD += annualCostsUSD; } public void addAnnualCostsUSDExclusive(int nodeId, double annualCostsUSD) 
{ Double priorAnnualCostsUSD = this.annualCostsUSDExclusive.get(nodeId); this.annualCostsUSDExclusive.set(nodeId, priorAnnualCostsUSD + annualCostsUSD); - this.selfAnnualCostsUSD += annualCostsUSD; } public GetFlamegraphResponse build() { @@ -311,10 +301,6 @@ public GetFlamegraphResponse build() { annualCostsUSDExclusive, selfCPU, totalCPU, - selfAnnualCO2Tons, - totalAnnualCO2Tons, - selfAnnualCostsUSD, - totalAnnualCostsUSD, totalSamples ); } From 47ff44ee5347f78cfca9c4779e4cb3060d63f8bb Mon Sep 17 00:00:00 2001 From: Kostas Krikellas <131142368+kkrik-es@users.noreply.github.com> Date: Thu, 7 Dec 2023 16:40:28 +0200 Subject: [PATCH 35/45] Mute test SingleValueQueryTests.testNotMatchSome (#103131) Related to #102997 --- .../xpack/esql/querydsl/query/SingleValueQueryTests.java | 1 + 1 file changed, 1 insertion(+) diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/querydsl/query/SingleValueQueryTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/querydsl/query/SingleValueQueryTests.java index f5fc643d98fe6..30de8ecae135b 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/querydsl/query/SingleValueQueryTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/querydsl/query/SingleValueQueryTests.java @@ -137,6 +137,7 @@ public void testNotMatchNone() throws IOException { ); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/102997") public void testNotMatchSome() throws IOException { int max = between(1, 100); testCase( From 8277f5a7e7eca47b7f98c53490a27ce5ef38d503 Mon Sep 17 00:00:00 2001 From: David Kyle Date: Thu, 7 Dec 2023 15:15:40 +0000 Subject: [PATCH 36/45] [ML] Wait for the model download task to be created (#103063) In response to the PUT model call wait ensure the download task is created. 
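The shape of the fix, as a minimal sketch with hypothetical names (not the actual transport classes): once a method has accepted a listener, every failure path must complete that listener rather than throw, so the caller receives exactly one callback and never waits forever.

    public class NotifyListenerOnFailure {

        interface Listener<T> {
            void onResponse(T value);

            void onFailure(Exception e);
        }

        static void startDownload(String modelId, boolean waitForCompletion, Listener<Void> listener) {
            Object downloadTask = register(modelId);
            try {
                scheduleImport(downloadTask, waitForCompletion, listener);
            } catch (Exception e) {
                unregister(downloadTask);
                listener.onFailure(e); // previously a rethrow left the listener un-notified
                return;
            }
            if (waitForCompletion == false) {
                listener.onResponse(null); // acknowledge now; the import continues asynchronously
            }
        }

        private static Object register(String modelId) {
            return new Object(); // stand-in for task manager registration
        }

        private static void scheduleImport(Object task, boolean wait, Listener<Void> listener) {
            // stand-in: submit the import to a thread pool; when it finishes,
            // it notifies the listener iff the caller asked to wait
        }

        private static void unregister(Object task) {
            // stand-in for task manager cleanup
        }
    }

The companion change in TransportPutTrainedModelAction applies the same rule from the other side: the listener is now completed from exactly one place instead of both inside and outside the wrapped action callback.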
--- .../action/TransportLoadTrainedModelPackage.java | 2 +- .../xpack/ml/action/TransportPutTrainedModelAction.java | 9 +-------- .../rest-api-spec/test/ml/3rd_party_deployment.yml | 3 --- 3 files changed, 2 insertions(+), 12 deletions(-) diff --git a/x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/action/TransportLoadTrainedModelPackage.java b/x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/action/TransportLoadTrainedModelPackage.java index b61b87e4a8139..2827255874224 100644 --- a/x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/action/TransportLoadTrainedModelPackage.java +++ b/x-pack/plugin/ml-package-loader/src/main/java/org/elasticsearch/xpack/ml/packageloader/action/TransportLoadTrainedModelPackage.java @@ -105,7 +105,7 @@ protected void masterOperation(Task task, Request request, ClusterState state, A .execute(() -> importModel(client, taskManager, request, modelImporter, listener, downloadTask)); } catch (Exception e) { taskManager.unregister(downloadTask); - throw e; + listener.onFailure(e); } if (request.isWaitForCompletion() == false) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutTrainedModelAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutTrainedModelAction.java index 242d5e00f0ec7..7462b6cd918aa 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutTrainedModelAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutTrainedModelAction.java @@ -476,15 +476,8 @@ private void triggerModelFetchIfNecessary( client.execute( LoadTrainedModelPackageAction.INSTANCE, new LoadTrainedModelPackageAction.Request(modelId, modelPackageConfig, waitForCompletion), - ActionListener.wrap(ack -> { - if (waitForCompletion) { - listener.onResponse(null); - } - }, listener::onFailure) + ActionListener.wrap(ack -> listener.onResponse(null), listener::onFailure) ); - if (waitForCompletion == false) { - listener.onResponse(null); - } } private void resolvePackageConfig(String modelId, ActionListener listener) { diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/3rd_party_deployment.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/3rd_party_deployment.yml index af3ecd2637843..2765a69d0819c 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/3rd_party_deployment.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/ml/3rd_party_deployment.yml @@ -218,9 +218,6 @@ setup: --- "Test start deployment fails while model download in progress": - - skip: - version: "all" - reason: "Awaits fix: https://github.com/elastic/elasticsearch/issues/102948" - do: ml.put_trained_model: model_id: .elser_model_2 From 763ef56be8f0759a6619e5e6879d46cb5973d1e9 Mon Sep 17 00:00:00 2001 From: Tim Grein Date: Thu, 7 Dec 2023 16:44:43 +0100 Subject: [PATCH 37/45] [Connectors API] Check that the deleted sync job doesn't exist anymore (#103129) Extend the delete sync job integration tests, so they're checking, that the deleted sync job doesn't exist anymore. 
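For comparison outside the YAML harness, the same delete-then-verify pattern as a hedged standalone sketch: the base URL, the _connector/_sync_job endpoint path, and the id are assumptions taken from the test below, and a real cluster would additionally require authentication.

    import java.net.URI;
    import java.net.http.HttpClient;
    import java.net.http.HttpRequest;
    import java.net.http.HttpResponse;

    public class DeleteThenVerify {
        public static void main(String[] args) throws Exception {
            HttpClient client = HttpClient.newHttpClient();
            String base = "http://localhost:9200/_connector/_sync_job/"; // assumed endpoint
            String id = "sync-job-id-to-delete";

            // Delete the sync job and require an acknowledged (200) response.
            HttpRequest delete = HttpRequest.newBuilder(URI.create(base + id)).DELETE().build();
            HttpResponse<String> deleted = client.send(delete, HttpResponse.BodyHandlers.ofString());
            if (deleted.statusCode() != 200) {
                throw new AssertionError("delete failed: " + deleted.statusCode());
            }

            // A follow-up GET must now report the job as missing (404),
            // mirroring the `catch: missing` assertion in the YAML test.
            HttpRequest get = HttpRequest.newBuilder(URI.create(base + id)).GET().build();
            HttpResponse<String> fetched = client.send(get, HttpResponse.BodyHandlers.ofString());
            if (fetched.statusCode() != 404) {
                throw new AssertionError("expected 404 for the deleted job, got " + fetched.statusCode());
            }
        }
    }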
--- .../test/entsearch/410_connector_sync_job_delete.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/410_connector_sync_job_delete.yml b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/410_connector_sync_job_delete.yml index 67fea7fc0cd3c..4bec70ca77e0c 100644 --- a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/410_connector_sync_job_delete.yml +++ b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/410_connector_sync_job_delete.yml @@ -20,13 +20,19 @@ setup: id: test-connector job_type: full trigger_method: on_demand + - set: { id: sync-job-id-to-delete } + - do: connector_sync_job.delete: connector_sync_job_id: $sync-job-id-to-delete - match: { acknowledged: true } + - do: + connector_sync_job.get: + connector_sync_job_id: $sync-job-id-to-delete + catch: missing --- "Delete Connector Sync Job - Connector Sync Job does not exist": From a721ed8cf71ccb3978943c259a03c604a68b8ce3 Mon Sep 17 00:00:00 2001 From: Daniel Mitterdorfer Date: Thu, 7 Dec 2023 16:45:49 +0100 Subject: [PATCH 38/45] [Profiling] Query in parallel only if beneficial (#103061) With this commit we check index allocation before we do key-value lookups. To reduce latency, key-value lookups are done in parallel for multiple slices of data. However, on nodes with spinning disks, parallel accesses are harmful. Therefore, we check whether any index is allocated either to the warm or cold tier (which are usually on spinning disks) and disable parallel key-value lookups. This has improved latency on the warm tier by about 10% in our experiments. --- docs/changelog/103061.yaml | 5 + .../xpack/profiling/IndexAllocation.java | 60 +++++++++ .../TransportGetStackTracesAction.java | 22 +++- .../xpack/profiling/IndexAllocationTests.java | 122 ++++++++++++++++++ .../TransportGetStackTracesActionTests.java | 7 + 5 files changed, 211 insertions(+), 5 deletions(-) create mode 100644 docs/changelog/103061.yaml create mode 100644 x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/IndexAllocation.java create mode 100644 x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/IndexAllocationTests.java diff --git a/docs/changelog/103061.yaml b/docs/changelog/103061.yaml new file mode 100644 index 0000000000000..558429493ac6f --- /dev/null +++ b/docs/changelog/103061.yaml @@ -0,0 +1,5 @@ +pr: 103061 +summary: "[Profiling] Query in parallel only if beneficial" +area: Application +type: bug +issues: [] diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/IndexAllocation.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/IndexAllocation.java new file mode 100644 index 0000000000000..701b2d8d8728d --- /dev/null +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/IndexAllocation.java @@ -0,0 +1,60 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.profiling; + +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.routing.IndexRoutingTable; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.allocation.DataTier; +import org.elasticsearch.index.Index; + +import java.util.List; +import java.util.function.Predicate; + +final class IndexAllocation { + private IndexAllocation() { + // no instances intended + } + + static boolean isAnyAssignedToNode(ClusterState state, List indices, Predicate nodePredicate) { + for (Index index : indices) { + IndexMetadata metadata = state.getMetadata().index(index); + if (metadata == null) { + continue; + } + IndexRoutingTable routingTable = state.routingTable().index(index); + if (routingTable == null) { + continue; + } + for (ShardRouting shardRouting : routingTable.randomAllActiveShardsIt()) { + if (shardRouting.assignedToNode() == false) { + continue; + } + DiscoveryNode assignedNode = state.nodes().get(shardRouting.currentNodeId()); + if (nodePredicate.test(assignedNode)) { + return true; + } + } + } + return false; + } + + /** + * Determines whether any of the provided indices is allocated to the warm or cold tier. Machines on these + * tiers usually use spinning disks. + * + * @param state Current cluster state. + * @param indices A list of indices to check. + * @return true iff at least one index is allocated to either a warm or cold data node. + */ + static boolean isAnyOnWarmOrColdTier(ClusterState state, List indices) { + return isAnyAssignedToNode(state, indices, n -> DataTier.isWarmNode(n) || DataTier.isColdNode(n)); + } +} diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetStackTracesAction.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetStackTracesAction.java index 27feb8cc9e22a..000b448696985 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetStackTracesAction.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetStackTracesAction.java @@ -401,9 +401,12 @@ private void retrieveStackTraces( return; } List eventIds = new ArrayList<>(responseBuilder.getStackTraceEvents().keySet()); - List> slicedEventIds = sliced(eventIds, desiredSlices); ClusterState clusterState = clusterService.state(); List indices = resolver.resolve(clusterState, "profiling-stacktraces", responseBuilder.getStart(), responseBuilder.getEnd()); + // Avoid parallelism if there is potential we are on spinning disks (frozen tier uses searchable snapshots) + int sliceCount = IndexAllocation.isAnyOnWarmOrColdTier(clusterState, indices) ? 1 : desiredSlices; + log.trace("Using [{}] slice(s) to lookup stacktraces.", sliceCount); + List> slicedEventIds = sliced(eventIds, sliceCount); // Build a set of unique host IDs. 
Set uniqueHostIDs = new HashSet<>(responseBuilder.getHostEventCounts().size()); @@ -457,7 +460,7 @@ private void retrieveStackTraces( // package private for testing static List> sliced(List c, int slices) { - if (c.size() <= slices) { + if (c.size() <= slices || slices == 1) { return List.of(c); } List> slicedList = new ArrayList<>(); @@ -621,9 +624,6 @@ private void retrieveStackTraceDetails( if (mayNotifyOfCancellation(submitTask, submitListener)) { return; } - - List> slicedStackFrameIds = sliced(stackFrameIds, desiredDetailSlices); - List> slicedExecutableIds = sliced(executableIds, desiredDetailSlices); List stackFrameIndices = resolver.resolve( clusterState, "profiling-stackframes", @@ -636,6 +636,18 @@ private void retrieveStackTraceDetails( responseBuilder.getStart(), responseBuilder.getEnd() ); + // Avoid parallelism if there is potential we are on spinning disks (frozen tier uses searchable snapshots) + int stackFrameSliceCount = IndexAllocation.isAnyOnWarmOrColdTier(clusterState, stackFrameIndices) ? 1 : desiredDetailSlices; + int executableSliceCount = IndexAllocation.isAnyOnWarmOrColdTier(clusterState, executableIndices) ? 1 : desiredDetailSlices; + log.trace( + "Using [{}] slice(s) to lookup stack frames and [{}] slice(s) to lookup executables.", + stackFrameSliceCount, + executableSliceCount + ); + + List> slicedStackFrameIds = sliced(stackFrameIds, stackFrameSliceCount); + List> slicedExecutableIds = sliced(executableIds, executableSliceCount); + DetailsHandler handler = new DetailsHandler( responseBuilder, submitListener, diff --git a/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/IndexAllocationTests.java b/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/IndexAllocationTests.java new file mode 100644 index 0000000000000..852790e219a2d --- /dev/null +++ b/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/IndexAllocationTests.java @@ -0,0 +1,122 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.profiling; + +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlocks; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodeRole; +import org.elasticsearch.cluster.node.DiscoveryNodeUtils; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.routing.IndexRoutingTable; +import org.elasticsearch.cluster.routing.IndexShardRoutingTable; +import org.elasticsearch.cluster.routing.RecoverySource; +import org.elasticsearch.cluster.routing.RoutingTable; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.UnassignedInfo; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.test.ESTestCase; + +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.UUID; + +public class IndexAllocationTests extends ESTestCase { + private final Index hot = idx("hot"); + private final Index warm = idx("warm"); + private final Index cold = idx("cold"); + private final Index frozen = idx("frozen"); + + public void testEmptyIndicesNotOnWarmColdTier() { + assertFalse(IndexAllocation.isAnyOnWarmOrColdTier(clusterState(), Collections.emptyList())); + } + + public void testOtherIndicesNotOnWarmColdTier() { + assertFalse(IndexAllocation.isAnyOnWarmOrColdTier(clusterState(), List.of(hot, frozen))); + } + + public void testIndicesOnWarmColdTier() { + assertTrue(IndexAllocation.isAnyOnWarmOrColdTier(clusterState(), List.of(warm))); + assertTrue(IndexAllocation.isAnyOnWarmOrColdTier(clusterState(), List.of(cold))); + } + + public void testMixedIndicesOnWarmColdTier() { + assertTrue(IndexAllocation.isAnyOnWarmOrColdTier(clusterState(), List.of(hot, warm))); + assertTrue(IndexAllocation.isAnyOnWarmOrColdTier(clusterState(), List.of(frozen, cold))); + } + + /** + * Creates a cluster state that represents several indices: + * + *
      + *
    • hot assigned to a hot-tier node named n-hot
    • + *
    • warm assigned to a warm-tier node named n-warm
    • + *
    • cold assigned to a cold-tier node named n-cold
    • + *
    • frozen assigned to a frozen-tier node named n-frozen
    • + *
    + */ + private ClusterState clusterState() { + DiscoveryNode node = DiscoveryNodeUtils.create("node"); + DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder().localNodeId("node").masterNodeId("node").add(node); + + nodesBuilder.add(DiscoveryNodeUtils.builder("n-" + hot.getName()).roles(Set.of(DiscoveryNodeRole.DATA_HOT_NODE_ROLE)).build()); + nodesBuilder.add(DiscoveryNodeUtils.builder("n-" + warm.getName()).roles(Set.of(DiscoveryNodeRole.DATA_WARM_NODE_ROLE)).build()); + nodesBuilder.add(DiscoveryNodeUtils.builder("n-" + cold.getName()).roles(Set.of(DiscoveryNodeRole.DATA_COLD_NODE_ROLE)).build()); + nodesBuilder.add( + DiscoveryNodeUtils.builder("n-" + frozen.getName()).roles(Set.of(DiscoveryNodeRole.DATA_FROZEN_NODE_ROLE)).build() + ); + + RoutingTable.Builder routingTableBuilder = RoutingTable.builder(); + Map indices = new HashMap<>(); + for (Index index : List.of(hot, warm, cold, frozen)) { + indices.put(index.getName(), metadata(index)); + ShardRouting shardRouting = ShardRouting.newUnassigned( + new ShardId(index, 0), + true, + RecoverySource.ExistingStoreRecoverySource.INSTANCE, + new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, ""), + ShardRouting.Role.DEFAULT + ); + + shardRouting = shardRouting.initialize("n-" + index.getName(), null, 0).moveToStarted(0); + routingTableBuilder.add( + IndexRoutingTable.builder(index) + .addIndexShard(IndexShardRoutingTable.builder(shardRouting.shardId()).addShard(shardRouting)) + ); + } + + return ClusterState.builder(new ClusterName("test")) + .metadata(Metadata.builder().indices(indices).build()) + .blocks(new ClusterBlocks.Builder().build()) + .nodes(nodesBuilder) + .routingTable(routingTableBuilder) + .build(); + } + + private IndexMetadata metadata(Index index) { + final Settings settings = Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) + .put(IndexMetadata.SETTING_INDEX_UUID, index.getUUID()) + .build(); + return IndexMetadata.builder(index.getName()).settings(settings).numberOfShards(1).numberOfReplicas(0).build(); + } + + private Index idx(String name) { + return new Index(name, UUID.randomUUID().toString()); + } + +} diff --git a/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/TransportGetStackTracesActionTests.java b/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/TransportGetStackTracesActionTests.java index bf4c15d8d1ed5..2eccfb45f5958 100644 --- a/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/TransportGetStackTracesActionTests.java +++ b/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/TransportGetStackTracesActionTests.java @@ -17,6 +17,13 @@ public void testSliceEmptyList() { assertEquals(List.of(List.of()), TransportGetStackTracesAction.sliced(Collections.emptyList(), 4)); } + public void testSingleSlice() { + List input = randomList(2, 5, () -> randomAlphaOfLength(3)); + List> sliced = TransportGetStackTracesAction.sliced(input, 1); + assertEquals(1, sliced.size()); + assertEquals(input, sliced.get(0)); + } + public void testSliceListSmallerOrEqualToSliceCount() { int slices = 7; List input = randomList(0, slices, () -> randomAlphaOfLength(3)); From 47625de9e60b21246390dcb2fdb42e95b9fe514c Mon Sep 17 00:00:00 2001 From: Lee Hinman Date: Thu, 7 Dec 2023 08:56:24 -0700 Subject: [PATCH 39/45] Use latest version of entsearch ingestion pipeline (#103087) * Use latest version of entsearch ingestion pipeline This pipeline accidentally had a hardcoded version of "1", instead of 
using the registry version (like the other assets for this plugin). This led to lots of lines such as: ``` [2023-12-06T14:16:00,593][INFO ][o.e.x.c.t.IndexTemplateRegistry] [runTask-0] adding ingest pipeline ent-search-generic-ingestion [2023-12-06T14:16:00,633][INFO ][o.e.x.c.t.IndexTemplateRegistry] [runTask-0] upgrading ingest pipeline [ent-search-generic-ingestion] for [enterprise_search] from version [1] to version [3] [2023-12-06T14:16:00,634][INFO ][o.e.x.c.t.IndexTemplateRegistry] [runTask-0] adding ingest pipeline ent-search-generic-ingestion [2023-12-06T14:16:00,680][INFO ][o.e.x.c.t.IndexTemplateRegistry] [runTask-0] upgrading ingest pipeline [ent-search-generic-ingestion] for [enterprise_search] from version [1] to version [3] [2023-12-06T14:16:00,681][INFO ][o.e.x.c.t.IndexTemplateRegistry] [runTask-0] adding ingest pipeline ent-search-generic-ingestion [2023-12-06T14:16:00,706][INFO ][o.e.x.c.t.IndexTemplateRegistry] [runTask-0] upgrading ingest pipeline [ent-search-generic-ingestion] for [enterprise_search] from version [1] to version [3] [2023-12-06T14:16:00,707][INFO ][o.e.x.c.t.IndexTemplateRegistry] [runTask-0] adding ingest pipeline ent-search-generic-ingestion [2023-12-06T14:16:00,731][INFO ][o.e.x.c.t.IndexTemplateRegistry] [runTask-0] upgrading ingest pipeline [ent-search-generic-ingestion] for [enterprise_search] from version [1] to version [3] [2023-12-06T14:16:00,732][INFO ][o.e.x.c.t.IndexTemplateRegistry] [runTask-0] adding ingest pipeline ent-search-generic-ingestion etc etc etc ``` Because the pipeline was installed at version 1, and then immediately "upgraded" to version 3, despite no changes. Relates to #97463 --- docs/changelog/103087.yaml | 5 +++++ .../main/resources/entsearch/generic_ingestion_pipeline.json | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) create mode 100644 docs/changelog/103087.yaml diff --git a/docs/changelog/103087.yaml b/docs/changelog/103087.yaml new file mode 100644 index 0000000000000..5824bc53edb8d --- /dev/null +++ b/docs/changelog/103087.yaml @@ -0,0 +1,5 @@ +pr: 103087 +summary: Use latest version of entsearch ingestion pipeline +area: Application +type: bug +issues: [] diff --git a/x-pack/plugin/core/template-resources/src/main/resources/entsearch/generic_ingestion_pipeline.json b/x-pack/plugin/core/template-resources/src/main/resources/entsearch/generic_ingestion_pipeline.json index f66789d25e5f5..e2a2cbd460117 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/entsearch/generic_ingestion_pipeline.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/entsearch/generic_ingestion_pipeline.json @@ -1,5 +1,5 @@ { - "version": 1, + "version": ${xpack.application.connector.template.version}, "description": "Generic Enterprise Search ingest pipeline", "_meta": { "managed_by": "Enterprise Search", From b510e59c67ba8ddb07100419fccb86d6da2dc141 Mon Sep 17 00:00:00 2001 From: Chris Hegarty <62058229+ChrisHegarty@users.noreply.github.com> Date: Thu, 7 Dec 2023 16:02:53 +0000 Subject: [PATCH 40/45] Add JIT compiler excludes for computeCommonPrefixLengthAndBuildHistogram (#103112) This commit adds a temporary JIT compile command that excludes the compilation of a couple of Lucene methods which, if compiled, crash the JVM. 
--- .../gradle/internal/ElasticsearchTestBasePlugin.java | 5 ++++- distribution/src/config/jvm.options | 4 ++++ docs/changelog/103112.yaml | 5 +++++ 3 files changed, 13 insertions(+), 1 deletion(-) create mode 100644 docs/changelog/103112.yaml diff --git a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchTestBasePlugin.java b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchTestBasePlugin.java index 31b62c4ac700f..f1804064b7e07 100644 --- a/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchTestBasePlugin.java +++ b/build-tools-internal/src/main/java/org/elasticsearch/gradle/internal/ElasticsearchTestBasePlugin.java @@ -108,7 +108,10 @@ public void execute(Task t) { "--add-opens=java.base/java.nio.file=ALL-UNNAMED", "--add-opens=java.base/java.time=ALL-UNNAMED", "--add-opens=java.management/java.lang.management=ALL-UNNAMED", - "-XX:+HeapDumpOnOutOfMemoryError" + "-XX:+HeapDumpOnOutOfMemoryError", + // REMOVE once bumped to a JDK greater than 21.0.1, https://github.com/elastic/elasticsearch/issues/103004 + "-XX:CompileCommand=exclude,org.apache.lucene.util.MSBRadixSorter::computeCommonPrefixLengthAndBuildHistogram", + "-XX:CompileCommand=exclude,org.apache.lucene.util.RadixSelector::computeCommonPrefixLengthAndBuildHistogram" ); test.getJvmArgumentProviders().add(new SimpleCommandLineArgumentProvider("-XX:HeapDumpPath=" + heapdumpDir)); diff --git a/distribution/src/config/jvm.options b/distribution/src/config/jvm.options index c5e905f461f45..9e26582d58439 100644 --- a/distribution/src/config/jvm.options +++ b/distribution/src/config/jvm.options @@ -58,6 +58,10 @@ # result in less optimal vector performance 20-:--add-modules=jdk.incubator.vector +# REMOVE once bumped to a JDK greater than 21.0.1, https://github.com/elastic/elasticsearch/issues/103004 +19-21:-XX:CompileCommand=exclude,org.apache.lucene.util.MSBRadixSorter::computeCommonPrefixLengthAndBuildHistogram +19-21:-XX:CompileCommand=exclude,org.apache.lucene.util.RadixSelector::computeCommonPrefixLengthAndBuildHistogram + ## heap dumps # generate a heap dump when an allocation from the Java heap fails; heap dumps diff --git a/docs/changelog/103112.yaml b/docs/changelog/103112.yaml new file mode 100644 index 0000000000000..dcb4cf604c179 --- /dev/null +++ b/docs/changelog/103112.yaml @@ -0,0 +1,5 @@ +pr: 103112 +summary: Add JIT compiler excludes for `computeCommonPrefixLengthAndBuildHistogram` +area: Search +type: bug +issues: [] From 5b0b74108a06666471b873cebb40e044818e01f2 Mon Sep 17 00:00:00 2001 From: David Turner Date: Thu, 7 Dec 2023 16:09:50 +0000 Subject: [PATCH 41/45] AwaitsFix for #102987 --- .../esql/expression/function/scalar/convert/ToDegreesTests.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDegreesTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDegreesTests.java index a1c3c1f38aac5..b3e0c65f0c8f8 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDegreesTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/ToDegreesTests.java @@ -10,6 +10,7 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.apache.lucene.tests.util.LuceneTestCase; import 
org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.elasticsearch.xpack.ql.expression.Expression; @@ -22,6 +23,7 @@ import java.util.function.Function; import java.util.function.Supplier; +@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/102987") public class ToDegreesTests extends AbstractFunctionTestCase { public ToDegreesTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); From 3807cb81340d803884db03ce7a737bd343649353 Mon Sep 17 00:00:00 2001 From: Brian Seeders Date: Thu, 7 Dec 2023 11:10:13 -0500 Subject: [PATCH 42/45] Bump versions after 8.11.2 release --- .buildkite/pipelines/intake.yml | 2 +- .buildkite/pipelines/periodic-packaging.yml | 16 ++++++++++++++++ .buildkite/pipelines/periodic.yml | 10 ++++++++++ .ci/bwcVersions | 1 + .ci/snapshotBwcVersions | 2 +- .../src/main/java/org/elasticsearch/Version.java | 1 + 6 files changed, 30 insertions(+), 2 deletions(-) diff --git a/.buildkite/pipelines/intake.yml b/.buildkite/pipelines/intake.yml index b6bbc62e6bc0e..a200e871ec8e6 100644 --- a/.buildkite/pipelines/intake.yml +++ b/.buildkite/pipelines/intake.yml @@ -48,7 +48,7 @@ steps: timeout_in_minutes: 300 matrix: setup: - BWC_VERSION: ["7.17.16", "8.11.2", "8.12.0", "8.13.0"] + BWC_VERSION: ["7.17.16", "8.11.3", "8.12.0", "8.13.0"] agents: provider: gcp image: family/elasticsearch-ubuntu-2004 diff --git a/.buildkite/pipelines/periodic-packaging.yml b/.buildkite/pipelines/periodic-packaging.yml index c0e51b609faee..d397039128457 100644 --- a/.buildkite/pipelines/periodic-packaging.yml +++ b/.buildkite/pipelines/periodic-packaging.yml @@ -1745,6 +1745,22 @@ steps: env: BWC_VERSION: 8.11.2 + - label: "{{matrix.image}} / 8.11.3 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.11.3 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.11.3 + - label: "{{matrix.image}} / 8.12.0 / packaging-tests-upgrade" command: ./.ci/scripts/packaging-test.sh -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.12.0 timeout_in_minutes: 300 diff --git a/.buildkite/pipelines/periodic.yml b/.buildkite/pipelines/periodic.yml index 9fb66a8062ab2..248bfd52742d7 100644 --- a/.buildkite/pipelines/periodic.yml +++ b/.buildkite/pipelines/periodic.yml @@ -1072,6 +1072,16 @@ steps: buildDirectory: /dev/shm/bk env: BWC_VERSION: 8.11.2 + - label: 8.11.3 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.11.3#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: n1-standard-32 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.11.3 - label: 8.12.0 / bwc command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.12.0#bwcTest timeout_in_minutes: 300 diff --git a/.ci/bwcVersions b/.ci/bwcVersions index 0c29d210149bc..de951a9e6fc24 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -106,5 +106,6 @@ BWC_VERSION: - "8.11.0" - "8.11.1" - "8.11.2" + - "8.11.3" - "8.12.0" - "8.13.0" diff --git a/.ci/snapshotBwcVersions b/.ci/snapshotBwcVersions index 6fbe04325c898..9df1c097ac941 100644 --- a/.ci/snapshotBwcVersions +++ b/.ci/snapshotBwcVersions @@ -1,5 +1,5 @@ BWC_VERSION: - "7.17.16" - - 
"8.11.2" + - "8.11.3" - "8.12.0" - "8.13.0" diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index 2ecc9703b2398..3d6995bd9e90f 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -157,6 +157,7 @@ public class Version implements VersionId, ToXContentFragment { public static final Version V_8_11_0 = new Version(8_11_00_99); public static final Version V_8_11_1 = new Version(8_11_01_99); public static final Version V_8_11_2 = new Version(8_11_02_99); + public static final Version V_8_11_3 = new Version(8_11_03_99); public static final Version V_8_12_0 = new Version(8_12_00_99); public static final Version V_8_13_0 = new Version(8_13_00_99); public static final Version CURRENT = V_8_13_0; From c7406ed09314f7fc90f22d40c175381c6ff42064 Mon Sep 17 00:00:00 2001 From: Brian Seeders Date: Thu, 7 Dec 2023 11:13:07 -0500 Subject: [PATCH 43/45] Prune changelogs after 8.11.2 release --- docs/changelog/100986.yaml | 6 ------ docs/changelog/101915.yaml | 5 ----- docs/changelog/102057.yaml | 6 ------ docs/changelog/102114.yaml | 6 ------ docs/changelog/102151.yaml | 5 ----- docs/changelog/102220.yaml | 5 ----- docs/changelog/102230.yaml | 6 ------ docs/changelog/102240.yaml | 5 ----- docs/changelog/102250.yaml | 6 ------ docs/changelog/102259.yaml | 5 ----- docs/changelog/102281.yaml | 5 ----- docs/changelog/102282.yaml | 6 ------ docs/changelog/102311.yaml | 5 ----- docs/changelog/102396.yaml | 5 ----- docs/changelog/102399.yaml | 6 ------ docs/changelog/102467.yaml | 6 ------ docs/changelog/102492.yaml | 5 ----- docs/changelog/102580.yaml | 6 ------ docs/changelog/102599.yaml | 5 ----- docs/changelog/102715.yaml | 6 ------ docs/changelog/102716.yaml | 5 ----- docs/changelog/102779.yaml | 5 ----- docs/changelog/102821.yaml | 5 ----- docs/changelog/102831.yaml | 9 --------- docs/changelog/102891.yaml | 7 ------- docs/changelog/102934.yaml | 6 ------ 26 files changed, 147 deletions(-) delete mode 100644 docs/changelog/100986.yaml delete mode 100644 docs/changelog/101915.yaml delete mode 100644 docs/changelog/102057.yaml delete mode 100644 docs/changelog/102114.yaml delete mode 100644 docs/changelog/102151.yaml delete mode 100644 docs/changelog/102220.yaml delete mode 100644 docs/changelog/102230.yaml delete mode 100644 docs/changelog/102240.yaml delete mode 100644 docs/changelog/102250.yaml delete mode 100644 docs/changelog/102259.yaml delete mode 100644 docs/changelog/102281.yaml delete mode 100644 docs/changelog/102282.yaml delete mode 100644 docs/changelog/102311.yaml delete mode 100644 docs/changelog/102396.yaml delete mode 100644 docs/changelog/102399.yaml delete mode 100644 docs/changelog/102467.yaml delete mode 100644 docs/changelog/102492.yaml delete mode 100644 docs/changelog/102580.yaml delete mode 100644 docs/changelog/102599.yaml delete mode 100644 docs/changelog/102715.yaml delete mode 100644 docs/changelog/102716.yaml delete mode 100644 docs/changelog/102779.yaml delete mode 100644 docs/changelog/102821.yaml delete mode 100644 docs/changelog/102831.yaml delete mode 100644 docs/changelog/102891.yaml delete mode 100644 docs/changelog/102934.yaml diff --git a/docs/changelog/100986.yaml b/docs/changelog/100986.yaml deleted file mode 100644 index 3920e2ef78d6a..0000000000000 --- a/docs/changelog/100986.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 100986 -summary: Synchronize Coordinator#onClusterStateApplied -area: Cluster Coordination -type: bug -issues: - - 
99023 diff --git a/docs/changelog/101915.yaml b/docs/changelog/101915.yaml deleted file mode 100644 index aed7ca62021a5..0000000000000 --- a/docs/changelog/101915.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 101915 -summary: Add inference counts by model to the machine learning usage stats -area: Machine Learning -type: enhancement -issues: [] diff --git a/docs/changelog/102057.yaml b/docs/changelog/102057.yaml deleted file mode 100644 index d5b664ba14c29..0000000000000 --- a/docs/changelog/102057.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 102057 -summary: Simplify `BlobStoreRepository` idle check -area: Snapshot/Restore -type: bug -issues: - - 101948 diff --git a/docs/changelog/102114.yaml b/docs/changelog/102114.yaml deleted file mode 100644 index a08389da0351b..0000000000000 --- a/docs/changelog/102114.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 102114 -summary: Fix double-completion in `SecurityUsageTransportAction` -area: Security -type: bug -issues: - - 102111 diff --git a/docs/changelog/102151.yaml b/docs/changelog/102151.yaml deleted file mode 100644 index 652ae555af97d..0000000000000 --- a/docs/changelog/102151.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102151 -summary: Default `run_ml_inference` should be true -area: Application -type: bug -issues: [] diff --git a/docs/changelog/102220.yaml b/docs/changelog/102220.yaml deleted file mode 100644 index d24dab1f91b31..0000000000000 --- a/docs/changelog/102220.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102220 -summary: Upgrade xmlsec to 2.3.4 -area: Security -type: enhancement -issues: [] diff --git a/docs/changelog/102230.yaml b/docs/changelog/102230.yaml deleted file mode 100644 index 20e8d8d1f10a6..0000000000000 --- a/docs/changelog/102230.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 102230 -summary: Set region for the STS client via privileged calls in AWS SDK -area: Snapshot/Restore -type: bug -issues: - - 102173 diff --git a/docs/changelog/102240.yaml b/docs/changelog/102240.yaml deleted file mode 100644 index 5df0046ee92fc..0000000000000 --- a/docs/changelog/102240.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102240 -summary: Exclude stack traces from transform audit messages and health -area: Transform -type: bug -issues: [] diff --git a/docs/changelog/102250.yaml b/docs/changelog/102250.yaml deleted file mode 100644 index 755341d9a3a64..0000000000000 --- a/docs/changelog/102250.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 102250 -summary: "[ILM] Fix downsample to skip already downsampled indices" -area: ILM+SLM -type: bug -issues: - - 102249 diff --git a/docs/changelog/102259.yaml b/docs/changelog/102259.yaml deleted file mode 100644 index 3d8a1c6381f6d..0000000000000 --- a/docs/changelog/102259.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102259 -summary: "[Usage API] Count all the data streams that have lifecycle" -area: Data streams -type: bug -issues: [] diff --git a/docs/changelog/102281.yaml b/docs/changelog/102281.yaml deleted file mode 100644 index ac6c17591e013..0000000000000 --- a/docs/changelog/102281.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102281 -summary: Improve failure handling in `ContinuousComputation` -area: Allocation -type: bug -issues: [] diff --git a/docs/changelog/102282.yaml b/docs/changelog/102282.yaml deleted file mode 100644 index 4860d70f99ccc..0000000000000 --- a/docs/changelog/102282.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 102282 -summary: "ES|QL: Fix drop of renamed grouping" -area: ES|QL -type: bug -issues: - - 102121 diff --git a/docs/changelog/102311.yaml b/docs/changelog/102311.yaml deleted file mode 100644 index 
bb1769527fdd4..0000000000000 --- a/docs/changelog/102311.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102311 -summary: Upgrade reactor netty http version -area: Snapshot/Restore -type: upgrade -issues: [] diff --git a/docs/changelog/102396.yaml b/docs/changelog/102396.yaml deleted file mode 100644 index 9ea53ca5b6840..0000000000000 --- a/docs/changelog/102396.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102396 -summary: Add more logging to the real memory circuit breaker and lower minimum interval -area: "Infra/Circuit Breakers" -type: bug -issues: [] diff --git a/docs/changelog/102399.yaml b/docs/changelog/102399.yaml deleted file mode 100644 index 7a4e1ff7ddab6..0000000000000 --- a/docs/changelog/102399.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 102399 -summary: "ES|QL: Fix layout management for Project" -area: ES|QL -type: bug -issues: - - 102120 diff --git a/docs/changelog/102467.yaml b/docs/changelog/102467.yaml deleted file mode 100644 index 580788a5aa2f9..0000000000000 --- a/docs/changelog/102467.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 102467 -summary: Fix dense_vector cluster stats indexed_vector_dim_min/max values -area: Mapping -type: bug -issues: - - 102416 diff --git a/docs/changelog/102492.yaml b/docs/changelog/102492.yaml deleted file mode 100644 index 943d82873e0b6..0000000000000 --- a/docs/changelog/102492.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102492 -summary: Ensure datafeed previews with no start or end time don't search the cold or frozen tiers -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/102580.yaml b/docs/changelog/102580.yaml deleted file mode 100644 index 50d315efd7071..0000000000000 --- a/docs/changelog/102580.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 102580 -summary: Fix DISSECT with empty patterns -area: ES|QL -type: bug -issues: - - 102577 diff --git a/docs/changelog/102599.yaml b/docs/changelog/102599.yaml deleted file mode 100644 index 74e3d89421463..0000000000000 --- a/docs/changelog/102599.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102599 -summary: "Recreate the Elasticsearch private temporary directory if it doesn't exist when an ML job is opened" -area: Machine Learning -type: bug -issues: [] diff --git a/docs/changelog/102715.yaml b/docs/changelog/102715.yaml deleted file mode 100644 index 7311db66ce151..0000000000000 --- a/docs/changelog/102715.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 102715 -summary: Fix leaking blocks in TopN -area: ES|QL -type: bug -issues: - - 102646 diff --git a/docs/changelog/102716.yaml b/docs/changelog/102716.yaml deleted file mode 100644 index 39317fdb38415..0000000000000 --- a/docs/changelog/102716.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102716 -summary: Fix leaking blocks in `BlockUtils` -area: ES|QL -type: bug -issues: [] diff --git a/docs/changelog/102779.yaml b/docs/changelog/102779.yaml deleted file mode 100644 index 7bbecb29665bd..0000000000000 --- a/docs/changelog/102779.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102779 -summary: Allow mismatched sort-by field types if there are no docs to sort -area: Search -type: bug -issues: [] diff --git a/docs/changelog/102821.yaml b/docs/changelog/102821.yaml deleted file mode 100644 index dcd6721621878..0000000000000 --- a/docs/changelog/102821.yaml +++ /dev/null @@ -1,5 +0,0 @@ -pr: 102821 -summary: Better processor stat merge -area: Ingest Node -type: bug -issues: [] diff --git a/docs/changelog/102831.yaml b/docs/changelog/102831.yaml deleted file mode 100644 index fb99b0c7f732b..0000000000000 --- a/docs/changelog/102831.yaml +++ /dev/null @@ -1,9 +0,0 @@ -pr: 102831 
-summary: Fix memory tracking in TopN.Row -area: ES|QL -type: bug -issues: - - 100640 - - 102784 - - 102790 - - 102683 diff --git a/docs/changelog/102891.yaml b/docs/changelog/102891.yaml deleted file mode 100644 index c5d5ed8c6758e..0000000000000 --- a/docs/changelog/102891.yaml +++ /dev/null @@ -1,7 +0,0 @@ -pr: 102891 -summary: "[Query Rules] Fix bug where combining the same metadata with text/numeric\ - \ values leads to error" -area: Application -type: bug -issues: - - 102827 diff --git a/docs/changelog/102934.yaml b/docs/changelog/102934.yaml deleted file mode 100644 index 4f61427506cf3..0000000000000 --- a/docs/changelog/102934.yaml +++ /dev/null @@ -1,6 +0,0 @@ -pr: 102934 -summary: Ensure transform updates only modify the expected transform task -area: Transform -type: bug -issues: - - 102933 From c6eff199ada799ed716fa0d5a97bb71647382f12 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tim=20R=C3=BChsen?= Date: Thu, 7 Dec 2023 17:16:20 +0100 Subject: [PATCH 44/45] [Profiling] Fix stacktraces `total_frames` response value and improve tests (#103062) * Fix flamegraph total_frame value and improve tests * Make inline aggregation more expressive --- .../profiling/GetFlameGraphActionIT.java | 10 +++++++--- .../profiling/GetStackTracesActionIT.java | 14 +++++++------- .../data/profiling-events-all.ndjson | 2 ++ .../profiling/GetFlamegraphResponse.java | 8 ++++++++ .../GetStackTracesResponseBuilder.java | 4 ++++ .../xpack/profiling/StackFrame.java | 19 +++++++++++++++---- .../TransportGetStackTracesAction.java | 6 +++++- 7 files changed, 48 insertions(+), 15 deletions(-) diff --git a/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetFlameGraphActionIT.java b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetFlameGraphActionIT.java index 4ce441285d432..8553574d39646 100644 --- a/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetFlameGraphActionIT.java +++ b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetFlameGraphActionIT.java @@ -14,8 +14,12 @@ public void testGetStackTracesUnfiltered() throws Exception { // only spot-check top level properties - detailed tests are done in unit tests assertEquals(994, response.getSize()); assertEquals(1.0d, response.getSamplingRate(), 0.001d); - assertEquals(44, response.getSelfCPU()); - assertEquals(1865, response.getTotalCPU()); - assertEquals(44, response.getTotalSamples()); + assertEquals(46, response.getSelfCPU()); + assertEquals(1903, response.getTotalCPU()); + assertEquals(46, response.getTotalSamples()); + + // The root node's values are the same as the top-level values. 
+ assertEquals("", response.getFileIds().get(0)); + assertEquals(response.getSelfCPU(), response.getCountInclusive().get(0).longValue()); } } diff --git a/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetStackTracesActionIT.java b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetStackTracesActionIT.java index a5efa24da5397..289f6896ed698 100644 --- a/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetStackTracesActionIT.java +++ b/x-pack/plugin/profiling/src/internalClusterTest/java/org/elasticsearch/xpack/profiling/GetStackTracesActionIT.java @@ -14,14 +14,14 @@ public class GetStackTracesActionIT extends ProfilingTestCase { public void testGetStackTracesUnfiltered() throws Exception { - GetStackTracesRequest request = new GetStackTracesRequest(10, 1.0d, 1.0d, null, null, null, null, null, null, null, null); + GetStackTracesRequest request = new GetStackTracesRequest(1000, 600.0d, 1.0d, null, null, null, null, null, null, null, null); request.setAdjustSampleCount(true); GetStackTracesResponse response = client().execute(GetStackTracesAction.INSTANCE, request).get(); - assertEquals(40, response.getTotalSamples()); - assertEquals(473, response.getTotalFrames()); + assertEquals(46, response.getTotalSamples()); + assertEquals(1821, response.getTotalFrames()); assertNotNull(response.getStackTraceEvents()); - assertEquals(4L, response.getStackTraceEvents().get("L7kj7UvlKbT-vN73el4faQ").count); + assertEquals(3L, response.getStackTraceEvents().get("L7kj7UvlKbT-vN73el4faQ").count); assertNotNull(response.getStackTraces()); // just do a high-level spot check. Decoding is tested in unit-tests @@ -30,8 +30,8 @@ public void testGetStackTracesUnfiltered() throws Exception { assertEquals(18, stackTrace.fileIds.size()); assertEquals(18, stackTrace.frameIds.size()); assertEquals(18, stackTrace.typeIds.size()); - assertEquals(0.007903d, stackTrace.annualCO2Tons, 0.000001d); - assertEquals(74.46d, stackTrace.annualCostsUSD, 0.01d); + assertEquals(0.0000098789d, stackTrace.annualCO2Tons, 0.0000000001d); + assertEquals(0.093075d, stackTrace.annualCostsUSD, 0.000001d); assertNotNull(response.getStackFrames()); StackFrame stackFrame = response.getStackFrames().get("8NlMClggx8jaziUTJXlmWAAAAAAAAIYI"); @@ -58,7 +58,7 @@ public void testGetStackTracesFromAPMWithMatch() throws Exception { null ); GetStackTracesResponse response = client().execute(GetStackTracesAction.INSTANCE, request).get(); - assertEquals(43, response.getTotalFrames()); + assertEquals(49, response.getTotalFrames()); assertNotNull(response.getStackTraceEvents()); assertEquals(3L, response.getStackTraceEvents().get("Ce77w10WeIDow3kd1jowlA").count); diff --git a/x-pack/plugin/profiling/src/internalClusterTest/resources/data/profiling-events-all.ndjson b/x-pack/plugin/profiling/src/internalClusterTest/resources/data/profiling-events-all.ndjson index 6964368e534c7..502494f05c62c 100644 --- a/x-pack/plugin/profiling/src/internalClusterTest/resources/data/profiling-events-all.ndjson +++ b/x-pack/plugin/profiling/src/internalClusterTest/resources/data/profiling-events-all.ndjson @@ -71,6 +71,8 @@ {"create": {"_index": "profiling-events-all"}} {"Stacktrace.count": [1], "profiling.project.id": ["100"], "os.kernel": ["9.9.9-0"], "tags": ["environment:qa", "region:eu-west-1"], "host.ip": ["192.168.1.2"], "@timestamp": ["1698624000"], "container.name": ["instance-0000000010"], "ecs.version": ["1.12.0"], "Stacktrace.id": 
["XF9MchOwpePfa6_hYy-vZQ"], "agent.version": ["head-be593ef3-1688111067"], "host.name": ["ip-192-168-1-2"], "host.id": ["8457605156473051743"], "process.thread.name": ["497295213074376"]} {"create": {"_index": "profiling-events-all"}} +{"Stacktrace.count": [2], "profiling.project.id": ["100"], "os.kernel": ["9.9.9-0"], "tags": ["environment:qa", "region:eu-west-1"], "host.ip": ["192.168.1.2"], "@timestamp": ["1698624000"], "container.name": ["instance-0000000010"], "ecs.version": ["1.12.0"], "Stacktrace.id": ["L7kj7UvlKbT-vN73el4faQ"], "agent.version": ["head-be593ef3-1688111067"], "host.name": ["ip-192-168-1-2"], "host.id": ["8457605156473051743"], "process.thread.name": ["497295213074376"]} +{"create": {"_index": "profiling-events-all"}} {"Stacktrace.count": [1], "profiling.project.id": ["100"], "os.kernel": ["9.9.9-0"], "tags": ["environment:qa", "region:eu-west-1"], "host.ip": ["192.168.1.2"], "@timestamp": ["1698624000"], "container.name": ["instance-0000000010"], "ecs.version": ["1.12.0"], "Stacktrace.id": ["L7kj7UvlKbT-vN73el4faQ"], "agent.version": ["head-be593ef3-1688111067"], "host.name": ["ip-192-168-1-2"], "host.id": ["8457605156473051743"], "process.thread.name": ["497295213074376"]} {"create": {"_index": "profiling-events-all"}} {"Stacktrace.count": [1], "profiling.project.id": ["100"], "os.kernel": ["9.9.9-0"], "tags": ["environment:qa", "region:eu-west-1"], "host.ip": ["192.168.1.2"], "@timestamp": ["1698624000"], "container.name": ["instance-0000000010"], "ecs.version": ["1.12.0"], "Stacktrace.id": ["hRqQI2CBPiapzgFG9jrmDA"], "agent.version": ["head-be593ef3-1688111067"], "host.name": ["ip-192-168-1-2"], "host.id": ["8457605156473051743"], "process.thread.name": ["599103450330106"]} diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetFlamegraphResponse.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetFlamegraphResponse.java index 211b6d76f8caa..457faecf4ad54 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetFlamegraphResponse.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetFlamegraphResponse.java @@ -194,6 +194,14 @@ public List getSourceLines() { return sourceLines; } + public List getAnnualCO2TonsInclusive() { + return annualCO2TonsInclusive; + } + + public List getAnnualCostsUSDInclusive() { + return annualCostsUSDInclusive; + } + public long getSelfCPU() { return selfCPU; } diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStackTracesResponseBuilder.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStackTracesResponseBuilder.java index ccafe99c31d9a..ab98b1d4daa2a 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStackTracesResponseBuilder.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStackTracesResponseBuilder.java @@ -54,6 +54,10 @@ public void setTotalFrames(int totalFrames) { this.totalFrames = totalFrames; } + public void addTotalFrames(int numFrames) { + this.totalFrames += numFrames; + } + public void setStackFrames(Map stackFrames) { this.stackFrames = stackFrames; } diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/StackFrame.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/StackFrame.java index 455b150b6ee76..35f5899536745 100644 --- 
a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/StackFrame.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/StackFrame.java @@ -30,9 +30,20 @@ final class StackFrame implements ToXContentObject { this.lineNumber = listOf(lineNumber); } + public int size() { + return this.functionName.size(); // functionName is the only array that is always set + } + + /** + * Returns the number of inlined frames in this stack frame. + * @return the number of inlined frames in this stack frame. + */ + public int inlineFrameCount() { + return size() > 0 ? size() - 1 : 0; + } + public void forEach(Consumer action) { - int size = this.functionName.size(); // functionName is the only array that is always set - for (int i = 0; i < size; i++) { + for (int i = 0; i < size(); i++) { action.accept( new Frame( fileName.size() > i ? fileName.get(i) : "", @@ -40,7 +51,7 @@ public void forEach(Consumer action) { functionOffset.size() > i ? functionOffset.get(i) : 0, lineNumber.size() > i ? lineNumber.get(i) : 0, i > 0, - i == size - 1 + i == size() - 1 ) ); } @@ -67,7 +78,7 @@ public static StackFrame fromSource(Map source) { } public boolean isEmpty() { - return fileName.isEmpty() && functionName.isEmpty() && functionOffset.isEmpty() && lineNumber.isEmpty(); + return size() == 0; } @Override diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetStackTracesAction.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetStackTracesAction.java index 000b448696985..3fa47beebd70a 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetStackTracesAction.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/TransportGetStackTracesAction.java @@ -688,6 +688,7 @@ private static class DetailsHandler { private final Map executables; private final Map stackFrames; private final AtomicInteger expectedSlices; + private final AtomicInteger totalInlineFrames = new AtomicInteger(); private final StopWatch watch = new StopWatch("retrieveStackTraceDetails"); private DetailsHandler( @@ -718,7 +719,9 @@ public void onStackFramesResponse(MultiGetResponse multiGetItemResponses) { if (stackFrames.containsKey(frame.getId()) == false) { StackFrame stackFrame = StackFrame.fromSource(frame.getResponse().getSource()); if (stackFrame.isEmpty() == false) { - stackFrames.putIfAbsent(frame.getId(), stackFrame); + if (stackFrames.putIfAbsent(frame.getId(), stackFrame) == null) { + totalInlineFrames.addAndGet(stackFrame.inlineFrameCount()); + } } else { log.trace("Stack frame with id [{}] has no properties.", frame.getId()); } @@ -757,6 +760,7 @@ public void mayFinish() { if (expectedSlices.decrementAndGet() == 0) { builder.setExecutables(executables); builder.setStackFrames(stackFrames); + builder.addTotalFrames(totalInlineFrames.get()); log.debug("retrieveStackTraceDetails found [{}] stack frames, [{}] executables.", stackFrames.size(), executables.size()); log.debug(watch::report); submitListener.onResponse(builder.build()); From d8bbc62c6e228774f103da9f879a3bae5a676149 Mon Sep 17 00:00:00 2001 From: David Roberts Date: Thu, 7 Dec 2023 16:42:56 +0000 Subject: [PATCH 45/45] [ML] Start a new trace context before loading a trained model (#103124) Each distinct task is a different span in APM tracing, so needs a new trace context. 
--- docs/changelog/103124.yaml | 5 +++++ .../assignment/TrainedModelAssignmentNodeService.java | 9 +++++++-- 2 files changed, 12 insertions(+), 2 deletions(-) create mode 100644 docs/changelog/103124.yaml diff --git a/docs/changelog/103124.yaml b/docs/changelog/103124.yaml new file mode 100644 index 0000000000000..078c8249bbf5d --- /dev/null +++ b/docs/changelog/103124.yaml @@ -0,0 +1,5 @@ +pr: 103124 +summary: Start a new trace context before loading a trained model +area: Machine Learning +type: bug +issues: [] diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentNodeService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentNodeService.java index 3efae0ed58bf6..fdb007862cfdc 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentNodeService.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentNodeService.java @@ -369,9 +369,14 @@ public void clusterChanged(ClusterChangedEvent event) { } if (shouldLoadModel(routingInfo, trainedModelAssignment.getDeploymentId(), isResetMode)) { - prepareModelToLoad( - createStartTrainedModelDeploymentTaskParams(trainedModelAssignment, routingInfo.getCurrentAllocations()) + StartTrainedModelDeploymentAction.TaskParams params = createStartTrainedModelDeploymentTaskParams( + trainedModelAssignment, + routingInfo.getCurrentAllocations() ); + // Loading the model is done by a separate task, so needs a new trace context + try (var ignored = threadPool.getThreadContext().newTraceContext()) { + prepareModelToLoad(params); + } } }