From b9c29807ecc831838c398aace29157d24e51fdea Mon Sep 17 00:00:00 2001 From: Yang Wang Date: Fri, 8 Dec 2023 10:26:17 +1100 Subject: [PATCH 01/14] Extract repositories metrics into its own class (#103034) This PR is a follow-up of https://github.com/elastic/elasticsearch/pull/102505#discussion_r1402957598 that moves the repositories metrics management into its own class which is then passed around instead of relying on the raw meterRegistry and string metric names. --- .../repositories/azure/AzureRepository.java | 4 +- .../azure/AzureRepositoryPlugin.java | 4 +- ...eCloudStorageBlobStoreRepositoryTests.java | 4 +- .../gcs/GoogleCloudStoragePlugin.java | 4 +- .../gcs/GoogleCloudStorageRepository.java | 4 +- .../s3/S3BlobStoreRepositoryMetricsTests.java | 16 +++--- .../s3/S3BlobStoreRepositoryTests.java | 8 +-- .../s3/S3RepositoryThirdPartyTests.java | 4 +- .../repositories/s3/S3BlobStore.java | 50 +++++------------ .../repositories/s3/S3Repository.java | 8 +-- .../repositories/s3/S3RepositoryPlugin.java | 18 +++---- .../s3/RepositoryCredentialsTests.java | 7 +-- .../s3/S3BlobContainerRetriesTests.java | 4 +- .../repositories/s3/S3RepositoryTests.java | 4 +- .../repository/url/URLRepositoryPlugin.java | 4 +- .../repositories/hdfs/HdfsPlugin.java | 4 +- .../plan/ShardSnapshotsServiceIT.java | 4 +- .../repositories/InvalidRepositoryIT.java | 3 +- ...BlobStoreRepositoryOperationPurposeIT.java | 4 +- ...etadataLoadingDuringSnapshotRestoreIT.java | 4 +- .../SnapshotsServiceDoubleFinalizationIT.java | 4 +- .../plugins/RepositoryPlugin.java | 4 +- .../repositories/RepositoriesMetrics.java | 53 +++++++++++++++++++ .../repositories/RepositoriesModule.java | 31 ++--------- .../blobstore/MeteredBlobStoreRepository.java | 8 +-- .../repositories/RepositoriesModuleTests.java | 22 ++++++-- .../RepositoriesServiceTests.java | 5 +- ...bStoreRepositoryDeleteThrottlingTests.java | 4 +- ...ncySimulatingBlobStoreRepositoryTests.java | 4 +- .../LatencySimulatingRepositoryPlugin.java | 4 +- 
.../snapshots/mockstore/MockRepository.java | 4 +- .../sourceonly/SourceOnlySnapshotIT.java | 4 +- .../elasticsearch/xpack/core/XPackPlugin.java | 4 +- .../core/LocalStateCompositeXPackPlugin.java | 10 ++-- .../lucene/bwc/AbstractArchiveTestCase.java | 4 +- ...chableSnapshotDiskThresholdIntegTests.java | 4 +- ...archableSnapshotsPrewarmingIntegTests.java | 4 +- ...SnapshotRecoveryStateIntegrationTests.java | 4 +- .../SnapshotBasedIndexRecoveryIT.java | 4 +- .../testkit/RepositoryAnalysisFailureIT.java | 4 +- .../testkit/RepositoryAnalysisSuccessIT.java | 4 +- .../votingonly/VotingOnlyNodePluginTests.java | 4 +- 42 files changed, 210 insertions(+), 145 deletions(-) create mode 100644 server/src/main/java/org/elasticsearch/repositories/RepositoriesMetrics.java diff --git a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java index c71bbf02782ca..f58611cb0567a 100644 --- a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java +++ b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java @@ -21,8 +21,8 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.indices.recovery.RecoverySettings; +import org.elasticsearch.repositories.RepositoriesMetrics; import org.elasticsearch.repositories.blobstore.MeteredBlobStoreRepository; -import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.xcontent.NamedXContentRegistry; import java.util.Locale; @@ -109,7 +109,7 @@ public AzureRepository( recoverySettings, buildBasePath(metadata), buildLocation(metadata), - MeterRegistry.NOOP + RepositoriesMetrics.NOOP ); this.chunkSize = Repository.CHUNK_SIZE_SETTING.get(metadata.settings()); this.storageService = storageService; diff --git 
a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepositoryPlugin.java b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepositoryPlugin.java index 6ff9a40940e8c..73d969ee31b19 100644 --- a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepositoryPlugin.java +++ b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepositoryPlugin.java @@ -21,6 +21,7 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.ReloadablePlugin; import org.elasticsearch.plugins.RepositoryPlugin; +import org.elasticsearch.repositories.RepositoriesMetrics; import org.elasticsearch.repositories.Repository; import org.elasticsearch.threadpool.ExecutorBuilder; import org.elasticsearch.threadpool.ScalingExecutorBuilder; @@ -62,7 +63,8 @@ public Map getRepositories( NamedXContentRegistry namedXContentRegistry, ClusterService clusterService, BigArrays bigArrays, - RecoverySettings recoverySettings + RecoverySettings recoverySettings, + RepositoriesMetrics repositoriesMetrics ) { return Collections.singletonMap(AzureRepository.TYPE, metadata -> { AzureStorageService storageService = azureStoreService.get(); diff --git a/modules/repository-gcs/src/internalClusterTest/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java b/modules/repository-gcs/src/internalClusterTest/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java index 87449d7153057..6d2c015d7d922 100644 --- a/modules/repository-gcs/src/internalClusterTest/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java +++ b/modules/repository-gcs/src/internalClusterTest/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java @@ -42,6 +42,7 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.indices.recovery.RecoverySettings; import 
org.elasticsearch.plugins.Plugin; +import org.elasticsearch.repositories.RepositoriesMetrics; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.repositories.Repository; import org.elasticsearch.repositories.blobstore.BlobStoreRepository; @@ -256,7 +257,8 @@ public Map getRepositories( NamedXContentRegistry registry, ClusterService clusterService, BigArrays bigArrays, - RecoverySettings recoverySettings + RecoverySettings recoverySettings, + RepositoriesMetrics repositoriesMetrics ) { return Collections.singletonMap( GoogleCloudStorageRepository.TYPE, diff --git a/modules/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStoragePlugin.java b/modules/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStoragePlugin.java index 6acaff1801ffc..4f05289899a7f 100644 --- a/modules/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStoragePlugin.java +++ b/modules/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStoragePlugin.java @@ -17,6 +17,7 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.ReloadablePlugin; import org.elasticsearch.plugins.RepositoryPlugin; +import org.elasticsearch.repositories.RepositoriesMetrics; import org.elasticsearch.repositories.Repository; import org.elasticsearch.xcontent.NamedXContentRegistry; @@ -48,7 +49,8 @@ public Map getRepositories( NamedXContentRegistry namedXContentRegistry, ClusterService clusterService, BigArrays bigArrays, - RecoverySettings recoverySettings + RecoverySettings recoverySettings, + RepositoriesMetrics repositoriesMetrics ) { return Collections.singletonMap( GoogleCloudStorageRepository.TYPE, diff --git a/modules/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java b/modules/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java index 21dd7529afaca..94d0abe17909f 100644 --- 
a/modules/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java +++ b/modules/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java @@ -19,9 +19,9 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.indices.recovery.RecoverySettings; +import org.elasticsearch.repositories.RepositoriesMetrics; import org.elasticsearch.repositories.RepositoryException; import org.elasticsearch.repositories.blobstore.MeteredBlobStoreRepository; -import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.xcontent.NamedXContentRegistry; import java.util.Map; @@ -78,7 +78,7 @@ class GoogleCloudStorageRepository extends MeteredBlobStoreRepository { recoverySettings, buildBasePath(metadata), buildLocation(metadata), - MeterRegistry.NOOP + RepositoriesMetrics.NOOP ); this.storageService = storageService; diff --git a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryMetricsTests.java b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryMetricsTests.java index a7c1d25195028..59f65032272df 100644 --- a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryMetricsTests.java +++ b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryMetricsTests.java @@ -37,14 +37,14 @@ import java.util.Queue; import java.util.concurrent.LinkedBlockingQueue; -import static org.elasticsearch.repositories.RepositoriesModule.HTTP_REQUEST_TIME_IN_MICROS_HISTOGRAM; -import static org.elasticsearch.repositories.RepositoriesModule.METRIC_EXCEPTIONS_COUNT; -import static org.elasticsearch.repositories.RepositoriesModule.METRIC_EXCEPTIONS_HISTOGRAM; -import static 
org.elasticsearch.repositories.RepositoriesModule.METRIC_OPERATIONS_COUNT; -import static org.elasticsearch.repositories.RepositoriesModule.METRIC_REQUESTS_COUNT; -import static org.elasticsearch.repositories.RepositoriesModule.METRIC_THROTTLES_COUNT; -import static org.elasticsearch.repositories.RepositoriesModule.METRIC_THROTTLES_HISTOGRAM; -import static org.elasticsearch.repositories.RepositoriesModule.METRIC_UNSUCCESSFUL_OPERATIONS_COUNT; +import static org.elasticsearch.repositories.RepositoriesMetrics.HTTP_REQUEST_TIME_IN_MICROS_HISTOGRAM; +import static org.elasticsearch.repositories.RepositoriesMetrics.METRIC_EXCEPTIONS_COUNT; +import static org.elasticsearch.repositories.RepositoriesMetrics.METRIC_EXCEPTIONS_HISTOGRAM; +import static org.elasticsearch.repositories.RepositoriesMetrics.METRIC_OPERATIONS_COUNT; +import static org.elasticsearch.repositories.RepositoriesMetrics.METRIC_REQUESTS_COUNT; +import static org.elasticsearch.repositories.RepositoriesMetrics.METRIC_THROTTLES_COUNT; +import static org.elasticsearch.repositories.RepositoriesMetrics.METRIC_THROTTLES_HISTOGRAM; +import static org.elasticsearch.repositories.RepositoriesMetrics.METRIC_UNSUCCESSFUL_OPERATIONS_COUNT; import static org.elasticsearch.rest.RestStatus.INTERNAL_SERVER_ERROR; import static org.elasticsearch.rest.RestStatus.NOT_FOUND; import static org.elasticsearch.rest.RestStatus.TOO_MANY_REQUESTS; diff --git a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java index c76364f48c081..29342a7f5ea92 100644 --- a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java +++ b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java @@ -37,6 +37,7 @@ import 
org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.repositories.RepositoriesMetrics; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.repositories.Repository; import org.elasticsearch.repositories.RepositoryData; @@ -74,7 +75,7 @@ import java.util.stream.Collectors; import java.util.stream.StreamSupport; -import static org.elasticsearch.repositories.RepositoriesModule.METRIC_REQUESTS_COUNT; +import static org.elasticsearch.repositories.RepositoriesMetrics.METRIC_REQUESTS_COUNT; import static org.elasticsearch.repositories.blobstore.BlobStoreTestUtil.randomNonDataPurpose; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; @@ -444,9 +445,10 @@ protected S3Repository createRepository( NamedXContentRegistry registry, ClusterService clusterService, BigArrays bigArrays, - RecoverySettings recoverySettings + RecoverySettings recoverySettings, + RepositoriesMetrics repositoriesMetrics ) { - return new S3Repository(metadata, registry, getService(), clusterService, bigArrays, recoverySettings, getMeterRegistry()) { + return new S3Repository(metadata, registry, getService(), clusterService, bigArrays, recoverySettings, repositoriesMetrics) { @Override public BlobStore blobStore() { diff --git a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java index 1e2ff831b8e49..f182b54b0c696 100644 --- a/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java +++ 
b/modules/repository-s3/src/internalClusterTest/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java @@ -30,8 +30,8 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.repositories.AbstractThirdPartyRepositoryTestCase; +import org.elasticsearch.repositories.RepositoriesMetrics; import org.elasticsearch.repositories.RepositoriesService; -import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.test.ClusterServiceUtils; import org.elasticsearch.test.fixtures.minio.MinioTestContainer; import org.elasticsearch.test.fixtures.testcontainers.TestContainersThreadFilter; @@ -145,7 +145,7 @@ public long absoluteTimeInMillis() { ClusterServiceUtils.createClusterService(threadpool), BigArrays.NON_RECYCLING_INSTANCE, new RecoverySettings(node().settings(), node().injector().getInstance(ClusterService.class).getClusterSettings()), - MeterRegistry.NOOP + RepositoriesMetrics.NOOP ) ) { repository.start(); diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java index 99fe3d5cc31ad..c045e05a6f8e0 100644 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java @@ -32,9 +32,7 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.telemetry.metric.LongCounter; -import org.elasticsearch.telemetry.metric.LongHistogram; -import org.elasticsearch.telemetry.metric.MeterRegistry; +import org.elasticsearch.repositories.RepositoriesMetrics; import org.elasticsearch.threadpool.ThreadPool; import java.io.IOException; @@ -54,14 +52,6 @@ import java.util.stream.Collectors; import static 
org.elasticsearch.core.Strings.format; -import static org.elasticsearch.repositories.RepositoriesModule.HTTP_REQUEST_TIME_IN_MICROS_HISTOGRAM; -import static org.elasticsearch.repositories.RepositoriesModule.METRIC_EXCEPTIONS_COUNT; -import static org.elasticsearch.repositories.RepositoriesModule.METRIC_EXCEPTIONS_HISTOGRAM; -import static org.elasticsearch.repositories.RepositoriesModule.METRIC_OPERATIONS_COUNT; -import static org.elasticsearch.repositories.RepositoriesModule.METRIC_REQUESTS_COUNT; -import static org.elasticsearch.repositories.RepositoriesModule.METRIC_THROTTLES_COUNT; -import static org.elasticsearch.repositories.RepositoriesModule.METRIC_THROTTLES_HISTOGRAM; -import static org.elasticsearch.repositories.RepositoriesModule.METRIC_UNSUCCESSFUL_OPERATIONS_COUNT; class S3BlobStore implements BlobStore { @@ -91,15 +81,7 @@ class S3BlobStore implements BlobStore { private final ThreadPool threadPool; private final Executor snapshotExecutor; - private final MeterRegistry meterRegistry; - private final LongCounter requestCounter; - private final LongCounter exceptionCounter; - private final LongCounter throttleCounter; - private final LongCounter operationCounter; - private final LongCounter unsuccessfulOperationCounter; - private final LongHistogram exceptionHistogram; - private final LongHistogram throttleHistogram; - private final LongHistogram httpRequestTimeInMicroHistogram; + private final RepositoriesMetrics repositoriesMetrics; private final StatsCollectors statsCollectors = new StatsCollectors(); @@ -117,7 +99,7 @@ class S3BlobStore implements BlobStore { RepositoryMetadata repositoryMetadata, BigArrays bigArrays, ThreadPool threadPool, - MeterRegistry meterRegistry + RepositoriesMetrics repositoriesMetrics ) { this.service = service; this.bigArrays = bigArrays; @@ -129,15 +111,7 @@ class S3BlobStore implements BlobStore { this.repositoryMetadata = repositoryMetadata; this.threadPool = threadPool; this.snapshotExecutor = 
threadPool.executor(ThreadPool.Names.SNAPSHOT); - this.meterRegistry = meterRegistry; - this.requestCounter = this.meterRegistry.getLongCounter(METRIC_REQUESTS_COUNT); - this.exceptionCounter = this.meterRegistry.getLongCounter(METRIC_EXCEPTIONS_COUNT); - this.throttleCounter = this.meterRegistry.getLongCounter(METRIC_THROTTLES_COUNT); - this.operationCounter = this.meterRegistry.getLongCounter(METRIC_OPERATIONS_COUNT); - this.unsuccessfulOperationCounter = this.meterRegistry.getLongCounter(METRIC_UNSUCCESSFUL_OPERATIONS_COUNT); - this.exceptionHistogram = this.meterRegistry.getLongHistogram(METRIC_EXCEPTIONS_HISTOGRAM); - this.throttleHistogram = this.meterRegistry.getLongHistogram(METRIC_THROTTLES_HISTOGRAM); - this.httpRequestTimeInMicroHistogram = this.meterRegistry.getLongHistogram(HTTP_REQUEST_TIME_IN_MICROS_HISTOGRAM); + this.repositoriesMetrics = repositoriesMetrics; s3RequestRetryStats = new S3RequestRetryStats(getMaxRetries()); threadPool.scheduleWithFixedDelay(() -> { var priorRetryStats = s3RequestRetryStats; @@ -214,21 +188,21 @@ public final void collectMetrics(Request request, Response response) { .map(List::size) .orElse(0); - operationCounter.incrementBy(1, attributes); + repositoriesMetrics.operationCounter().incrementBy(1, attributes); if (numberOfAwsErrors == requestCount) { - unsuccessfulOperationCounter.incrementBy(1, attributes); + repositoriesMetrics.unsuccessfulOperationCounter().incrementBy(1, attributes); } - requestCounter.incrementBy(requestCount, attributes); + repositoriesMetrics.requestCounter().incrementBy(requestCount, attributes); if (exceptionCount > 0) { - exceptionCounter.incrementBy(exceptionCount, attributes); - exceptionHistogram.record(exceptionCount, attributes); + repositoriesMetrics.exceptionCounter().incrementBy(exceptionCount, attributes); + repositoriesMetrics.exceptionHistogram().record(exceptionCount, attributes); } if (throttleCount > 0) { - throttleCounter.incrementBy(throttleCount, attributes); - 
throttleHistogram.record(throttleCount, attributes); + repositoriesMetrics.throttleCounter().incrementBy(throttleCount, attributes); + repositoriesMetrics.throttleHistogram().record(throttleCount, attributes); } - httpRequestTimeInMicroHistogram.record(getHttpRequestTimeInMicros(request), attributes); + repositoriesMetrics.httpRequestTimeInMicroHistogram().record(getHttpRequestTimeInMicros(request), attributes); } private boolean assertConsistencyBetweenHttpRequestAndOperation(Request request, Operation operation) { diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java index ddab811fcb078..624867a2f0c41 100644 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java @@ -31,12 +31,12 @@ import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.monitor.jvm.JvmInfo; import org.elasticsearch.repositories.FinalizeSnapshotContext; +import org.elasticsearch.repositories.RepositoriesMetrics; import org.elasticsearch.repositories.RepositoryData; import org.elasticsearch.repositories.RepositoryException; import org.elasticsearch.repositories.blobstore.MeteredBlobStoreRepository; import org.elasticsearch.snapshots.SnapshotDeleteListener; import org.elasticsearch.snapshots.SnapshotsService; -import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.threadpool.Scheduler; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.NamedXContentRegistry; @@ -205,7 +205,7 @@ class S3Repository extends MeteredBlobStoreRepository { final ClusterService clusterService, final BigArrays bigArrays, final RecoverySettings recoverySettings, - final MeterRegistry meterRegistry + final RepositoriesMetrics repositoriesMetrics ) { super( metadata, @@ -215,7 
+215,7 @@ class S3Repository extends MeteredBlobStoreRepository { recoverySettings, buildBasePath(metadata), buildLocation(metadata), - meterRegistry + repositoriesMetrics ); this.service = service; this.snapshotExecutor = threadPool().executor(ThreadPool.Names.SNAPSHOT); @@ -408,7 +408,7 @@ protected S3BlobStore createBlobStore() { metadata, bigArrays, threadPool, - meterRegistry + repositoriesMetrics ); } diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java index f85a66c5eb367..ba762537537e3 100644 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java @@ -23,8 +23,8 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.ReloadablePlugin; import org.elasticsearch.plugins.RepositoryPlugin; +import org.elasticsearch.repositories.RepositoriesMetrics; import org.elasticsearch.repositories.Repository; -import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.xcontent.NamedXContentRegistry; import java.io.IOException; @@ -60,7 +60,6 @@ public class S3RepositoryPlugin extends Plugin implements RepositoryPlugin, Relo } private final SetOnce service = new SetOnce<>(); - private final SetOnce meterRegistry = new SetOnce<>(); private final Settings settings; public S3RepositoryPlugin(Settings settings) { @@ -77,16 +76,16 @@ protected S3Repository createRepository( final NamedXContentRegistry registry, final ClusterService clusterService, final BigArrays bigArrays, - final RecoverySettings recoverySettings + final RecoverySettings recoverySettings, + final RepositoriesMetrics repositoriesMetrics ) { - return new S3Repository(metadata, registry, service.get(), clusterService, bigArrays, recoverySettings, meterRegistry.get()); + return new 
S3Repository(metadata, registry, service.get(), clusterService, bigArrays, recoverySettings, repositoriesMetrics); } @Override public Collection createComponents(PluginServices services) { service.set(s3Service(services.environment(), services.clusterService().getSettings())); this.service.get().refreshAndClearCache(S3ClientSettings.load(settings)); - meterRegistry.set(services.telemetryProvider().getMeterRegistry()); return List.of(service); } @@ -100,11 +99,12 @@ public Map getRepositories( final NamedXContentRegistry registry, final ClusterService clusterService, final BigArrays bigArrays, - final RecoverySettings recoverySettings + final RecoverySettings recoverySettings, + RepositoriesMetrics repositoriesMetrics ) { return Collections.singletonMap( S3Repository.TYPE, - metadata -> createRepository(metadata, registry, clusterService, bigArrays, recoverySettings) + metadata -> createRepository(metadata, registry, clusterService, bigArrays, recoverySettings, repositoriesMetrics) ); } @@ -146,8 +146,4 @@ public void reload(Settings settings) { public void close() throws IOException { getService().close(); } - - protected MeterRegistry getMeterRegistry() { - return meterRegistry.get(); - } } diff --git a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java index a966e70fd960c..085d438618a19 100644 --- a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java +++ b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java @@ -26,12 +26,12 @@ import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.repositories.RepositoriesMetrics; import org.elasticsearch.repositories.RepositoriesService; import 
org.elasticsearch.rest.AbstractRestChannel; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.action.admin.cluster.RestGetRepositoriesAction; -import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.test.rest.FakeRestRequest; import org.elasticsearch.xcontent.NamedXContentRegistry; @@ -262,9 +262,10 @@ protected S3Repository createRepository( NamedXContentRegistry registry, ClusterService clusterService, BigArrays bigArrays, - RecoverySettings recoverySettings + RecoverySettings recoverySettings, + RepositoriesMetrics repositoriesMetrics ) { - return new S3Repository(metadata, registry, getService(), clusterService, bigArrays, recoverySettings, MeterRegistry.NOOP) { + return new S3Repository(metadata, registry, getService(), clusterService, bigArrays, recoverySettings, repositoriesMetrics) { @Override protected void assertSnapshotOrGenericThread() { // eliminate thread name check as we create repo manually on test/main threads diff --git a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobContainerRetriesTests.java b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobContainerRetriesTests.java index b4b136338923f..8f273bcad3cf5 100644 --- a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobContainerRetriesTests.java +++ b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobContainerRetriesTests.java @@ -36,9 +36,9 @@ import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.core.TimeValue; import org.elasticsearch.env.Environment; +import org.elasticsearch.repositories.RepositoriesMetrics; import org.elasticsearch.repositories.blobstore.AbstractBlobContainerRetriesTestCase; import org.elasticsearch.repositories.blobstore.BlobStoreTestUtil; -import org.elasticsearch.telemetry.metric.MeterRegistry; import 
org.hamcrest.Matcher; import org.junit.After; import org.junit.Before; @@ -162,7 +162,7 @@ protected BlobContainer createBlobContainer( repositoryMetadata, BigArrays.NON_RECYCLING_INSTANCE, new DeterministicTaskQueue().getThreadPool(), - MeterRegistry.NOOP + RepositoriesMetrics.NOOP ) ) { @Override diff --git a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java index db477c16a57e7..ab5edc4608bfd 100644 --- a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java +++ b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java @@ -18,9 +18,9 @@ import org.elasticsearch.common.util.MockBigArrays; import org.elasticsearch.env.Environment; import org.elasticsearch.indices.recovery.RecoverySettings; +import org.elasticsearch.repositories.RepositoriesMetrics; import org.elasticsearch.repositories.RepositoryException; import org.elasticsearch.repositories.blobstore.BlobStoreTestUtil; -import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.hamcrest.Matchers; @@ -129,7 +129,7 @@ private S3Repository createS3Repo(RepositoryMetadata metadata) { BlobStoreTestUtil.mockClusterService(), MockBigArrays.NON_RECYCLING_INSTANCE, new RecoverySettings(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)), - MeterRegistry.NOOP + RepositoriesMetrics.NOOP ) { @Override protected void assertSnapshotOrGenericThread() { diff --git a/modules/repository-url/src/main/java/org/elasticsearch/plugin/repository/url/URLRepositoryPlugin.java b/modules/repository-url/src/main/java/org/elasticsearch/plugin/repository/url/URLRepositoryPlugin.java index fe33051df342e..0bd3ad462ef70 100644 --- 
a/modules/repository-url/src/main/java/org/elasticsearch/plugin/repository/url/URLRepositoryPlugin.java +++ b/modules/repository-url/src/main/java/org/elasticsearch/plugin/repository/url/URLRepositoryPlugin.java @@ -18,6 +18,7 @@ import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.RepositoryPlugin; +import org.elasticsearch.repositories.RepositoriesMetrics; import org.elasticsearch.repositories.Repository; import org.elasticsearch.repositories.url.URLRepository; import org.elasticsearch.xcontent.NamedXContentRegistry; @@ -47,7 +48,8 @@ public Map getRepositories( NamedXContentRegistry namedXContentRegistry, ClusterService clusterService, BigArrays bigArrays, - RecoverySettings recoverySettings + RecoverySettings recoverySettings, + RepositoriesMetrics repositoriesMetrics ) { return Collections.singletonMap(URLRepository.TYPE, metadata -> { assert httpClientFactory.get() != null : "Expected to get a configured http client factory"; diff --git a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsPlugin.java b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsPlugin.java index 957622fe66247..426899643859b 100644 --- a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsPlugin.java +++ b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsPlugin.java @@ -18,6 +18,7 @@ import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.RepositoryPlugin; +import org.elasticsearch.repositories.RepositoriesMetrics; import org.elasticsearch.repositories.Repository; import org.elasticsearch.xcontent.NamedXContentRegistry; @@ -108,7 +109,8 @@ public Map getRepositories( NamedXContentRegistry namedXContentRegistry, ClusterService clusterService, BigArrays bigArrays, - RecoverySettings recoverySettings + RecoverySettings 
recoverySettings, + RepositoriesMetrics repositoriesMetrics ) { return Collections.singletonMap( "hdfs", diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/plan/ShardSnapshotsServiceIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/plan/ShardSnapshotsServiceIT.java index 30c57873fc6b1..212cf7510d349 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/plan/ShardSnapshotsServiceIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/recovery/plan/ShardSnapshotsServiceIT.java @@ -27,6 +27,7 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.RepositoryPlugin; import org.elasticsearch.repositories.IndexId; +import org.elasticsearch.repositories.RepositoriesMetrics; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.repositories.Repository; import org.elasticsearch.repositories.RepositoryData; @@ -72,7 +73,8 @@ public Map getRepositories( NamedXContentRegistry namedXContentRegistry, ClusterService clusterService, BigArrays bigArrays, - RecoverySettings recoverySettings + RecoverySettings recoverySettings, + RepositoriesMetrics repositoriesMetrics ) { return Collections.singletonMap( TYPE, diff --git a/server/src/internalClusterTest/java/org/elasticsearch/repositories/InvalidRepositoryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/repositories/InvalidRepositoryIT.java index 1c096120a7649..730cdba059a69 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/repositories/InvalidRepositoryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/repositories/InvalidRepositoryIT.java @@ -70,7 +70,8 @@ public Map getRepositories( NamedXContentRegistry namedXContentRegistry, ClusterService clusterService, BigArrays bigArrays, - RecoverySettings recoverySettings + RecoverySettings recoverySettings, + RepositoriesMetrics repositoriesMetrics ) { return 
Collections.singletonMap( TYPE, diff --git a/server/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryOperationPurposeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryOperationPurposeIT.java index 91eb1dc6eb01b..6c8f4c04e2a75 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryOperationPurposeIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryOperationPurposeIT.java @@ -25,6 +25,7 @@ import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.RepositoryPlugin; +import org.elasticsearch.repositories.RepositoriesMetrics; import org.elasticsearch.repositories.Repository; import org.elasticsearch.repositories.fs.FsRepository; import org.elasticsearch.snapshots.AbstractSnapshotIntegTestCase; @@ -94,7 +95,8 @@ public Map getRepositories( NamedXContentRegistry namedXContentRegistry, ClusterService clusterService, BigArrays bigArrays, - RecoverySettings recoverySettings + RecoverySettings recoverySettings, + RepositoriesMetrics repositoriesMetrics ) { return Map.of( ASSERTING_REPO_TYPE, diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/MetadataLoadingDuringSnapshotRestoreIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/MetadataLoadingDuringSnapshotRestoreIT.java index 3b129455d4eef..0aa3475de7be1 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/MetadataLoadingDuringSnapshotRestoreIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/MetadataLoadingDuringSnapshotRestoreIT.java @@ -20,6 +20,7 @@ import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.repositories.IndexId; +import 
org.elasticsearch.repositories.RepositoriesMetrics; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.repositories.Repository; import org.elasticsearch.repositories.RepositoryData; @@ -207,7 +208,8 @@ public Map getRepositories( NamedXContentRegistry namedXContentRegistry, ClusterService clusterService, BigArrays bigArrays, - RecoverySettings recoverySettings + RecoverySettings recoverySettings, + RepositoriesMetrics repositoriesMetrics ) { return Collections.singletonMap( TYPE, diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotsServiceDoubleFinalizationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotsServiceDoubleFinalizationIT.java index d9e6a8eff5ad1..4c9de6cb5369f 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotsServiceDoubleFinalizationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotsServiceDoubleFinalizationIT.java @@ -31,6 +31,7 @@ import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.RepositoryPlugin; +import org.elasticsearch.repositories.RepositoriesMetrics; import org.elasticsearch.repositories.Repository; import org.elasticsearch.repositories.fs.FsRepository; import org.elasticsearch.snapshots.mockstore.BlobStoreWrapper; @@ -204,7 +205,8 @@ public Map getRepositories( NamedXContentRegistry namedXContentRegistry, ClusterService clusterService, BigArrays bigArrays, - RecoverySettings recoverySettings + RecoverySettings recoverySettings, + RepositoriesMetrics repositoriesMetrics ) { return Map.of( REPO_TYPE, diff --git a/server/src/main/java/org/elasticsearch/plugins/RepositoryPlugin.java b/server/src/main/java/org/elasticsearch/plugins/RepositoryPlugin.java index ff6d21f3039a8..86f2fb3feeab4 100644 --- a/server/src/main/java/org/elasticsearch/plugins/RepositoryPlugin.java +++ 
b/server/src/main/java/org/elasticsearch/plugins/RepositoryPlugin.java @@ -13,6 +13,7 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.indices.recovery.RecoverySettings; +import org.elasticsearch.repositories.RepositoriesMetrics; import org.elasticsearch.repositories.Repository; import org.elasticsearch.snapshots.Snapshot; import org.elasticsearch.xcontent.NamedXContentRegistry; @@ -39,7 +40,8 @@ default Map getRepositories( NamedXContentRegistry namedXContentRegistry, ClusterService clusterService, BigArrays bigArrays, - RecoverySettings recoverySettings + RecoverySettings recoverySettings, + RepositoriesMetrics repositoriesMetrics ) { return Collections.emptyMap(); } diff --git a/server/src/main/java/org/elasticsearch/repositories/RepositoriesMetrics.java b/server/src/main/java/org/elasticsearch/repositories/RepositoriesMetrics.java new file mode 100644 index 0000000000000..8442cf8c4a341 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/repositories/RepositoriesMetrics.java @@ -0,0 +1,53 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.repositories; + +import org.elasticsearch.telemetry.metric.LongCounter; +import org.elasticsearch.telemetry.metric.LongHistogram; +import org.elasticsearch.telemetry.metric.MeterRegistry; + +public record RepositoriesMetrics( + LongCounter requestCounter, + LongCounter exceptionCounter, + LongCounter throttleCounter, + LongCounter operationCounter, + LongCounter unsuccessfulOperationCounter, + LongHistogram exceptionHistogram, + LongHistogram throttleHistogram, + LongHistogram httpRequestTimeInMicroHistogram +) { + + public static RepositoriesMetrics NOOP = new RepositoriesMetrics(MeterRegistry.NOOP); + + public static final String METRIC_REQUESTS_COUNT = "es.repositories.requests.count"; + public static final String METRIC_EXCEPTIONS_COUNT = "es.repositories.exceptions.count"; + public static final String METRIC_THROTTLES_COUNT = "es.repositories.throttles.count"; + public static final String METRIC_OPERATIONS_COUNT = "es.repositories.operations.count"; + public static final String METRIC_UNSUCCESSFUL_OPERATIONS_COUNT = "es.repositories.operations.unsuccessful.count"; + public static final String METRIC_EXCEPTIONS_HISTOGRAM = "es.repositories.exceptions.histogram"; + public static final String METRIC_THROTTLES_HISTOGRAM = "es.repositories.throttles.histogram"; + public static final String HTTP_REQUEST_TIME_IN_MICROS_HISTOGRAM = "es.repositories.requests.http_request_time.histogram"; + + public RepositoriesMetrics(MeterRegistry meterRegistry) { + this( + meterRegistry.registerLongCounter(METRIC_REQUESTS_COUNT, "repository request counter", "unit"), + meterRegistry.registerLongCounter(METRIC_EXCEPTIONS_COUNT, "repository request exception counter", "unit"), + meterRegistry.registerLongCounter(METRIC_THROTTLES_COUNT, "repository request throttle counter", "unit"), + meterRegistry.registerLongCounter(METRIC_OPERATIONS_COUNT, "repository operation counter", "unit"), + 
meterRegistry.registerLongCounter(METRIC_UNSUCCESSFUL_OPERATIONS_COUNT, "repository unsuccessful operation counter", "unit"), + meterRegistry.registerLongHistogram(METRIC_EXCEPTIONS_HISTOGRAM, "repository request exception histogram", "unit"), + meterRegistry.registerLongHistogram(METRIC_THROTTLES_HISTOGRAM, "repository request throttle histogram", "unit"), + meterRegistry.registerLongHistogram( + HTTP_REQUEST_TIME_IN_MICROS_HISTOGRAM, + "HttpRequestTime in microseconds expressed as a histogram", + "micros" + ) + ); + } +} diff --git a/server/src/main/java/org/elasticsearch/repositories/RepositoriesModule.java b/server/src/main/java/org/elasticsearch/repositories/RepositoriesModule.java index a60ab48597de3..902dcd6078e7f 100644 --- a/server/src/main/java/org/elasticsearch/repositories/RepositoriesModule.java +++ b/server/src/main/java/org/elasticsearch/repositories/RepositoriesModule.java @@ -35,14 +35,6 @@ */ public final class RepositoriesModule { - public static final String METRIC_REQUESTS_COUNT = "es.repositories.requests.count"; - public static final String METRIC_EXCEPTIONS_COUNT = "es.repositories.exceptions.count"; - public static final String METRIC_THROTTLES_COUNT = "es.repositories.throttles.count"; - public static final String METRIC_OPERATIONS_COUNT = "es.repositories.operations.count"; - public static final String METRIC_UNSUCCESSFUL_OPERATIONS_COUNT = "es.repositories.operations.unsuccessful.count"; - public static final String METRIC_EXCEPTIONS_HISTOGRAM = "es.repositories.exceptions.histogram"; - public static final String METRIC_THROTTLES_HISTOGRAM = "es.repositories.throttles.histogram"; - public static final String HTTP_REQUEST_TIME_IN_MICROS_HISTOGRAM = "es.repositories.requests.http_request_time.histogram"; private final RepositoriesService repositoriesService; public RepositoriesModule( @@ -55,25 +47,7 @@ public RepositoriesModule( RecoverySettings recoverySettings, TelemetryProvider telemetryProvider ) { - // TODO: refactor APM metrics 
into their own class, passed in as a dependency (e.g. see BlobCacheMetrics as an example). - telemetryProvider.getMeterRegistry().registerLongCounter(METRIC_REQUESTS_COUNT, "repository request counter", "unit"); - telemetryProvider.getMeterRegistry().registerLongCounter(METRIC_EXCEPTIONS_COUNT, "repository request exception counter", "unit"); - telemetryProvider.getMeterRegistry().registerLongCounter(METRIC_THROTTLES_COUNT, "repository operation counter", "unit"); - telemetryProvider.getMeterRegistry() - .registerLongCounter(METRIC_OPERATIONS_COUNT, "repository unsuccessful operation counter", "unit"); - telemetryProvider.getMeterRegistry() - .registerLongCounter(METRIC_UNSUCCESSFUL_OPERATIONS_COUNT, "repository request throttle counter", "unit"); - telemetryProvider.getMeterRegistry() - .registerLongHistogram(METRIC_EXCEPTIONS_HISTOGRAM, "repository request exception histogram", "unit"); - telemetryProvider.getMeterRegistry() - .registerLongHistogram(METRIC_THROTTLES_HISTOGRAM, "repository request throttle histogram", "unit"); - telemetryProvider.getMeterRegistry() - .registerLongHistogram( - HTTP_REQUEST_TIME_IN_MICROS_HISTOGRAM, - "HttpRequestTime in microseconds expressed as as a histogram", - "micros" - ); - + final RepositoriesMetrics repositoriesMetrics = new RepositoriesMetrics(telemetryProvider.getMeterRegistry()); Map factories = new HashMap<>(); factories.put( FsRepository.TYPE, @@ -86,7 +60,8 @@ public RepositoriesModule( namedXContentRegistry, clusterService, bigArrays, - recoverySettings + recoverySettings, + repositoriesMetrics ); for (Map.Entry entry : newRepoTypes.entrySet()) { if (factories.put(entry.getKey(), entry.getValue()) != null) { diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/MeteredBlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/MeteredBlobStoreRepository.java index 4029938c6fc5f..6ecab2f8c77f2 100644 --- 
a/server/src/main/java/org/elasticsearch/repositories/blobstore/MeteredBlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/MeteredBlobStoreRepository.java @@ -14,9 +14,9 @@ import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.indices.recovery.RecoverySettings; +import org.elasticsearch.repositories.RepositoriesMetrics; import org.elasticsearch.repositories.RepositoryInfo; import org.elasticsearch.repositories.RepositoryStatsSnapshot; -import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.NamedXContentRegistry; @@ -24,7 +24,7 @@ public abstract class MeteredBlobStoreRepository extends BlobStoreRepository { private final RepositoryInfo repositoryInfo; - protected final MeterRegistry meterRegistry; + protected final RepositoriesMetrics repositoriesMetrics; public MeteredBlobStoreRepository( RepositoryMetadata metadata, @@ -34,10 +34,10 @@ public MeteredBlobStoreRepository( RecoverySettings recoverySettings, BlobPath basePath, Map location, - MeterRegistry meterRegistry + RepositoriesMetrics repositoriesMetrics ) { super(metadata, namedXContentRegistry, clusterService, bigArrays, recoverySettings, basePath); - this.meterRegistry = meterRegistry; + this.repositoriesMetrics = repositoriesMetrics; ThreadPool threadPool = clusterService.getClusterApplierService().threadPool(); this.repositoryInfo = new RepositoryInfo( UUIDs.randomBase64UUID(), diff --git a/server/src/test/java/org/elasticsearch/repositories/RepositoriesModuleTests.java b/server/src/test/java/org/elasticsearch/repositories/RepositoriesModuleTests.java index e99dcb9dae561..c3cdfe3b8981f 100644 --- a/server/src/test/java/org/elasticsearch/repositories/RepositoriesModuleTests.java +++ b/server/src/test/java/org/elasticsearch/repositories/RepositoriesModuleTests.java @@ -24,6 +24,8 @@ import java.util.Collections; 
import java.util.List; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -58,9 +60,13 @@ public void setUp() throws Exception { } public void testCanRegisterTwoRepositoriesWithDifferentTypes() { - when(plugin1.getRepositories(environment, contentRegistry, clusterService, MockBigArrays.NON_RECYCLING_INSTANCE, recoverySettings)) + when(plugin1.getRepositories(eq(environment), eq(contentRegistry), eq(clusterService), + eq(MockBigArrays.NON_RECYCLING_INSTANCE), eq(recoverySettings), + any(RepositoriesMetrics.class))) .thenReturn(Collections.singletonMap("type1", factory)); - when(plugin2.getRepositories(environment, contentRegistry, clusterService, MockBigArrays.NON_RECYCLING_INSTANCE, recoverySettings)) + when(plugin2.getRepositories(eq(environment), eq(contentRegistry), eq(clusterService), + eq(MockBigArrays.NON_RECYCLING_INSTANCE), eq(recoverySettings), + any(RepositoriesMetrics.class))) .thenReturn(Collections.singletonMap("type2", factory)); // Would throw @@ -75,9 +81,13 @@ public void testCanRegisterTwoRepositoriesWithDifferentTypes() { } public void testCannotRegisterTwoRepositoriesWithSameTypes() { - when(plugin1.getRepositories(environment, contentRegistry, clusterService, MockBigArrays.NON_RECYCLING_INSTANCE, recoverySettings)) + when(plugin1.getRepositories(eq(environment), eq(contentRegistry), eq(clusterService), + eq(MockBigArrays.NON_RECYCLING_INSTANCE), eq(recoverySettings), + any(RepositoriesMetrics.class))) .thenReturn(Collections.singletonMap("type1", factory)); - when(plugin2.getRepositories(environment, contentRegistry, clusterService, MockBigArrays.NON_RECYCLING_INSTANCE, recoverySettings)) + when(plugin2.getRepositories(eq(environment), eq(contentRegistry), eq(clusterService), + eq(MockBigArrays.NON_RECYCLING_INSTANCE), eq(recoverySettings), + any(RepositoriesMetrics.class))) 
.thenReturn(Collections.singletonMap("type1", factory)); IllegalArgumentException ex = expectThrows( @@ -119,7 +129,9 @@ public void testCannotRegisterTwoInternalRepositoriesWithSameTypes() { } public void testCannotRegisterNormalAndInternalRepositoriesWithSameTypes() { - when(plugin1.getRepositories(environment, contentRegistry, clusterService, MockBigArrays.NON_RECYCLING_INSTANCE, recoverySettings)) + when(plugin1.getRepositories(eq(environment), eq(contentRegistry), eq(clusterService), + eq(MockBigArrays.NON_RECYCLING_INSTANCE), eq(recoverySettings), + any(RepositoriesMetrics.class))) .thenReturn(Collections.singletonMap("type1", factory)); when(plugin2.getInternalRepositories(environment, contentRegistry, clusterService, recoverySettings)).thenReturn( Collections.singletonMap("type1", factory) diff --git a/server/src/test/java/org/elasticsearch/repositories/RepositoriesServiceTests.java b/server/src/test/java/org/elasticsearch/repositories/RepositoriesServiceTests.java index e30a67c166b5d..4f7001f00e6a7 100644 --- a/server/src/test/java/org/elasticsearch/repositories/RepositoriesServiceTests.java +++ b/server/src/test/java/org/elasticsearch/repositories/RepositoriesServiceTests.java @@ -39,7 +39,6 @@ import org.elasticsearch.repositories.blobstore.MeteredBlobStoreRepository; import org.elasticsearch.snapshots.SnapshotDeleteListener; import org.elasticsearch.snapshots.SnapshotId; -import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.Transport; @@ -484,7 +483,7 @@ private MeteredRepositoryTypeA(RepositoryMetadata metadata, ClusterService clust mock(RecoverySettings.class), BlobPath.EMPTY, Map.of("bucket", "bucket-a"), - MeterRegistry.NOOP + RepositoriesMetrics.NOOP ); } @@ -512,7 +511,7 @@ private MeteredRepositoryTypeB(RepositoryMetadata metadata, ClusterService clust mock(RecoverySettings.class), BlobPath.EMPTY, Map.of("bucket", 
"bucket-b"), - MeterRegistry.NOOP + RepositoriesMetrics.NOOP ); } diff --git a/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryDeleteThrottlingTests.java b/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryDeleteThrottlingTests.java index 0d322cf2542a1..0d94c027f8c46 100644 --- a/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryDeleteThrottlingTests.java +++ b/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryDeleteThrottlingTests.java @@ -22,6 +22,7 @@ import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.RepositoryPlugin; +import org.elasticsearch.repositories.RepositoriesMetrics; import org.elasticsearch.repositories.Repository; import org.elasticsearch.repositories.fs.FsRepository; import org.elasticsearch.test.ESIntegTestCase; @@ -69,7 +70,8 @@ public Map getRepositories( NamedXContentRegistry namedXContentRegistry, ClusterService clusterService, BigArrays bigArrays, - RecoverySettings recoverySettings + RecoverySettings recoverySettings, + RepositoriesMetrics repositoriesMetrics ) { return Collections.singletonMap( TEST_REPO_TYPE, diff --git a/test/external-modules/latency-simulating-directory/src/internalClusterTest/java/org/elasticsearch/test/simulatedlatencyrepo/LatencySimulatingBlobStoreRepositoryTests.java b/test/external-modules/latency-simulating-directory/src/internalClusterTest/java/org/elasticsearch/test/simulatedlatencyrepo/LatencySimulatingBlobStoreRepositoryTests.java index 7e1d1ac376cf0..77e151ed22f32 100644 --- a/test/external-modules/latency-simulating-directory/src/internalClusterTest/java/org/elasticsearch/test/simulatedlatencyrepo/LatencySimulatingBlobStoreRepositoryTests.java +++ 
b/test/external-modules/latency-simulating-directory/src/internalClusterTest/java/org/elasticsearch/test/simulatedlatencyrepo/LatencySimulatingBlobStoreRepositoryTests.java @@ -20,6 +20,7 @@ import org.elasticsearch.license.LicenseSettings; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.RepositoryPlugin; +import org.elasticsearch.repositories.RepositoriesMetrics; import org.elasticsearch.repositories.Repository; import org.elasticsearch.snapshots.AbstractSnapshotIntegTestCase; import org.elasticsearch.snapshots.SnapshotInfo; @@ -57,7 +58,8 @@ public Map getRepositories( NamedXContentRegistry namedXContentRegistry, ClusterService clusterService, BigArrays bigArrays, - RecoverySettings recoverySettings + RecoverySettings recoverySettings, + RepositoriesMetrics repositoriesMetrics ) { return Map.of( REPO_TYPE, diff --git a/test/external-modules/latency-simulating-directory/src/main/java/org/elasticsearch/test/simulatedlatencyrepo/LatencySimulatingRepositoryPlugin.java b/test/external-modules/latency-simulating-directory/src/main/java/org/elasticsearch/test/simulatedlatencyrepo/LatencySimulatingRepositoryPlugin.java index e59da871a95bc..51e5234bb7b75 100644 --- a/test/external-modules/latency-simulating-directory/src/main/java/org/elasticsearch/test/simulatedlatencyrepo/LatencySimulatingRepositoryPlugin.java +++ b/test/external-modules/latency-simulating-directory/src/main/java/org/elasticsearch/test/simulatedlatencyrepo/LatencySimulatingRepositoryPlugin.java @@ -15,6 +15,7 @@ import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.RepositoryPlugin; +import org.elasticsearch.repositories.RepositoriesMetrics; import org.elasticsearch.repositories.Repository; import org.elasticsearch.xcontent.NamedXContentRegistry; @@ -34,7 +35,8 @@ public Map getRepositories( NamedXContentRegistry namedXContentRegistry, ClusterService clusterService, BigArrays bigArrays, - 
RecoverySettings recoverySettings + RecoverySettings recoverySettings, + RepositoriesMetrics repositoriesMetrics ) { return Map.of( TYPE, diff --git a/test/framework/src/main/java/org/elasticsearch/snapshots/mockstore/MockRepository.java b/test/framework/src/main/java/org/elasticsearch/snapshots/mockstore/MockRepository.java index 1e4c328e9b1ac..b1381e408a75d 100644 --- a/test/framework/src/main/java/org/elasticsearch/snapshots/mockstore/MockRepository.java +++ b/test/framework/src/main/java/org/elasticsearch/snapshots/mockstore/MockRepository.java @@ -36,6 +36,7 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.plugins.RepositoryPlugin; +import org.elasticsearch.repositories.RepositoriesMetrics; import org.elasticsearch.repositories.Repository; import org.elasticsearch.repositories.blobstore.BlobStoreRepository; import org.elasticsearch.repositories.fs.FsRepository; @@ -82,7 +83,8 @@ public Map getRepositories( NamedXContentRegistry namedXContentRegistry, ClusterService clusterService, BigArrays bigArrays, - RecoverySettings recoverySettings + RecoverySettings recoverySettings, + RepositoriesMetrics repositoriesMetrics ) { return Collections.singletonMap( "mock", diff --git a/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/snapshots/sourceonly/SourceOnlySnapshotIT.java b/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/snapshots/sourceonly/SourceOnlySnapshotIT.java index eca3bdea374b4..90a525c2df45c 100644 --- a/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/snapshots/sourceonly/SourceOnlySnapshotIT.java +++ b/x-pack/plugin/core/src/internalClusterTest/java/org/elasticsearch/snapshots/sourceonly/SourceOnlySnapshotIT.java @@ -32,6 +32,7 @@ import org.elasticsearch.plugins.EnginePlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.RepositoryPlugin; +import 
org.elasticsearch.repositories.RepositoriesMetrics; import org.elasticsearch.repositories.Repository; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; @@ -86,7 +87,8 @@ public Map getRepositories( NamedXContentRegistry namedXContentRegistry, ClusterService clusterService, BigArrays bigArrays, - RecoverySettings recoverySettings + RecoverySettings recoverySettings, + RepositoriesMetrics repositoriesMetrics ) { return Collections.singletonMap("source", SourceOnlySnapshotRepository.newRepositoryFactory()); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java index 66534cccff064..761390266231e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java @@ -78,6 +78,7 @@ import org.elasticsearch.protocol.xpack.XPackInfoRequest; import org.elasticsearch.protocol.xpack.XPackInfoResponse; import org.elasticsearch.protocol.xpack.XPackUsageRequest; +import org.elasticsearch.repositories.RepositoriesMetrics; import org.elasticsearch.repositories.Repository; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestHandler; @@ -431,7 +432,8 @@ public Map getRepositories( NamedXContentRegistry namedXContentRegistry, ClusterService clusterService, BigArrays bigArrays, - RecoverySettings recoverySettings + RecoverySettings recoverySettings, + RepositoriesMetrics repositoriesMetrics ) { return Collections.singletonMap("source", SourceOnlySnapshotRepository.newRepositoryFactory()); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/LocalStateCompositeXPackPlugin.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/LocalStateCompositeXPackPlugin.java index 26436e497a644..7747461a6f93a 100644 --- 
a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/LocalStateCompositeXPackPlugin.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/LocalStateCompositeXPackPlugin.java @@ -78,6 +78,7 @@ import org.elasticsearch.plugins.ShutdownAwarePlugin; import org.elasticsearch.plugins.SystemIndexPlugin; import org.elasticsearch.plugins.interceptor.RestServerActionPlugin; +import org.elasticsearch.repositories.RepositoriesMetrics; import org.elasticsearch.repositories.Repository; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestHandler; @@ -493,13 +494,16 @@ public Map getRepositories( NamedXContentRegistry namedXContentRegistry, ClusterService clusterService, BigArrays bigArrays, - RecoverySettings recoverySettings + RecoverySettings recoverySettings, + RepositoriesMetrics repositoriesMetrics ) { HashMap repositories = new HashMap<>( - super.getRepositories(env, namedXContentRegistry, clusterService, bigArrays, recoverySettings) + super.getRepositories(env, namedXContentRegistry, clusterService, bigArrays, recoverySettings, repositoriesMetrics) ); filterPlugins(RepositoryPlugin.class).forEach( - r -> repositories.putAll(r.getRepositories(env, namedXContentRegistry, clusterService, bigArrays, recoverySettings)) + r -> repositories.putAll( + r.getRepositories(env, namedXContentRegistry, clusterService, bigArrays, recoverySettings, RepositoriesMetrics.NOOP) + ) ); return repositories; } diff --git a/x-pack/plugin/old-lucene-versions/src/internalClusterTest/java/org/elasticsearch/xpack/lucene/bwc/AbstractArchiveTestCase.java b/x-pack/plugin/old-lucene-versions/src/internalClusterTest/java/org/elasticsearch/xpack/lucene/bwc/AbstractArchiveTestCase.java index 095185241c4dc..206dfbe6729d6 100644 --- a/x-pack/plugin/old-lucene-versions/src/internalClusterTest/java/org/elasticsearch/xpack/lucene/bwc/AbstractArchiveTestCase.java +++ 
b/x-pack/plugin/old-lucene-versions/src/internalClusterTest/java/org/elasticsearch/xpack/lucene/bwc/AbstractArchiveTestCase.java @@ -21,6 +21,7 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.RepositoryPlugin; import org.elasticsearch.repositories.IndexId; +import org.elasticsearch.repositories.RepositoriesMetrics; import org.elasticsearch.repositories.Repository; import org.elasticsearch.repositories.RepositoryData; import org.elasticsearch.repositories.fs.FsRepository; @@ -60,7 +61,8 @@ public Map getRepositories( NamedXContentRegistry namedXContentRegistry, ClusterService clusterService, BigArrays bigArrays, - RecoverySettings recoverySettings + RecoverySettings recoverySettings, + RepositoriesMetrics repositoriesMetrics ) { return Map.of( FAKE_VERSIONS_TYPE, diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotDiskThresholdIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotDiskThresholdIntegTests.java index 91ddb112feee6..8d115d0f19580 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotDiskThresholdIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/allocation/SearchableSnapshotDiskThresholdIntegTests.java @@ -34,6 +34,7 @@ import org.elasticsearch.node.NodeRoleSettings; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.repositories.IndexId; +import org.elasticsearch.repositories.RepositoriesMetrics; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.repositories.Repository; import org.elasticsearch.repositories.fs.FsRepository; @@ -376,7 +377,8 @@ public Map getRepositories( NamedXContentRegistry 
namedXContentRegistry, ClusterService clusterService, BigArrays bigArrays, - RecoverySettings recoverySettings + RecoverySettings recoverySettings, + RepositoriesMetrics repositoriesMetrics ) { return Collections.singletonMap( TYPE, diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/full/SearchableSnapshotsPrewarmingIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/full/SearchableSnapshotsPrewarmingIntegTests.java index 7e6981a4594e5..0cf6cb93c865b 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/full/SearchableSnapshotsPrewarmingIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/full/SearchableSnapshotsPrewarmingIntegTests.java @@ -41,6 +41,7 @@ import org.elasticsearch.plugins.PluginsService; import org.elasticsearch.plugins.RepositoryPlugin; import org.elasticsearch.repositories.IndexId; +import org.elasticsearch.repositories.RepositoriesMetrics; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.repositories.Repository; import org.elasticsearch.repositories.RepositoryData; @@ -440,7 +441,8 @@ public Map getRepositories( NamedXContentRegistry namedXContentRegistry, ClusterService clusterService, BigArrays bigArrays, - RecoverySettings recoverySettings + RecoverySettings recoverySettings, + RepositoriesMetrics repositoriesMetrics ) { return Collections.singletonMap( "tracking", diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/recovery/SearchableSnapshotRecoveryStateIntegrationTests.java 
b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/recovery/SearchableSnapshotRecoveryStateIntegrationTests.java index 5b31549dbd42f..6800dea01863a 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/recovery/SearchableSnapshotRecoveryStateIntegrationTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/recovery/SearchableSnapshotRecoveryStateIntegrationTests.java @@ -25,6 +25,7 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.RepositoryPlugin; import org.elasticsearch.repositories.IndexId; +import org.elasticsearch.repositories.RepositoriesMetrics; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.repositories.Repository; import org.elasticsearch.repositories.RepositoryData; @@ -240,7 +241,8 @@ public Map getRepositories( NamedXContentRegistry namedXContentRegistry, ClusterService clusterService, BigArrays bigArrays, - RecoverySettings recoverySettings + RecoverySettings recoverySettings, + RepositoriesMetrics repositoriesMetrics ) { return Collections.singletonMap( "test-fs", diff --git a/x-pack/plugin/snapshot-based-recoveries/src/internalClusterTest/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/SnapshotBasedIndexRecoveryIT.java b/x-pack/plugin/snapshot-based-recoveries/src/internalClusterTest/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/SnapshotBasedIndexRecoveryIT.java index 4670f0fd0b9b1..d19747578b537 100644 --- a/x-pack/plugin/snapshot-based-recoveries/src/internalClusterTest/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/SnapshotBasedIndexRecoveryIT.java +++ b/x-pack/plugin/snapshot-based-recoveries/src/internalClusterTest/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/SnapshotBasedIndexRecoveryIT.java @@ -57,6 +57,7 @@ import 
org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.RepositoryPlugin; import org.elasticsearch.repositories.IndexId; +import org.elasticsearch.repositories.RepositoriesMetrics; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.repositories.Repository; import org.elasticsearch.repositories.blobstore.BlobStoreRepository; @@ -148,7 +149,8 @@ public Map getRepositories( NamedXContentRegistry namedXContentRegistry, ClusterService clusterService, BigArrays bigArrays, - RecoverySettings recoverySettings + RecoverySettings recoverySettings, + RepositoriesMetrics repositoriesMetrics ) { return Map.of( FAULTY_TYPE, diff --git a/x-pack/plugin/snapshot-repo-test-kit/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalysisFailureIT.java b/x-pack/plugin/snapshot-repo-test-kit/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalysisFailureIT.java index 2f55e55b2f0cf..adf5aec6a72ce 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalysisFailureIT.java +++ b/x-pack/plugin/snapshot-repo-test-kit/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalysisFailureIT.java @@ -32,6 +32,7 @@ import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.RepositoryPlugin; +import org.elasticsearch.repositories.RepositoriesMetrics; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.repositories.Repository; import org.elasticsearch.repositories.RepositoryMissingException; @@ -430,7 +431,8 @@ public Map getRepositories( NamedXContentRegistry namedXContentRegistry, ClusterService clusterService, BigArrays bigArrays, - RecoverySettings recoverySettings + RecoverySettings recoverySettings, + RepositoriesMetrics repositoriesMetrics ) { return 
Map.of( DISRUPTABLE_REPO_TYPE, diff --git a/x-pack/plugin/snapshot-repo-test-kit/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalysisSuccessIT.java b/x-pack/plugin/snapshot-repo-test-kit/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalysisSuccessIT.java index 77095fc770908..45e63eb9ff31f 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalysisSuccessIT.java +++ b/x-pack/plugin/snapshot-repo-test-kit/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalysisSuccessIT.java @@ -31,6 +31,7 @@ import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.RepositoryPlugin; +import org.elasticsearch.repositories.RepositoriesMetrics; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.repositories.Repository; import org.elasticsearch.repositories.RepositoryMissingException; @@ -149,7 +150,8 @@ public Map getRepositories( NamedXContentRegistry namedXContentRegistry, ClusterService clusterService, BigArrays bigArrays, - RecoverySettings recoverySettings + RecoverySettings recoverySettings, + RepositoriesMetrics repositoriesMetrics ) { return Map.of( ASSERTING_REPO_TYPE, diff --git a/x-pack/plugin/voting-only-node/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/votingonly/VotingOnlyNodePluginTests.java b/x-pack/plugin/voting-only-node/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/votingonly/VotingOnlyNodePluginTests.java index 9fa7d10581353..983ca4e741d83 100644 --- a/x-pack/plugin/voting-only-node/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/votingonly/VotingOnlyNodePluginTests.java +++ 
b/x-pack/plugin/voting-only-node/src/internalClusterTest/java/org/elasticsearch/cluster/coordination/votingonly/VotingOnlyNodePluginTests.java @@ -25,6 +25,7 @@ import org.elasticsearch.node.Node; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.RepositoryPlugin; +import org.elasticsearch.repositories.RepositoriesMetrics; import org.elasticsearch.repositories.Repository; import org.elasticsearch.repositories.fs.FsRepository; import org.elasticsearch.snapshots.SnapshotInfo; @@ -243,7 +244,8 @@ public Map getRepositories( NamedXContentRegistry namedXContentRegistry, ClusterService clusterService, BigArrays bigArrays, - RecoverySettings recoverySettings + RecoverySettings recoverySettings, + RepositoriesMetrics repositoriesMetrics ) { return Collections.singletonMap( "verifyaccess-fs", From 47a640fc9e1ffcc1b8202ee6a07864ea7d5708cd Mon Sep 17 00:00:00 2001 From: Tim Grein Date: Fri, 8 Dec 2023 10:45:13 +0100 Subject: [PATCH 02/14] [Connectors API] Check the created sync job in the post connector sync job integration tests (#103136) Add verifications of the values of a created sync job to the integration tests. 
--- .../entsearch/400_connector_sync_job_post.yml | 59 ++++++++++++++++++- 1 file changed, 57 insertions(+), 2 deletions(-) diff --git a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/400_connector_sync_job_post.yml b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/400_connector_sync_job_post.yml index 055221b917cb1..0403842cb0728 100644 --- a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/400_connector_sync_job_post.yml +++ b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/400_connector_sync_job_post.yml @@ -20,28 +20,83 @@ setup: id: test-connector job_type: full trigger_method: on_demand + - set: { id: id } + + - match: { id: $id } + + - do: + connector_sync_job.get: + connector_sync_job_id: $id + + - match: { connector.id: test-connector} + - match: { job_type: full } + - match: { trigger_method: on_demand } + - match: { status: pending } + - match: { total_document_count: 0 } + - match: { indexed_document_count: 0 } + - match: { indexed_document_volume: 0 } + - match: { deleted_document_count: 0 } - match: { id: $id } + - exists: created_at + - exists: last_seen + --- -'Create connector sync job with missing job type': +'Create connector sync job with missing job type - expect job type full as default': - do: connector_sync_job.post: body: id: test-connector trigger_method: on_demand + - set: { id: id } + - match: { id: $id } + - do: + connector_sync_job.get: + connector_sync_job_id: $id + + - match: { connector.id: test-connector } + - match: { job_type: full } + - match: { trigger_method: on_demand } + - match: { status: pending } + - match: { total_document_count: 0 } + - match: { indexed_document_count: 0 } + - match: { indexed_document_volume: 0 } + - match: { deleted_document_count: 0 } + - match: { id: $id } + - exists: created_at + - exists: last_seen + --- -'Create connector sync job with 
missing trigger method': +'Create connector sync job with missing trigger method - expect trigger method on_demand as default': - do: connector_sync_job.post: body: id: test-connector job_type: full + - set: { id: id } + + - match: { id: $id } + + - do: + connector_sync_job.get: + connector_sync_job_id: $id + + - match: { connector.id: test-connector } + - match: { job_type: full } + - match: { trigger_method: on_demand } + - match: { status: pending } + - match: { total_document_count: 0 } + - match: { indexed_document_count: 0 } + - match: { indexed_document_volume: 0 } + - match: { deleted_document_count: 0 } - match: { id: $id } + - exists: created_at + - exists: last_seen --- 'Create connector sync job with non-existing connector id': From 39d9ce8f897edaf7da89dda3e43d6b2707f07eff Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20Fred=C3=A9n?= <109296772+jfreden@users.noreply.github.com> Date: Fri, 8 Dec 2023 11:11:11 +0100 Subject: [PATCH 03/14] [DOCS] Update SAML guide to reference attribute_delimiters.group (#103102) This is a follow up PR from https://github.com/elastic/elasticsearch/pull/102769. The SAML realm can now be configured to split the `groups` attribute by delimiter, this updates the docs to mention that. --- .../authentication/saml-guide.asciidoc | 27 +++++++------------ .../settings/security-settings.asciidoc | 12 ++++----- 2 files changed, 15 insertions(+), 24 deletions(-) diff --git a/docs/reference/security/authentication/saml-guide.asciidoc b/docs/reference/security/authentication/saml-guide.asciidoc index c0cdd6bc01dc3..cf91e11b7f18f 100644 --- a/docs/reference/security/authentication/saml-guide.asciidoc +++ b/docs/reference/security/authentication/saml-guide.asciidoc @@ -328,24 +328,15 @@ groups:: _(Recommended)_ + [NOTE] ==== -Some IdPs are configured to send the `groups` list as a comma-separated string, -but {es} can't parse this string into an array of groups. 
To map this SAML -attribute to the `attributes.groups` setting in the {es} realm, a cluster -security administrator can use a wildcard when -<>. While flexible, wildcards are -less accurate and can match on unwanted patterns. Instead, a cluster security -administrator can use a regular expression to create a role mapping rule that -matches only a single group. For example, the following regular expression -matches only on the `elasticsearch-admins` group: - -[source,sh] ----- -/^(.*,)?elasticsearch-admins(,.*)?$/ ----- - -These regular expressions are based on Lucene’s -{ref}/regexp-syntax.html[regular expression syntax], and can match more complex -patterns. All regular expressions must start and end with a forward slash. +Some IdPs are configured to send the `groups` list as a single value, comma-separated +string. To map this SAML attribute to the `attributes.groups` setting in the {es} +realm, you can configure a string delimiter using the `attribute_delimiters.group` +setting. + +For example, splitting the SAML attribute value +`engineering,elasticsearch-admins,employees` on a delimiter value of `,` will +result in `engineering`, `elasticsearch-admins`, and `employees` as the list of +groups for the user. ==== name:: _(Optional)_ The user's full name. diff --git a/docs/reference/settings/security-settings.asciidoc b/docs/reference/settings/security-settings.asciidoc index 5729a31b6c728..f4875fd096b00 100644 --- a/docs/reference/settings/security-settings.asciidoc +++ b/docs/reference/settings/security-settings.asciidoc @@ -1222,7 +1222,7 @@ _Distinguished Name_. `attribute_patterns.principal` {ess-icon}:: (<>) A Java regular expression that is matched against the SAML attribute specified -by `attributes.pattern` before it is applied to the user's _principal_ property. +by `attributes.principal` before it is applied to the user's _principal_ property. 
The attribute value must match the pattern and the value of the first _capturing group_ is used as the principal. For example, `^([^@]+)@example\\.com$` matches email addresses from the "example.com" domain and uses the local-part as @@ -1257,13 +1257,13 @@ As per `attribute_patterns.principal`, but for the _dn_ property. `attribute_delimiters.groups` {ess-icon}:: (<>) A plain string that is used as a delimiter to split a single-valued SAML -attribute specified by attributes.groups before it is applied to the user's -groups property. For example, splitting the SAML attribute value -engineering,elasticsearch-admins,employees on a delimiter value of , will -result in engineering, elasticsearch-admins, and employees as the list of +attribute specified by `attributes.groups` before it is applied to the user's +_groups_ property. For example, splitting the SAML attribute value +`engineering,elasticsearch-admins,employees` on a delimiter value of `,` will +result in `engineering`, `elasticsearch-admins`, and `employees` as the list of groups for the user. The delimiter will always be split on, regardless of escaping in the input string. This setting does not support multi-valued SAML -attributes. It cannot be used together with the attribute_patterns setting. +attributes. It cannot be used together with the `attribute_patterns` setting. You can only configure this setting for the groups attribute. // end::saml-attributes-delimiters-groups-tag[] From c6f7e2d8689926e5c8743d88943cc55182af7bb1 Mon Sep 17 00:00:00 2001 From: Tim Grein Date: Fri, 8 Dec 2023 11:56:46 +0100 Subject: [PATCH 04/14] Add new IsAfterAssertion for yaml rest tests (#103122) Adds a new assertion "is_after", which can be used in the yaml based rest tests to check, whether one instant comes after the other. 
--- .../rest-api-spec/test/README.asciidoc | 9 +++ .../test/rest/yaml/Features.java | 3 +- .../yaml/section/ClientYamlTestSuite.java | 11 +++ .../rest/yaml/section/ExecutableSection.java | 1 + .../rest/yaml/section/IsAfterAssertion.java | 69 +++++++++++++++++++ .../rest/yaml/section/AssertionTests.java | 11 +++ .../section/ClientYamlTestSuiteTests.java | 12 ++++ .../yaml/section/IsAfterAssertionTests.java | 63 +++++++++++++++++ 8 files changed, 178 insertions(+), 1 deletion(-) create mode 100644 test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/IsAfterAssertion.java create mode 100644 test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/IsAfterAssertionTests.java diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/README.asciidoc b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/README.asciidoc index 96a1d5d25f0fd..c2baa6746afdb 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/README.asciidoc +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/README.asciidoc @@ -526,6 +526,15 @@ For instance, when testing you may want to base64 encode username and password f Stashed values can be used as described in the `set` section +=== `is_after` + +Used to compare two variables (both need to be of type String, which can be parsed to an Instant) and check, whether +the first one is after the other one. + +.... + - is_after: { result.some_field: 2023-05-25T12:30:00.000Z } +.... 
+ === `is_true` The specified key exists and has a true value (ie not `0`, `false`, `undefined`, `null` diff --git a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/Features.java b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/Features.java index e11e22488c834..8b9aafff5bded 100644 --- a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/Features.java +++ b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/Features.java @@ -39,7 +39,8 @@ public final class Features { "arbitrary_key", "allowed_warnings", "allowed_warnings_regex", - "close_to" + "close_to", + "is_after" ); private Features() { diff --git a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuite.java b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuite.java index 225bb3a94d7ca..4fee01e71d881 100644 --- a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuite.java +++ b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuite.java @@ -272,6 +272,17 @@ private static Stream validateExecutableSections( """, section.getLocation().lineNumber())) ); + errors = Stream.concat( + errors, + sections.stream() + .filter(section -> section instanceof IsAfterAssertion) + .filter(section -> false == hasSkipFeature("is_after", testSection, setupSection, teardownSection)) + .map(section -> String.format(Locale.ROOT, """ + attempted to add an [is_after] assertion without a corresponding ["skip": "features": "is_after"] \ + so runners that do not support the [is_after] assertion can skip the test at line [%d]\ + """, section.getLocation().lineNumber())) + ); + return errors; } diff --git a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/ExecutableSection.java 
b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/ExecutableSection.java index 64832d47cc7b3..a3fb9870a6ba8 100644 --- a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/ExecutableSection.java +++ b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/ExecutableSection.java @@ -28,6 +28,7 @@ public interface ExecutableSection { new NamedXContentRegistry.Entry(ExecutableSection.class, new ParseField("set"), SetSection::parse), new NamedXContentRegistry.Entry(ExecutableSection.class, new ParseField("transform_and_set"), TransformAndSetSection::parse), new NamedXContentRegistry.Entry(ExecutableSection.class, new ParseField("match"), MatchAssertion::parse), + new NamedXContentRegistry.Entry(ExecutableSection.class, new ParseField("is_after"), IsAfterAssertion::parse), new NamedXContentRegistry.Entry(ExecutableSection.class, new ParseField("is_true"), IsTrueAssertion::parse), new NamedXContentRegistry.Entry(ExecutableSection.class, new ParseField("is_false"), IsFalseAssertion::parse), new NamedXContentRegistry.Entry(ExecutableSection.class, new ParseField("gt"), GreaterThanAssertion::parse), diff --git a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/IsAfterAssertion.java b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/IsAfterAssertion.java new file mode 100644 index 0000000000000..986cae4b28b5e --- /dev/null +++ b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/IsAfterAssertion.java @@ -0,0 +1,69 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.test.rest.yaml.section; + +import org.elasticsearch.core.Tuple; +import org.elasticsearch.logging.LogManager; +import org.elasticsearch.logging.Logger; +import org.elasticsearch.xcontent.XContentLocation; +import org.elasticsearch.xcontent.XContentParser; + +import java.io.IOException; +import java.time.Instant; +import java.time.format.DateTimeParseException; + +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; + +/** + * Represents an is after assert section: + * + * - is_after: { result.some_instant: "2023-05-25T12:30:00.000Z" } + * + */ +public class IsAfterAssertion extends Assertion { + + public static IsAfterAssertion parse(XContentParser parser) throws IOException { + XContentLocation location = parser.getTokenLocation(); + Tuple stringObjectTuple = ParserUtils.parseTuple(parser); + return new IsAfterAssertion(location, stringObjectTuple.v1(), stringObjectTuple.v2()); + } + + private static final Logger logger = LogManager.getLogger(IsAfterAssertion.class); + + public IsAfterAssertion(XContentLocation location, String field, Object expectedValue) { + super(location, field, expectedValue); + } + + @Override + protected void doAssert(Object actualValue, Object expectedValue) { + assertNotNull("field [" + getField() + "] is null", actualValue); + assertNotNull("value to test against cannot be null", expectedValue); + + Instant fieldInstant = parseToInstant( + actualValue.toString(), + "field [" + getField() + "] cannot be parsed to " + Instant.class.getSimpleName() + ", got [" + actualValue + "]" + ); + Instant valueInstant = parseToInstant( + expectedValue.toString(), + "value to test against [" + expectedValue + "] cannot be parsed to " + Instant.class.getSimpleName() + ); + + logger.trace("assert that [{}] is after [{}] (field [{}])", fieldInstant, valueInstant); + assertTrue("field [" + getField() + "] should be after [" + actualValue + "], but was not", 
fieldInstant.isAfter(valueInstant)); + } + + private Instant parseToInstant(String string, String onErrorMessage) { + try { + return Instant.parse(string); + } catch (DateTimeParseException e) { + throw new AssertionError(onErrorMessage, e); + } + } +} diff --git a/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/AssertionTests.java b/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/AssertionTests.java index 953b5f261485d..2513a7f1e94c1 100644 --- a/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/AssertionTests.java +++ b/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/AssertionTests.java @@ -69,6 +69,17 @@ public void testParseLength() throws Exception { assertThat((Integer) lengthAssertion.getExpectedValue(), equalTo(22)); } + public void testParseIsAfter() throws Exception { + parser = createParser(YamlXContent.yamlXContent, "{ field: 2021-05-25T12:30:00.000Z}"); + + IsAfterAssertion isAfterAssertion = IsAfterAssertion.parse(parser); + + assertThat(isAfterAssertion, notNullValue()); + assertThat(isAfterAssertion.getField(), equalTo("field")); + assertThat(isAfterAssertion.getExpectedValue(), instanceOf(String.class)); + assertThat(isAfterAssertion.getExpectedValue(), equalTo("2021-05-25T12:30:00.000Z")); + } + public void testParseMatchSimpleIntegerValue() throws Exception { parser = createParser(YamlXContent.yamlXContent, "{ field: 10 }"); diff --git a/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java b/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java index 5a9de8de37549..3f8cc298c5c36 100644 --- a/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java +++ b/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java @@ -17,6 +17,7 @@ 
import java.nio.file.Files; import java.nio.file.Path; +import java.time.Instant; import java.util.ArrayList; import java.util.Collections; import java.util.List; @@ -715,6 +716,17 @@ public void testAddingCloseToWithSkip() { createTestSuite(skipSection, closeToAssertion).validate(); } + public void testAddingIsAfterWithSkip() { + int lineNumber = between(1, 10000); + SkipSection skipSection = new SkipSection(null, singletonList("is_after"), emptyList(), null); + IsAfterAssertion isAfterAssertion = new IsAfterAssertion( + new XContentLocation(lineNumber, 0), + randomAlphaOfLength(randomIntBetween(3, 30)), + randomInstantBetween(Instant.ofEpochSecond(0L), Instant.ofEpochSecond(3000000000L)) + ); + createTestSuite(skipSection, isAfterAssertion).validate(); + } + private static ClientYamlTestSuite createTestSuite(SkipSection skipSection, ExecutableSection executableSection) { final SetupSection setupSection; final TeardownSection teardownSection; diff --git a/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/IsAfterAssertionTests.java b/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/IsAfterAssertionTests.java new file mode 100644 index 0000000000000..d810fdc8b66ad --- /dev/null +++ b/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/IsAfterAssertionTests.java @@ -0,0 +1,63 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.test.rest.yaml.section; + +import org.elasticsearch.xcontent.XContentLocation; + +import java.io.IOException; + +public class IsAfterAssertionTests extends AbstractClientYamlTestFragmentParserTestCase { + + public void testParseIsAfterAssertionWithNonInstantValue() { + XContentLocation xContentLocation = new XContentLocation(0, 0); + IsAfterAssertion isAfterAssertion = new IsAfterAssertion(xContentLocation, "some_field", "non instant value"); + + expectThrows(AssertionError.class, () -> isAfterAssertion.doAssert("2022-05-25T12:30:00.000Z", "non instant value")); + } + + public void testIsAfter() { + String field = "some_field"; + + // actual value one year after value to test against + String actualValue = "2022-05-25T12:30:00.000Z"; + String expectedValue = "2021-05-25T12:30:00.000Z"; + + XContentLocation xContentLocation = new XContentLocation(0, 0); + IsAfterAssertion isAfterAssertion = new IsAfterAssertion(xContentLocation, field, expectedValue); + + isAfterAssertion.doAssert(actualValue, expectedValue); + } + + public void testIsNotAfter() { + String field = "some_field"; + + // actual value one year before value to test against + String actualValue = "2020-05-25T12:30:00.000Z"; + String expectedValue = "2021-05-25T12:30:00.000Z"; + + XContentLocation xContentLocation = new XContentLocation(0, 0); + IsAfterAssertion isAfterAssertion = new IsAfterAssertion(xContentLocation, field, expectedValue); + + expectThrows(AssertionError.class, () -> isAfterAssertion.doAssert(actualValue, expectedValue)); + } + + public void testActualValueIsNull() { + XContentLocation xContentLocation = new XContentLocation(0, 0); + IsAfterAssertion isAfterAssertion = new IsAfterAssertion(xContentLocation, "field", "2021-05-25T12:30:00.000Z"); + + expectThrows(AssertionError.class, () -> isAfterAssertion.doAssert(null, "2021-05-25T12:30:00.000Z")); + } + + public void testExpectedValueIsNull() throws IOException { + XContentLocation xContentLocation = 
new XContentLocation(0, 0); + IsAfterAssertion isAfterAssertion = new IsAfterAssertion(xContentLocation, "field", "2021-05-25T12:30:00.000Z"); + + expectThrows(AssertionError.class, () -> isAfterAssertion.doAssert("2021-05-25T12:30:00.000Z", null)); + } +} From 17811280c20ca51b67400d32dc813d91bcee6ed3 Mon Sep 17 00:00:00 2001 From: Andrei Dan Date: Fri, 8 Dec 2023 13:52:18 +0200 Subject: [PATCH 05/14] [DOCS] DSL downsampling docs (#103148) --- .../lifecycle/apis/put-lifecycle.asciidoc | 36 +++++++++++++++++++ .../data-streams/lifecycle/index.asciidoc | 8 ++++- 2 files changed, 43 insertions(+), 1 deletion(-) diff --git a/docs/reference/data-streams/lifecycle/apis/put-lifecycle.asciidoc b/docs/reference/data-streams/lifecycle/apis/put-lifecycle.asciidoc index 89b8bbeb880c3..53bd3c2b96f0b 100644 --- a/docs/reference/data-streams/lifecycle/apis/put-lifecycle.asciidoc +++ b/docs/reference/data-streams/lifecycle/apis/put-lifecycle.asciidoc @@ -59,6 +59,16 @@ duration the document could be deleted. When empty, every document in this data If defined, it turns data streqm lifecycle on/off (`true`/`false`) for this data stream. A data stream lifecycle that's disabled (`enabled: false`) will have no effect on the data stream. Defaults to `true`. + +`downsampling`:: +(Optional, array) +An optional array of downsampling configuration objects, each defining an `after` +interval representing when the backing index is meant to be downsampled (the time +frame is calculated since the index was rolled over, i.e. generation time) and +a `fixed_interval` representing the downsampling interval (the minimum `fixed_interval` +value is `5m`). A maximum number of 10 downsampling rounds can be configured. +See <> below. 
+ ==== [[data-streams-put-lifecycle-example]] @@ -84,3 +94,29 @@ When the lifecycle is successfully updated in all data streams, you receive the "acknowledged": true } -------------------------------------------------- + +[[data-streams-put-lifecycle-downsampling-example]] +==== {api-examples-title} + +The following example configures two downsampling rounds, the first one starting +one day after the backing index is rolled over (or later, if the index is still +within its write-accepting <>) with an interval +of `10m`, and a second round starting 7 days after rollover at an interval of `1d`: + +[source,console] +-------------------------------------------------------------------- +PUT _data_stream/my-weather-sensor-data-stream/_lifecycle +{ + "downsampling": [ + { + "after": "1d", + "fixed_interval": "10m" + }, + { + "after": "7d", + "fixed_interval": "1d" + } + ] +} +-------------------------------------------------------------------- +//TEST[skip:downsampling requires waiting for indices to be out of time bounds] diff --git a/docs/reference/data-streams/lifecycle/index.asciidoc b/docs/reference/data-streams/lifecycle/index.asciidoc index 6c0220ef0a80f..ef5558817885e 100644 --- a/docs/reference/data-streams/lifecycle/index.asciidoc +++ b/docs/reference/data-streams/lifecycle/index.asciidoc @@ -18,6 +18,10 @@ and backwards incompatible mapping changes. * Configurable retention, which allows you to configure the time period for which your data is guaranteed to be stored. {es} is allowed at a later time to delete data older than this time period. +A data stream lifecycle also supports downsampling the data stream backing indices. +See <> for +more details. + [discrete] [[data-streams-lifecycle-how-it-works]] === How does it work? @@ -35,7 +39,9 @@ into tiers of exponential sizes, merging the long tail of small segments is only fraction of the cost of force mergeing to a single segment. 
The small segments would usually hold the most recent data so tail mergeing will focus the merging resources on the higher-value data that is most likely to keep being queried. -4. Applies retention to the remaining backing indices. This means deleting the backing indices whose +4. If <> is configured it will execute +all the configured downsampling rounds. +5. Applies retention to the remaining backing indices. This means deleting the backing indices whose `generation_time` is longer than the configured retention period. The `generation_time` is only applicable to rolled over backing indices and it is either the time since the backing index got rolled over, or the time optionally configured in the <> setting. From 5cf8b94f30037f7f4719be2bcd2bce45e513373d Mon Sep 17 00:00:00 2001 From: David Turner Date: Fri, 8 Dec 2023 12:56:05 +0000 Subject: [PATCH 06/14] Simplify CompositeRolesStore#getRoleDescriptors (#103126) This method has only one production caller, which asserts that the response always has length 1 and (if assertions are disabled) discards all but the first set of role descriptors in the response. We can push that behaviour down into the implementation simplifying things quite considerably and eliminating the allocation and copying needed to support other sizes of response. 
--- .../ApiKeyUserRoleDescriptorResolver.java | 10 ++---- .../authz/store/CompositeRolesStore.java | 35 ++++++------------- ...ApiKeyUserRoleDescriptorResolverTests.java | 11 +++--- .../authz/store/CompositeRolesStoreTests.java | 14 ++++---- 4 files changed, 24 insertions(+), 46 deletions(-) diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/ApiKeyUserRoleDescriptorResolver.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/ApiKeyUserRoleDescriptorResolver.java index 17c35ecca5f13..9c54f6decb0c8 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/ApiKeyUserRoleDescriptorResolver.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/ApiKeyUserRoleDescriptorResolver.java @@ -15,7 +15,6 @@ import org.elasticsearch.xpack.core.security.authz.support.DLSRoleQueryValidator; import org.elasticsearch.xpack.security.authz.store.CompositeRolesStore; -import java.util.Collection; import java.util.Set; public class ApiKeyUserRoleDescriptorResolver { @@ -37,15 +36,10 @@ public void resolveUserRoleDescriptors(final Authentication authentication, fina return; } - rolesStore.getRoleDescriptorsList(effectiveSubject, listener.delegateFailureAndWrap(this::handleRoleDescriptorsList)); + rolesStore.getRoleDescriptors(effectiveSubject, listener.delegateFailureAndWrap(this::handleRoleDescriptors)); } - private void handleRoleDescriptorsList( - ActionListener> listener, - Collection> roleDescriptorsList - ) { - assert roleDescriptorsList.size() == 1; - final var roleDescriptors = roleDescriptorsList.iterator().next(); + private void handleRoleDescriptors(ActionListener> listener, Set roleDescriptors) { for (RoleDescriptor rd : roleDescriptors) { DLSRoleQueryValidator.validateQueryField(rd.getIndicesPrivileges(), xContentRegistry); } diff --git 
a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStore.java index ab9ea8f772054..631a988b0f325 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStore.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStore.java @@ -10,7 +10,6 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.support.GroupedActionListener; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.cache.Cache; @@ -349,30 +348,18 @@ private void buildThenMaybeCacheRole( ); } - public void getRoleDescriptorsList(Subject subject, ActionListener>> listener) { - tryGetRoleDescriptorForInternalUser(subject).ifPresentOrElse( - roleDescriptor -> listener.onResponse(List.of(Set.of(roleDescriptor))), - () -> { - final List roleReferences = subject.getRoleReferenceIntersection(anonymousUser).getRoleReferences(); - final GroupedActionListener> groupedActionListener = new GroupedActionListener<>( - roleReferences.size(), - listener - ); + public void getRoleDescriptors(Subject subject, ActionListener> listener) { + tryGetRoleDescriptorForInternalUser(subject).ifPresentOrElse(roleDescriptor -> listener.onResponse(Set.of(roleDescriptor)), () -> { + final List roleReferences = subject.getRoleReferenceIntersection(anonymousUser).getRoleReferences(); + assert roleReferences.size() == 1; // we only handle the singleton case today, but that may change with derived API keys - roleReferences.forEach( - roleReference -> roleReference.resolve( - roleReferenceResolver, - groupedActionListener.delegateFailureAndWrap((delegate, rolesRetrievalResult) -> { - if 
(rolesRetrievalResult.isSuccess()) { - delegate.onResponse(rolesRetrievalResult.getRoleDescriptors()); - } else { - delegate.onFailure(new ElasticsearchException("role retrieval had one or more failures")); - } - }) - ) - ); - } - ); + ActionListener.run(listener.map(rolesRetrievalResult -> { + if (rolesRetrievalResult.isSuccess() == false) { + throw new ElasticsearchException("role retrieval had one or more failures"); + } + return rolesRetrievalResult.getRoleDescriptors(); + }), l -> roleReferences.iterator().next().resolve(roleReferenceResolver, l)); + }); } // Package private for testing diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/ApiKeyUserRoleDescriptorResolverTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/ApiKeyUserRoleDescriptorResolverTests.java index 2af18f5dca7dc..502d2b7ce0dda 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/ApiKeyUserRoleDescriptorResolverTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/ApiKeyUserRoleDescriptorResolverTests.java @@ -19,8 +19,6 @@ import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.authz.store.CompositeRolesStore; -import java.util.Collection; -import java.util.List; import java.util.Set; import java.util.stream.Collectors; @@ -56,11 +54,10 @@ public void testGetRoleDescriptors() { assertThat(subject.getType(), is(Subject.Type.USER)); assertThat(Set.of(subject.getUser().roles()), equalTo(userRoleNames)); - ActionListener>> listener = (ActionListener>>) args[args.length - - 1]; - listener.onResponse(List.of(roleDescriptors)); + ActionListener> listener = (ActionListener>) args[args.length - 1]; + listener.onResponse(roleDescriptors); return null; - }).when(rolesStore).getRoleDescriptorsList(any(Subject.class), any(ActionListener.class)); + 
}).when(rolesStore).getRoleDescriptors(any(Subject.class), any(ActionListener.class)); final PlainActionFuture> future = new PlainActionFuture<>(); resolver.resolveUserRoleDescriptors(authentication, future); @@ -77,6 +74,6 @@ public void testGetRoleDescriptorsEmptyForApiKey() { resolver.resolveUserRoleDescriptors(authentication, future); assertThat(future.actionGet(), equalTo(Set.of())); - verify(rolesStore, never()).getRoleDescriptorsList(any(), any()); + verify(rolesStore, never()).getRoleDescriptors(any(), any()); } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java index c0df3a0947c71..4ad7c61d45d63 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java @@ -2760,15 +2760,15 @@ public void testGetRoleDescriptorsListForInternalUsers() { when(subject.getUser()).thenReturn(InternalUsers.SYSTEM_USER); final IllegalArgumentException e1 = expectThrows( IllegalArgumentException.class, - () -> compositeRolesStore.getRoleDescriptorsList(subject, new PlainActionFuture<>()) + () -> compositeRolesStore.getRoleDescriptors(subject, ActionListener.noop()) ); assertThat(e1.getMessage(), equalTo("should never try to get the roles for internal user [" + SystemUser.NAME + "]")); for (var internalUser : AuthenticationTestHelper.internalUsersWithLocalRoleDescriptor()) { when(subject.getUser()).thenReturn(internalUser); - final PlainActionFuture>> future = new PlainActionFuture<>(); - compositeRolesStore.getRoleDescriptorsList(subject, future); - assertThat(future.actionGet(), equalTo(List.of(Set.of(internalUser.getLocalClusterRoleDescriptor().get())))); + final PlainActionFuture> future = new 
PlainActionFuture<>(); + compositeRolesStore.getRoleDescriptors(subject, future); + assertThat(future.actionGet(), equalTo(Set.of(internalUser.getLocalClusterRoleDescriptor().get()))); } } @@ -2787,9 +2787,9 @@ public void testGetRoleDescriptorsListUsesRoleStoreToResolveRoleWithInternalRole when(subject.getRoleReferenceIntersection(any())).thenReturn( new RoleReferenceIntersection(new RoleReference.NamedRoleReference(new String[] { roleName })) ); - final PlainActionFuture>> future = new PlainActionFuture<>(); - compositeRolesStore.getRoleDescriptorsList(subject, future); - assertThat(future.actionGet(), equalTo(List.of(Set.of(expectedRoleDescriptor)))); + final PlainActionFuture> future = new PlainActionFuture<>(); + compositeRolesStore.getRoleDescriptors(subject, future); + assertThat(future.actionGet(), equalTo(Set.of(expectedRoleDescriptor))); } private void getRoleForRoleNames(CompositeRolesStore rolesStore, Collection roleNames, ActionListener listener) { From 722853e2530e5c8b052cab77298955c959cfe00f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Przemys=C5=82aw=20Witek?= Date: Fri, 8 Dec 2023 14:20:15 +0100 Subject: [PATCH 07/14] [Transform] Unmute TransformSchedulerTests.testScheduleNowWithSystemClock (#103191) --- .../transforms/scheduling/TransformSchedulerTests.java | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/scheduling/TransformSchedulerTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/scheduling/TransformSchedulerTests.java index c6c6c2febfdad..7125b4074bc4a 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/scheduling/TransformSchedulerTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/scheduling/TransformSchedulerTests.java @@ -327,7 +327,6 @@ public void testSchedulingWithSystemClock() throws Exception { 
transformScheduler.stop(); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/95445") public void testScheduleNowWithSystemClock() throws Exception { String transformId = "test-schedule-now-with-system-clock"; TimeValue frequency = TimeValue.timeValueHours(1); // Very long pause between checkpoints @@ -343,7 +342,11 @@ public void testScheduleNowWithSystemClock() throws Exception { Thread.sleep(5 * 1000L); transformScheduler.scheduleNow(transformId); - assertThat(events, hasSize(2)); + // If we are unlucky, the scheduleNow call will trigger processing **exactly** in this small window of time in which processing is + // temporarily disallowed (as two concurrent invocations of the "TransformScheduler.processScheduledTasks" method are not allowed). + // Hence, we need to wait for the next processing cycle to happen (it will be TEST_SCHEDULER_FREQUENCY from now). + assertBusy(() -> assertThat(events, hasSize(2)), TEST_SCHEDULER_FREQUENCY.seconds() + 1, TimeUnit.SECONDS); + assertThat(events.get(0).transformId(), is(equalTo(transformId))); assertThat(events.get(1).transformId(), is(equalTo(transformId))); assertThat(events.get(1).scheduledTime() - events.get(0).triggeredTime(), is(allOf(greaterThan(4 * 1000L), lessThan(6 * 1000L)))); From 2e592a3416c23e841d530d789b4d5bddbb47814f Mon Sep 17 00:00:00 2001 From: David Turner Date: Fri, 8 Dec 2023 13:48:52 +0000 Subject: [PATCH 08/14] More logging for ClusterHealthRestCancellationIT (#103193) Relates #100062 --- .../org/elasticsearch/http/ClusterHealthRestCancellationIT.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/ClusterHealthRestCancellationIT.java b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/ClusterHealthRestCancellationIT.java index f165f00c5cc2f..64dd20f1fdfc4 100644 --- a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/ClusterHealthRestCancellationIT.java +++ 
b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/ClusterHealthRestCancellationIT.java @@ -33,6 +33,8 @@ public class ClusterHealthRestCancellationIT extends HttpSmokeTestCase { @TestIssueLogging( issueUrl = "https://github.com/elastic/elasticsearch/issues/100062", value = "org.elasticsearch.test.TaskAssertions:TRACE" + + ",org.elasticsearch.cluster.service.MasterService:TRACE" + + ",org.elasticsearch.tasks.TaskManager:TRACE" ) public void testClusterHealthRestCancellation() throws Exception { From 6e0c031342e8e0c7f57eaa5d0baca0d601b3c9b5 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Fri, 8 Dec 2023 09:24:34 -0500 Subject: [PATCH 09/14] ESQL: Generate railroad diagrams for operators (#103143) This enables the generation of railroad diagrams for unary minus and a bunch of binary operators like `+`, `-`, `%`, and `>=`. Relates to #100558 --- docs/reference/esql/functions/binary.asciidoc | 10 +- .../esql/functions/operators.asciidoc | 2 + .../esql/functions/signature/add.svg | 1 + .../esql/functions/signature/div.svg | 1 + .../esql/functions/signature/equals.svg | 1 + .../esql/functions/signature/greater_than.svg | 1 + .../esql/functions/signature/less_than.svg | 1 + .../esql/functions/signature/mod.svg | 1 + .../esql/functions/signature/mul.svg | 1 + .../esql/functions/signature/neg.svg | 1 + .../esql/functions/signature/not_equals.svg | 1 + .../esql/functions/signature/sub.svg | 1 + docs/reference/esql/functions/unary.asciidoc | 8 ++ .../function/AbstractFunctionTestCase.java | 129 ++++++++++++------ .../expression/function/RailRoadDiagram.java | 85 +++++++++--- 15 files changed, 186 insertions(+), 58 deletions(-) create mode 100644 docs/reference/esql/functions/signature/add.svg create mode 100644 docs/reference/esql/functions/signature/div.svg create mode 100644 docs/reference/esql/functions/signature/equals.svg create mode 100644 docs/reference/esql/functions/signature/greater_than.svg create mode 100644 
docs/reference/esql/functions/signature/less_than.svg create mode 100644 docs/reference/esql/functions/signature/mod.svg create mode 100644 docs/reference/esql/functions/signature/mul.svg create mode 100644 docs/reference/esql/functions/signature/neg.svg create mode 100644 docs/reference/esql/functions/signature/not_equals.svg create mode 100644 docs/reference/esql/functions/signature/sub.svg create mode 100644 docs/reference/esql/functions/unary.asciidoc diff --git a/docs/reference/esql/functions/binary.asciidoc b/docs/reference/esql/functions/binary.asciidoc index ba93f57af7ad6..32e97b7316d84 100644 --- a/docs/reference/esql/functions/binary.asciidoc +++ b/docs/reference/esql/functions/binary.asciidoc @@ -9,4 +9,12 @@ These binary comparison operators are supported: * less than: `<` * less than or equal: `<=` * larger than: `>` -* larger than or equal: `>=` \ No newline at end of file +* larger than or equal: `>=` + +And these mathematical operators are supported: + +[.text-center] +image::esql/functions/signature/add.svg[Embedded,opts=inline] + +[.text-center] +image::esql/functions/signature/sub.svg[Embedded,opts=inline] diff --git a/docs/reference/esql/functions/operators.asciidoc b/docs/reference/esql/functions/operators.asciidoc index c236413b5dd7e..1c88fa200ca11 100644 --- a/docs/reference/esql/functions/operators.asciidoc +++ b/docs/reference/esql/functions/operators.asciidoc @@ -9,6 +9,7 @@ Boolean operators for comparing against one or multiple expressions. // tag::op_list[] * <> +* <> * <> * <> * <> @@ -23,6 +24,7 @@ Boolean operators for comparing against one or multiple expressions. 
// end::op_list[] include::binary.asciidoc[] +include::unary.asciidoc[] include::logical.asciidoc[] include::predicates.asciidoc[] include::cidr_match.asciidoc[] diff --git a/docs/reference/esql/functions/signature/add.svg b/docs/reference/esql/functions/signature/add.svg new file mode 100644 index 0000000000000..10d89efc65f3b --- /dev/null +++ b/docs/reference/esql/functions/signature/add.svg @@ -0,0 +1 @@ +lhs+rhs \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/div.svg b/docs/reference/esql/functions/signature/div.svg new file mode 100644 index 0000000000000..ea061ae6ef625 --- /dev/null +++ b/docs/reference/esql/functions/signature/div.svg @@ -0,0 +1 @@ +lhs/rhs \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/equals.svg b/docs/reference/esql/functions/signature/equals.svg new file mode 100644 index 0000000000000..ade4b1260128f --- /dev/null +++ b/docs/reference/esql/functions/signature/equals.svg @@ -0,0 +1 @@ +lhs==rhs \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/greater_than.svg b/docs/reference/esql/functions/signature/greater_than.svg new file mode 100644 index 0000000000000..f5eb082d14642 --- /dev/null +++ b/docs/reference/esql/functions/signature/greater_than.svg @@ -0,0 +1 @@ +lhs>rhs \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/less_than.svg b/docs/reference/esql/functions/signature/less_than.svg new file mode 100644 index 0000000000000..9858a17450f60 --- /dev/null +++ b/docs/reference/esql/functions/signature/less_than.svg @@ -0,0 +1 @@ +lhs<rhs \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/mod.svg b/docs/reference/esql/functions/signature/mod.svg new file mode 100644 index 0000000000000..20a134a26f232 --- /dev/null +++ b/docs/reference/esql/functions/signature/mod.svg @@ -0,0 +1 @@ +lhs%rhs \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/mul.svg 
b/docs/reference/esql/functions/signature/mul.svg new file mode 100644 index 0000000000000..b15c488eb874b --- /dev/null +++ b/docs/reference/esql/functions/signature/mul.svg @@ -0,0 +1 @@ +lhs*rhs \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/neg.svg b/docs/reference/esql/functions/signature/neg.svg new file mode 100644 index 0000000000000..6090a85310684 --- /dev/null +++ b/docs/reference/esql/functions/signature/neg.svg @@ -0,0 +1 @@ +-v \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/not_equals.svg b/docs/reference/esql/functions/signature/not_equals.svg new file mode 100644 index 0000000000000..d4808abbac5a5 --- /dev/null +++ b/docs/reference/esql/functions/signature/not_equals.svg @@ -0,0 +1 @@ +lhs!=rhs \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/sub.svg b/docs/reference/esql/functions/signature/sub.svg new file mode 100644 index 0000000000000..8d49ad6b0ac1e --- /dev/null +++ b/docs/reference/esql/functions/signature/sub.svg @@ -0,0 +1 @@ +lhs-rhs \ No newline at end of file diff --git a/docs/reference/esql/functions/unary.asciidoc b/docs/reference/esql/functions/unary.asciidoc new file mode 100644 index 0000000000000..2ee35b6c6256f --- /dev/null +++ b/docs/reference/esql/functions/unary.asciidoc @@ -0,0 +1,8 @@ +[discrete] +[[esql-unary-operators]] +=== Unary operators + +These unary mathematical operators are supported: + +[.text-center] +image::esql/functions/signature/neg.svg[Embedded,opts=inline] diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java index 3bac4f1c4b5c0..8b4a126d8a0c7 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java +++ 
b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java @@ -36,6 +36,7 @@ import org.elasticsearch.xpack.esql.expression.function.scalar.conditional.Greatest; import org.elasticsearch.xpack.esql.expression.function.scalar.nulls.Coalesce; import org.elasticsearch.xpack.esql.optimizer.FoldNull; +import org.elasticsearch.xpack.esql.parser.ExpressionBuilder; import org.elasticsearch.xpack.esql.planner.Layout; import org.elasticsearch.xpack.esql.planner.PlannerUtils; import org.elasticsearch.xpack.esql.type.EsqlDataTypes; @@ -158,8 +159,9 @@ protected static Expression deepCopyOfField(String name, DataType type) { /** * Build the expression being tested, for the given source and list of arguments. Test classes need to implement this * to have something to test. + * * @param source the source - * @param args arg list from the test case, should match the length expected + * @param args arg list from the test case, should match the length expected * @return an expression for evaluating the function being tested on the given arguments */ protected abstract Expression build(Source source, List args); @@ -256,8 +258,8 @@ private void testEvaluate(boolean readFloating) { /** * Evaluates a {@link Block} of values, all copied from the input pattern, read directly from the page. *

- * Note that this'll sometimes be a {@link Vector} of values if the - * input pattern contained only a single value. + * Note that this'll sometimes be a {@link Vector} of values if the + * input pattern contained only a single value. *

*/ public final void testEvaluateBlockWithoutNulls() { @@ -267,8 +269,8 @@ public final void testEvaluateBlockWithoutNulls() { /** * Evaluates a {@link Block} of values, all copied from the input pattern, read from an intermediate operator. *

- * Note that this'll sometimes be a {@link Vector} of values if the - * input pattern contained only a single value. + * Note that this'll sometimes be a {@link Vector} of values if the + * input pattern contained only a single value. *

*/ public final void testEvaluateBlockWithoutNullsFloating() { @@ -296,8 +298,8 @@ public final void testEvaluateBlockWithNullsFloating() { * read directly from the {@link Page}, using the * {@link CrankyCircuitBreakerService} which fails randomly. *

- * Note that this'll sometimes be a {@link Vector} of values if the - * input pattern contained only a single value. + * Note that this'll sometimes be a {@link Vector} of values if the + * input pattern contained only a single value. *

*/ public final void testCrankyEvaluateBlockWithoutNulls() { @@ -314,8 +316,8 @@ public final void testCrankyEvaluateBlockWithoutNulls() { * read from an intermediate operator, using the * {@link CrankyCircuitBreakerService} which fails randomly. *

- * Note that this'll sometimes be a {@link Vector} of values if the - * input pattern contained only a single value. + * Note that this'll sometimes be a {@link Vector} of values if the + * input pattern contained only a single value. *

*/ public final void testCrankyEvaluateBlockWithoutNullsFloating() { @@ -544,7 +546,7 @@ public static void testFunctionInfo() { LogManager.getLogger(getTestClass()).info("Skipping function info checks because we're running a portion of the tests"); return; } - FunctionDefinition definition = definition(); + FunctionDefinition definition = definition(functionName()); if (definition == null) { LogManager.getLogger(getTestClass()).info("Skipping function info checks because the function isn't registered"); return; @@ -588,14 +590,15 @@ public static void testFunctionInfo() { /** * Adds cases with {@code null} and asserts that the result is {@code null}. *

- * Note: This won't add more than a single null to any existing test case, - * just to keep the number of test cases from exploding totally. + * Note: This won't add more than a single null to any existing test case, + * just to keep the number of test cases from exploding totally. *

- * @param entirelyNullPreservesType should a test case that only contains parameters - * with the {@code null} type keep it's expected type? - * This is mostly going to be {@code true} - * except for functions that base their type entirely - * on input types like {@link Greatest} or {@link Coalesce}. + * + * @param entirelyNullPreservesType should a test case that only contains parameters + * with the {@code null} type keep it's expected type? + * This is mostly going to be {@code true} + * except for functions that base their type entirely + * on input types like {@link Greatest} or {@link Coalesce}. */ protected static List anyNullIsNull(boolean entirelyNullPreservesType, List testCaseSuppliers) { typesRequired(testCaseSuppliers); @@ -691,8 +694,7 @@ protected static List errorsForCasesWithoutExamples(List, cases will function the same as , * cases. - */ - .filter(types -> types.stream().filter(t -> t == DataTypes.NULL).count() <= 1) + */.filter(types -> types.stream().filter(t -> t == DataTypes.NULL).count() <= 1) .map(types -> typeErrorSupplier(validPerPosition.size() != 1, validPerPosition, types)) .forEach(suppliers::add); return suppliers; @@ -911,30 +913,44 @@ public static void renderSignature() throws IOException { LogManager.getLogger(getTestClass()).info("Skipping rendering signature because we're running a portion of the tests"); return; } - FunctionDefinition definition = definition(); - if (definition == null) { + String rendered = buildSignatureSvg(functionName()); + if (rendered == null) { LogManager.getLogger(getTestClass()).info("Skipping rendering signature because the function isn't registered"); - return; + } else { + LogManager.getLogger(getTestClass()).info("Writing function signature"); + writeToTempDir("signature", rendered, "svg"); } + } - String rendered = RailRoadDiagram.functionSignature(definition); - LogManager.getLogger(getTestClass()).info("Writing function signature"); - writeToTempDir("signature", rendered, "svg"); + private 
static String buildSignatureSvg(String name) throws IOException { + String binaryOperator = binaryOperator(name); + if (binaryOperator != null) { + return RailRoadDiagram.binaryOperator(binaryOperator); + } + String unaryOperator = unaryOperator(name); + if (unaryOperator != null) { + return RailRoadDiagram.unaryOperator(unaryOperator); + } + FunctionDefinition definition = definition(name); + if (definition != null) { + return RailRoadDiagram.functionSignature(definition); + } + return null; } /** * Unique signatures encountered by this test. *

- * We clear this at the beginning of the test class with - * {@link #clearSignatures} out of paranoia. It is - * shared by many tests, after all. + * We clear this at the beginning of the test class with + * {@link #clearSignatures} out of paranoia. It is + * shared by many tests, after all. *

*

- * After each test method we add the signature it operated on via - * {@link #trackSignature}. Once the test class is done we render - * all the unique signatures to a temp file with {@link #renderTypesTable}. - * We use a temp file because that's all we're allowed to write to. - * Gradle will move the files into the docs after this is done. + * After each test method we add the signature it operated on via + * {@link #trackSignature}. Once the test class is done we render + * all the unique signatures to a temp file with {@link #renderTypesTable}. + * We use a temp file because that's all we're allowed to write to. + * Gradle will move the files into the docs after this is done. *

*/ private static final Map, DataType> signatures = new HashMap<>(); @@ -960,7 +976,8 @@ public static void renderTypesTable() throws IOException { if (System.getProperty("generateDocs") == null) { return; } - FunctionDefinition definition = definition(); + String name = functionName(); // TODO types table for operators + FunctionDefinition definition = definition(name); if (definition == null) { LogManager.getLogger(getTestClass()).info("Skipping rendering types because the function isn't registered"); return; @@ -995,8 +1012,11 @@ public static void renderTypesTable() throws IOException { writeToTempDir("types", rendered, "asciidoc"); } - private static FunctionDefinition definition() { - String name = functionName(); + private static String functionName() { + return StringUtils.camelCaseToUnderscore(getTestClass().getSimpleName().replace("Tests", "")).toLowerCase(Locale.ROOT); + } + + private static FunctionDefinition definition(String name) { EsqlFunctionRegistry registry = new EsqlFunctionRegistry(); if (registry.functionExists(name)) { return registry.resolveFunction(name); @@ -1004,15 +1024,44 @@ private static FunctionDefinition definition() { return null; } - private static String functionName() { - return StringUtils.camelCaseToUnderscore(getTestClass().getSimpleName().replace("Tests", "")).toLowerCase(Locale.ROOT); + /** + * If this test is a for a binary operator return its symbol, otherwise return {@code null}. + * This is functionally the reverse of the combination of + * {@link ExpressionBuilder#visitArithmeticBinary} and {@link ExpressionBuilder#visitComparison}. 
+ */ + private static String binaryOperator(String name) { + return switch (name) { + case "add" -> "+"; + case "div" -> "/"; + case "equals" -> "=="; + case "greater_than" -> ">"; + case "greater_than_or_equal_to" -> ">="; + case "less_than" -> "<"; + case "less_than_or_equal_to" -> "<="; + case "mod" -> "%"; + case "mul" -> "*"; + case "not_equals" -> "!="; + case "sub" -> "-"; + default -> null; + }; + } + + /** + * If this tests is for a unary operator return its symbol, otherwise return {@code null}. + * This is functionally the reverse of {@link ExpressionBuilder#visitArithmeticUnary}. + */ + private static String unaryOperator(String name) { + return switch (name) { + case "neg" -> "-"; + default -> null; + }; } /** * Write some text to a tempdir so we can copy it to the docs later. *

- * We need to write to a tempdir instead of the docs because the tests - * don't have write permission to the docs. + * We need to write to a tempdir instead of the docs because the tests + * don't have write permission to the docs. *

*/ private static void writeToTempDir(String subdir, String str, String extension) throws IOException { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/RailRoadDiagram.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/RailRoadDiagram.java index 65977ea6a174f..d6501568a85ec 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/RailRoadDiagram.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/RailRoadDiagram.java @@ -42,6 +42,10 @@ public class RailRoadDiagram { */ private static final LazyInitializable FONT = new LazyInitializable<>(() -> loadFont().deriveFont(20.0F)); + /** + * Generate a railroad diagram for a function. The output would look like + * {@code FOO(a, b, c)}. + */ static String functionSignature(FunctionDefinition definition) throws IOException { List expressions = new ArrayList<>(); expressions.add(new SpecialSequence(definition.name().toUpperCase(Locale.ROOT))); @@ -61,10 +65,34 @@ static String functionSignature(FunctionDefinition definition) throws IOExceptio } } expressions.add(new Syntax(")")); - net.nextencia.rrdiagram.grammar.model.Expression rr = new Sequence( - expressions.toArray(net.nextencia.rrdiagram.grammar.model.Expression[]::new) - ); - RRDiagram rrDiagram = new GrammarToRRDiagram().convert(new Rule("test", rr)); + return toSvg(new Sequence(expressions.toArray(Expression[]::new))); + } + + /** + * Generate a railroad diagram for binary operator. The output would look like + * {@code lhs + rhs}. + */ + static String binaryOperator(String operator) throws IOException { + List expressions = new ArrayList<>(); + expressions.add(new Literal("lhs")); + expressions.add(new Syntax(operator)); + expressions.add(new Literal("rhs")); + return toSvg(new Sequence(expressions.toArray(Expression[]::new))); + } + + /** + * Generate a railroad diagram for unary operator. 
The output would look like + * {@code -v}. + */ + static String unaryOperator(String operator) throws IOException { + List expressions = new ArrayList<>(); + expressions.add(new Syntax(operator)); + expressions.add(new Literal("v")); + return toSvg(new Sequence(expressions.toArray(Expression[]::new))); + } + + private static String toSvg(Expression exp) throws IOException { + RRDiagram rrDiagram = new GrammarToRRDiagram().convert(new Rule("", exp)); RRDiagramToSVG toSvg = new RRDiagramToSVG(); toSvg.setSpecialSequenceShape(RRDiagramToSVG.BoxShape.RECTANGLE); @@ -74,20 +102,29 @@ static String functionSignature(FunctionDefinition definition) throws IOExceptio toSvg.setLiteralFont(FONT.getOrCompute()); toSvg.setRuleFont(FONT.getOrCompute()); - /* - * "Tighten" the styles in the SVG so they beat the styles sitting in the - * main page. We need this because we're embedding the SVG into the page. - * We need to embed the SVG into the page so it can get fonts loaded in the - * primary stylesheet. We need to load a font so they images are consistent - * on all clients. - */ - return toSvg.convert(rrDiagram).replace(".c", "#guide .c").replace(".k", "#guide .k").replace(".s", "#guide .s"); + return tightenStyles(toSvg.convert(rrDiagram)); + } + + /** + * "Tighten" the styles in the SVG so they beat the styles sitting in the + * main page. We need this because we're embedding the SVG into the page. + * We need to embed the SVG into the page so it can get fonts loaded in the + * primary stylesheet. We need to load a font so they images are consistent + * on all clients. + */ + private static String tightenStyles(String svg) { + for (String c : new String[] { "c", "k", "s", "j", "l" }) { + svg = svg.replace("." + c, "#guide ." + c); + } + return svg; } /** * Like a literal but with light grey text for a more muted appearance for syntax. 
*/ private static class Syntax extends Literal { + private static final String LITERAL_CLASS = "l"; + private static final String SYNTAX_CLASS = "lsyn"; private static final String LITERAL_TEXT_CLASS = "j"; private static final String SYNTAX_TEXT_CLASS = "syn"; private static final String SYNTAX_GREY = "8D8D8D"; @@ -101,13 +138,20 @@ private Syntax(String text) { @Override protected RRElement toRRElement(GrammarToRRDiagram grammarToRRDiagram) { - // This performs a monumentally rude hack to replace the text color of this element. + /* + * This performs a monumentally rude hack to replace the text color of this element. + * It renders a "literal" element but intercepts the layer that defines it's css class + * and replaces it with our own. + */ return new RRText(RRText.Type.LITERAL, text, null) { @Override protected void toSVG(RRDiagramToSVG rrDiagramToSVG, int xOffset, int yOffset, RRDiagram.SvgContent svgContent) { super.toSVG(rrDiagramToSVG, xOffset, yOffset, new RRDiagram.SvgContent() { @Override public String getDefinedCSSClass(String style) { + if (style.equals(LITERAL_CLASS)) { + return svgContent.getDefinedCSSClass(SYNTAX_CLASS); + } if (style.equals(LITERAL_TEXT_CLASS)) { return svgContent.getDefinedCSSClass(SYNTAX_TEXT_CLASS); } @@ -116,11 +160,18 @@ public String getDefinedCSSClass(String style) { @Override public String setCSSClass(String cssClass, String definition) { - if (false == cssClass.equals(LITERAL_TEXT_CLASS)) { - return svgContent.setCSSClass(cssClass, definition); + if (cssClass.equals(LITERAL_CLASS)) { + svgContent.setCSSClass(cssClass, definition); + return svgContent.setCSSClass(SYNTAX_CLASS, definition); + } + if (cssClass.equals(LITERAL_TEXT_CLASS)) { + svgContent.setCSSClass(cssClass, definition); + return svgContent.setCSSClass( + SYNTAX_TEXT_CLASS, + definition.replace("fill:#000000", "fill:#" + SYNTAX_GREY) + ); } - svgContent.setCSSClass(cssClass, definition); - return svgContent.setCSSClass(SYNTAX_TEXT_CLASS, 
definition.replace("fill:#000000", "fill:#" + SYNTAX_GREY)); + return svgContent.setCSSClass(cssClass, definition); } @Override From de70fcd4e4b48dd611f33a2c1e13b81cd8a6abf9 Mon Sep 17 00:00:00 2001 From: Andrei Dan Date: Fri, 8 Dec 2023 16:53:28 +0200 Subject: [PATCH 10/14] Create a DSL health indicator as part of the health API (#103130) This adds a health indicator named `data_stream_lifecycle` that will detect data stream backing indices that cannot make progress (stagnating) due to repeatedly error-ing in their lifecycle execution. The output of the indicator looks like this: ``` "data_stream_lifecycle" : { "status" : "yellow", "symptom" : "2 backing indices have repeatedly encountered errors whilst trying to advance in its lifecycle", "details" : { "stagnating_backing_indices_count" : 2, "stagnating_backing_indices" : [ { "index_name" : ".ds-metrics-foo-2023.12.07-000002", "first_occurrence_timestamp" : 1701951305340, "retry_count" : 4 }, { "index_name" : ".ds-metrics-foo-2023.12.07-000001", "first_occurrence_timestamp" : 1701951305340, "retry_count" : 4 } ], "total_backing_indices_in_error" : 2 }, "impacts" : [ { "id" : "elasticsearch:health:dsl:impact:stagnating_backing_index", "severity" : 3, "description" : "Data streams backing indices cannot make progress in their lifecycle. The performance and stability of the indices and/or the cluster could be impacted.", "impact_areas" : [ "deployment_management" ] } ], "diagnosis" : [ { "id" : "elasticsearch:health:dsl:diagnosis:stagnating_dsl_backing_index", "cause" : "Some backing indices are repeatedly encountering errors in their lifecycle execution.", "action" : "Check the current status of the affected indices using the [GET //_lifecycle/explain] API. 
Please replace the in the API with the actual index name (or the data stream name for a wider overview).", "help_url" : "https://ela.st/dsl-explain", "affected_resources" : { "indices" : [ ".ds-metrics-foo-2023.12.07-000002", ".ds-metrics-foo-2023.12.07-000001" ] } } ] } ``` Documentation will follow in a subsequent PR. --- docs/changelog/103130.yaml | 5 + .../DataStreamLifecycleServiceIT.java | 76 +++++++++++ .../datastreams/DataStreamsPlugin.java | 13 +- ...StreamLifecycleHealthIndicatorService.java | 125 ++++++++++++++++++ ...mLifecycleHealthIndicatorServiceTests.java | 102 ++++++++++++++ 5 files changed, 320 insertions(+), 1 deletion(-) create mode 100644 docs/changelog/103130.yaml create mode 100644 modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/health/DataStreamLifecycleHealthIndicatorService.java create mode 100644 modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/health/DataStreamLifecycleHealthIndicatorServiceTests.java diff --git a/docs/changelog/103130.yaml b/docs/changelog/103130.yaml new file mode 100644 index 0000000000000..3ef56ae84d123 --- /dev/null +++ b/docs/changelog/103130.yaml @@ -0,0 +1,5 @@ +pr: 103130 +summary: Create a DSL health indicator as part of the health API +area: Health +type: feature +issues: [] diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceIT.java index d3eaee36f67f7..03bd753e29068 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceIT.java @@ -30,6 +30,7 @@ import org.elasticsearch.action.datastreams.lifecycle.ErrorEntry; import 
org.elasticsearch.action.datastreams.lifecycle.ExplainIndexDataStreamLifecycle; import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.cluster.coordination.StableMasterHealthIndicatorService; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.cluster.metadata.DataStreamAction; @@ -46,6 +47,11 @@ import org.elasticsearch.datastreams.DataStreamsPlugin; import org.elasticsearch.datastreams.lifecycle.action.ExplainDataStreamLifecycleAction; import org.elasticsearch.datastreams.lifecycle.action.PutDataStreamLifecycleAction; +import org.elasticsearch.datastreams.lifecycle.health.DataStreamLifecycleHealthIndicatorService; +import org.elasticsearch.health.Diagnosis; +import org.elasticsearch.health.GetHealthAction; +import org.elasticsearch.health.HealthIndicatorResult; +import org.elasticsearch.health.HealthStatus; import org.elasticsearch.health.node.DataStreamLifecycleHealthInfo; import org.elasticsearch.health.node.DslErrorInfo; import org.elasticsearch.health.node.FetchHealthInfoCacheAction; @@ -76,9 +82,12 @@ import static org.elasticsearch.datastreams.lifecycle.DataStreamLifecycleService.DATA_STREAM_MERGE_POLICY_TARGET_FLOOR_SEGMENT_SETTING; import static org.elasticsearch.datastreams.lifecycle.DataStreamLifecycleService.ONE_HUNDRED_MB; import static org.elasticsearch.datastreams.lifecycle.DataStreamLifecycleService.TARGET_MERGE_FACTOR_VALUE; +import static org.elasticsearch.datastreams.lifecycle.health.DataStreamLifecycleHealthIndicatorService.STAGNATING_BACKING_INDICES_DIAGNOSIS_DEF; +import static org.elasticsearch.datastreams.lifecycle.health.DataStreamLifecycleHealthIndicatorService.STAGNATING_INDEX_IMPACT; import static org.elasticsearch.index.IndexSettings.LIFECYCLE_ORIGINATION_DATE; import static org.elasticsearch.indices.ShardLimitValidator.SETTING_CLUSTER_MAX_SHARDS_PER_NODE; import static 
org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -447,6 +456,27 @@ public void testErrorRecordingOnRollover() throws Exception { assertThat(errorInfo.retryCount(), greaterThanOrEqualTo(3)); }); + GetHealthAction.Response healthResponse = client().execute(GetHealthAction.INSTANCE, new GetHealthAction.Request(true, 1000)) + .actionGet(); + HealthIndicatorResult masterIsStableIndicator = healthResponse.findIndicator(StableMasterHealthIndicatorService.NAME); + // if the cluster doesn't have a stable master we'll avoid asserting on the health report API as some indicators will not + // be computed + if (masterIsStableIndicator.status() == HealthStatus.GREEN) { + // the shards capacity indicator is dictating the overall status + assertThat(healthResponse.getStatus(), is(HealthStatus.RED)); + HealthIndicatorResult dslIndicator = healthResponse.findIndicator(DataStreamLifecycleHealthIndicatorService.NAME); + assertThat(dslIndicator.status(), is(HealthStatus.YELLOW)); + assertThat(dslIndicator.impacts(), is(STAGNATING_INDEX_IMPACT)); + assertThat( + dslIndicator.symptom(), + is("A backing index has repeatedly encountered errors whilst trying to advance in its lifecycle") + ); + + Diagnosis diagnosis = dslIndicator.diagnosisList().get(0); + assertThat(diagnosis.definition(), is(STAGNATING_BACKING_INDICES_DIAGNOSIS_DEF)); + assertThat(diagnosis.affectedResources().get(0).getValues(), containsInAnyOrder(writeIndexName)); + } + // let's reset the cluster max shards per node limit to allow rollover to proceed and check the error store is empty updateClusterSettings(Settings.builder().putNull("*")); @@ -476,6 +506,14 @@ public void testErrorRecordingOnRollover() throws Exception { DataStreamLifecycleHealthInfo dslHealthInfoOnHealthNode = 
healthNodeResponse.getHealthInfo().dslHealthInfo(); assertThat(dslHealthInfoOnHealthNode, is(DataStreamLifecycleHealthInfo.NO_DSL_ERRORS)); }); + + healthResponse = client().execute(GetHealthAction.INSTANCE, new GetHealthAction.Request(true, 1000)).actionGet(); + masterIsStableIndicator = healthResponse.findIndicator(StableMasterHealthIndicatorService.NAME); + // if the cluster doesn't have a stable master we'll avoid asserting on the health report API as some indicators will not + // be computed + if (masterIsStableIndicator.status() == HealthStatus.GREEN) { + assertThat(healthResponse.getStatus(), is(HealthStatus.GREEN)); + } } public void testErrorRecordingOnRetention() throws Exception { @@ -569,6 +607,30 @@ public void testErrorRecordingOnRetention() throws Exception { assertThat(List.of(firstGenerationIndex, secondGenerationIndex).contains(errorInfo.indexName()), is(true)); }); + GetHealthAction.Response healthResponse = client().execute(GetHealthAction.INSTANCE, new GetHealthAction.Request(true, 1000)) + .actionGet(); + HealthIndicatorResult masterIsStableIndicator = healthResponse.findIndicator(StableMasterHealthIndicatorService.NAME); + // if the cluster doesn't have a stable master we'll avoid asserting on the health report API as some indicators will not + // be computed + if (masterIsStableIndicator.status() == HealthStatus.GREEN) { + // the dsl indicator should turn the overall status yellow + assertThat(healthResponse.getStatus(), is(HealthStatus.YELLOW)); + HealthIndicatorResult dslIndicator = healthResponse.findIndicator(DataStreamLifecycleHealthIndicatorService.NAME); + assertThat(dslIndicator.status(), is(HealthStatus.YELLOW)); + assertThat(dslIndicator.impacts(), is(STAGNATING_INDEX_IMPACT)); + assertThat( + dslIndicator.symptom(), + is("2 backing indices have repeatedly encountered errors whilst trying to advance in its lifecycle") + ); + + Diagnosis diagnosis = dslIndicator.diagnosisList().get(0); + assertThat(diagnosis.definition(), 
is(STAGNATING_BACKING_INDICES_DIAGNOSIS_DEF)); + assertThat( + diagnosis.affectedResources().get(0).getValues(), + containsInAnyOrder(firstGenerationIndex, secondGenerationIndex) + ); + } + // let's mark the index as writeable and make sure it's deleted and the error store is empty updateIndexSettings(Settings.builder().put(READ_ONLY.settingName(), false), firstGenerationIndex); @@ -598,6 +660,20 @@ public void testErrorRecordingOnRetention() throws Exception { DataStreamLifecycleHealthInfo dslHealthInfoOnHealthNode = healthNodeResponse.getHealthInfo().dslHealthInfo(); assertThat(dslHealthInfoOnHealthNode, is(DataStreamLifecycleHealthInfo.NO_DSL_ERRORS)); }); + + healthResponse = client().execute(GetHealthAction.INSTANCE, new GetHealthAction.Request(true, 1000)).actionGet(); + masterIsStableIndicator = healthResponse.findIndicator(StableMasterHealthIndicatorService.NAME); + // if the cluster doesn't have a stable master we'll avoid asserting on the health report API as some indicators will not + // be computed + if (masterIsStableIndicator.status() == HealthStatus.GREEN) { + // the dsl indicator should turn the overall status yellow + assertThat(healthResponse.getStatus(), is(HealthStatus.GREEN)); + HealthIndicatorResult dslIndicator = healthResponse.findIndicator(DataStreamLifecycleHealthIndicatorService.NAME); + assertThat(dslIndicator.status(), is(HealthStatus.GREEN)); + assertThat(dslIndicator.impacts().size(), is(0)); + assertThat(dslIndicator.symptom(), is("Data streams are executing their lifecycles without issues")); + assertThat(dslIndicator.diagnosisList().size(), is(0)); + } } finally { // when the test executes successfully this will not be needed however, otherwise we need to make sure the index is // "delete-able" for test cleanup diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java index 
9ac3a1afed5a5..fb93b7d688a74 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java @@ -47,6 +47,7 @@ import org.elasticsearch.datastreams.lifecycle.action.TransportGetDataStreamLifecycleAction; import org.elasticsearch.datastreams.lifecycle.action.TransportGetDataStreamLifecycleStatsAction; import org.elasticsearch.datastreams.lifecycle.action.TransportPutDataStreamLifecycleAction; +import org.elasticsearch.datastreams.lifecycle.health.DataStreamLifecycleHealthIndicatorService; import org.elasticsearch.datastreams.lifecycle.health.DataStreamLifecycleHealthInfoPublisher; import org.elasticsearch.datastreams.lifecycle.rest.RestDataStreamLifecycleStatsAction; import org.elasticsearch.datastreams.lifecycle.rest.RestDeleteDataStreamLifecycleAction; @@ -60,8 +61,10 @@ import org.elasticsearch.datastreams.rest.RestMigrateToDataStreamAction; import org.elasticsearch.datastreams.rest.RestModifyDataStreamsAction; import org.elasticsearch.datastreams.rest.RestPromoteDataStreamAction; +import org.elasticsearch.health.HealthIndicatorService; import org.elasticsearch.index.IndexSettingProvider; import org.elasticsearch.plugins.ActionPlugin; +import org.elasticsearch.plugins.HealthPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestHandler; @@ -76,7 +79,7 @@ import static org.elasticsearch.cluster.metadata.DataStreamLifecycle.DATA_STREAM_LIFECYCLE_ORIGIN; -public class DataStreamsPlugin extends Plugin implements ActionPlugin { +public class DataStreamsPlugin extends Plugin implements ActionPlugin, HealthPlugin { public static final Setting TIME_SERIES_POLL_INTERVAL = Setting.timeSetting( "time_series.poll_interval", @@ -112,6 +115,7 @@ public class DataStreamsPlugin extends Plugin implements ActionPlugin { private final SetOnce dataLifecycleInitialisationService = 
new SetOnce<>(); private final SetOnce dataStreamLifecycleErrorsPublisher = new SetOnce<>(); + private final SetOnce dataStreamLifecycleHealthIndicatorService = new SetOnce<>(); private final Settings settings; public DataStreamsPlugin(Settings settings) { @@ -184,6 +188,8 @@ public Collection createComponents(PluginServices services) { ) ); dataLifecycleInitialisationService.get().init(); + dataStreamLifecycleHealthIndicatorService.set(new DataStreamLifecycleHealthIndicatorService()); + components.add(errorStoreInitialisationService.get()); components.add(dataLifecycleInitialisationService.get()); components.add(dataStreamLifecycleErrorsPublisher.get()); @@ -251,4 +257,9 @@ public void close() throws IOException { throw new ElasticsearchException("unable to close the data stream lifecycle service", e); } } + + @Override + public Collection getHealthIndicatorServices() { + return List.of(dataStreamLifecycleHealthIndicatorService.get()); + } } diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/health/DataStreamLifecycleHealthIndicatorService.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/health/DataStreamLifecycleHealthIndicatorService.java new file mode 100644 index 0000000000000..0628bed0f9019 --- /dev/null +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/health/DataStreamLifecycleHealthIndicatorService.java @@ -0,0 +1,125 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.datastreams.lifecycle.health; + +import org.elasticsearch.health.Diagnosis; +import org.elasticsearch.health.HealthIndicatorDetails; +import org.elasticsearch.health.HealthIndicatorImpact; +import org.elasticsearch.health.HealthIndicatorResult; +import org.elasticsearch.health.HealthIndicatorService; +import org.elasticsearch.health.HealthStatus; +import org.elasticsearch.health.ImpactArea; +import org.elasticsearch.health.SimpleHealthIndicatorDetails; +import org.elasticsearch.health.node.DataStreamLifecycleHealthInfo; +import org.elasticsearch.health.node.DslErrorInfo; +import org.elasticsearch.health.node.HealthInfo; + +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.List; + +import static java.util.stream.Collectors.toList; + +public class DataStreamLifecycleHealthIndicatorService implements HealthIndicatorService { + + public static final String NAME = "data_stream_lifecycle"; + public static final String DSL_EXPLAIN_HELP_URL = "https://ela.st/explain-data-stream-lifecycle"; + + public static final String STAGNATING_BACKING_INDEX_IMPACT_ID = "stagnating_backing_index"; + + public static final List STAGNATING_INDEX_IMPACT = List.of( + new HealthIndicatorImpact( + NAME, + STAGNATING_BACKING_INDEX_IMPACT_ID, + 3, + "Data streams backing indices cannot make progress in their lifecycle. The performance and " + + "stability of the indices and/or the cluster could be impacted.", + List.of(ImpactArea.DEPLOYMENT_MANAGEMENT) + ) + ); + + public static final Diagnosis.Definition STAGNATING_BACKING_INDICES_DIAGNOSIS_DEF = new Diagnosis.Definition( + NAME, + "stagnating_dsl_backing_index", + "Some backing indices are repeatedly encountering errors in their lifecycle execution.", + "Check the current status of the affected indices using the [GET //_lifecycle/explain] API. 
Please " + + "replace the in the API with the actual index name (or the data stream name for a wider overview).", + DSL_EXPLAIN_HELP_URL + ); + + @Override + public String name() { + return NAME; + } + + @Override + public HealthIndicatorResult calculate(boolean verbose, int maxAffectedResourcesCount, HealthInfo healthInfo) { + DataStreamLifecycleHealthInfo dataStreamLifecycleHealthInfo = healthInfo.dslHealthInfo(); + if (dataStreamLifecycleHealthInfo == null) { + // DSL reports health information on every run, so data will eventually arrive to the health node. In the meantime, let's + // report UNKNOWN health + return createIndicator( + HealthStatus.GREEN, + "No data stream lifecycle health data available yet. Health information will be reported after the first run.", + HealthIndicatorDetails.EMPTY, + List.of(), + List.of() + ); + } + + List stagnatingBackingIndices = dataStreamLifecycleHealthInfo.dslErrorsInfo(); + if (stagnatingBackingIndices.isEmpty()) { + return createIndicator( + HealthStatus.GREEN, + "Data streams are executing their lifecycles without issues", + createDetails(verbose, dataStreamLifecycleHealthInfo), + List.of(), + List.of() + ); + } else { + List affectedIndices = stagnatingBackingIndices.stream() + .map(DslErrorInfo::indexName) + .limit(Math.min(maxAffectedResourcesCount, stagnatingBackingIndices.size())) + .collect(toList()); + return createIndicator( + HealthStatus.YELLOW, + (stagnatingBackingIndices.size() > 1 ? 
stagnatingBackingIndices.size() + " backing indices have" : "A backing index has") + + " repeatedly encountered errors whilst trying to advance in its lifecycle", + createDetails(verbose, dataStreamLifecycleHealthInfo), + STAGNATING_INDEX_IMPACT, + List.of( + new Diagnosis( + STAGNATING_BACKING_INDICES_DIAGNOSIS_DEF, + List.of(new Diagnosis.Resource(Diagnosis.Resource.Type.INDEX, affectedIndices)) + ) + ) + ); + } + } + + private static HealthIndicatorDetails createDetails(boolean verbose, DataStreamLifecycleHealthInfo dataStreamLifecycleHealthInfo) { + if (verbose == false) { + return HealthIndicatorDetails.EMPTY; + } + + var details = new HashMap(); + details.put("total_backing_indices_in_error", dataStreamLifecycleHealthInfo.totalErrorEntriesCount()); + details.put("stagnating_backing_indices_count", dataStreamLifecycleHealthInfo.dslErrorsInfo().size()); + if (dataStreamLifecycleHealthInfo.dslErrorsInfo().isEmpty() == false) { + details.put("stagnating_backing_indices", dataStreamLifecycleHealthInfo.dslErrorsInfo().stream().map(dslError -> { + LinkedHashMap errorDetails = new LinkedHashMap<>(3, 1L); + errorDetails.put("index_name", dslError.indexName()); + errorDetails.put("first_occurrence_timestamp", dslError.firstOccurrence()); + errorDetails.put("retry_count", dslError.retryCount()); + return errorDetails; + }).toList()); + } + return new SimpleHealthIndicatorDetails(details); + } +} diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/health/DataStreamLifecycleHealthIndicatorServiceTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/health/DataStreamLifecycleHealthIndicatorServiceTests.java new file mode 100644 index 0000000000000..877b463301311 --- /dev/null +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/health/DataStreamLifecycleHealthIndicatorServiceTests.java @@ -0,0 +1,102 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.datastreams.lifecycle.health; + +import org.elasticsearch.cluster.metadata.DataStream; +import org.elasticsearch.common.Strings; +import org.elasticsearch.health.Diagnosis; +import org.elasticsearch.health.HealthIndicatorDetails; +import org.elasticsearch.health.HealthIndicatorResult; +import org.elasticsearch.health.HealthStatus; +import org.elasticsearch.health.node.DataStreamLifecycleHealthInfo; +import org.elasticsearch.health.node.DslErrorInfo; +import org.elasticsearch.health.node.HealthInfo; +import org.elasticsearch.test.ESTestCase; +import org.junit.Before; + +import java.util.List; +import java.util.Locale; +import java.util.Map; + +import static org.elasticsearch.datastreams.lifecycle.health.DataStreamLifecycleHealthIndicatorService.STAGNATING_BACKING_INDICES_DIAGNOSIS_DEF; +import static org.elasticsearch.datastreams.lifecycle.health.DataStreamLifecycleHealthIndicatorService.STAGNATING_INDEX_IMPACT; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.core.IsNot.not; + +public class DataStreamLifecycleHealthIndicatorServiceTests extends ESTestCase { + + private DataStreamLifecycleHealthIndicatorService service; + + @Before + public void setupService() { + service = new DataStreamLifecycleHealthIndicatorService(); + } + + public void testGreenWhenNoDSLHealthData() { + HealthIndicatorResult result = service.calculate(true, new HealthInfo(Map.of(), null)); + assertThat(result.status(), is(HealthStatus.GREEN)); + assertThat( + result.symptom(), + is("No data stream lifecycle health data available yet. 
Health information will be reported after the first run.") + ); + assertThat(result.details(), is(HealthIndicatorDetails.EMPTY)); + assertThat(result.impacts(), is(List.of())); + assertThat(result.diagnosisList(), is(List.of())); + } + + public void testGreenWhenEmptyListOfStagnatingIndices() { + HealthIndicatorResult result = service.calculate(true, new HealthInfo(Map.of(), new DataStreamLifecycleHealthInfo(List.of(), 15))); + assertThat(result.status(), is(HealthStatus.GREEN)); + assertThat(result.symptom(), is("Data streams are executing their lifecycles without issues")); + assertThat(result.details(), is(not(HealthIndicatorDetails.EMPTY))); + assertThat(Strings.toString(result.details()), containsString("\"total_backing_indices_in_error\":15")); + assertThat(result.impacts(), is(List.of())); + assertThat(result.diagnosisList(), is(List.of())); + } + + public void testYellowWhenStagnatingIndicesPresent() { + String secondGenerationIndex = DataStream.getDefaultBackingIndexName("foo", 2L); + String firstGenerationIndex = DataStream.getDefaultBackingIndexName("foo", 1L); + HealthIndicatorResult result = service.calculate( + true, + new HealthInfo( + Map.of(), + new DataStreamLifecycleHealthInfo( + List.of(new DslErrorInfo(secondGenerationIndex, 1L, 200), new DslErrorInfo(firstGenerationIndex, 3L, 100)), + 15 + ) + ) + ); + assertThat(result.status(), is(HealthStatus.YELLOW)); + assertThat(result.symptom(), is("2 backing indices have repeatedly encountered errors whilst trying to advance in its lifecycle")); + assertThat(result.details(), is(not(HealthIndicatorDetails.EMPTY))); + String detailsAsString = Strings.toString(result.details()); + assertThat(detailsAsString, containsString("\"total_backing_indices_in_error\":15")); + assertThat(detailsAsString, containsString("\"stagnating_backing_indices_count\":2")); + assertThat( + detailsAsString, + containsString( + String.format( + Locale.ROOT, + "\"index_name\":\"%s\"," + + 
"\"first_occurrence_timestamp\":1,\"retry_count\":200},{\"index_name\":\"%s\"," + + "\"first_occurrence_timestamp\":3,\"retry_count\":100", + secondGenerationIndex, + firstGenerationIndex + ) + ) + ); + assertThat(result.impacts(), is(STAGNATING_INDEX_IMPACT)); + Diagnosis diagnosis = result.diagnosisList().get(0); + assertThat(diagnosis.definition(), is(STAGNATING_BACKING_INDICES_DIAGNOSIS_DEF)); + assertThat(diagnosis.affectedResources().get(0).getValues(), containsInAnyOrder(secondGenerationIndex, firstGenerationIndex)); + } +} From 7ded90655f4d18e7edcc83e6325849bdd0c53309 Mon Sep 17 00:00:00 2001 From: Nikolaj Volgushev Date: Fri, 8 Dec 2023 16:08:26 +0100 Subject: [PATCH 11/14] Hot-reloadable remote cluster credentials (#102798) This PR enables RCS 2.0 remote clusters to be configured without the need to restart nodes. It works as the follows (assuming both clusters are already running): 1. Get a cross-cluster API key for accessing the _remote_ cluster 2. Add cross-cluster API key to keystores of the _local_ cluster, e.g. ``` echo -n xxx | ./bin/elasticsearch-keystore add cluster.remote.my.credentials -x ``` 3. Call [ReloadSecureSettings API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-reload-secure-settings.html) on the _local_ cluster 4. Configure RCS 2.0 remote cluster should now just work for the _local_ cluster, e.g. ``` PUT /_cluster/settings {"persistent":{"cluster":{"remote":{"my":{"seeds":["127.0.0.1:9443"]}}}}} ``` This PR does **not** include functionality to automatically re-build connections on secure settings reload. I will add this in a follow up PR. The high level technical approach is to maintain a credentials manager class and use this to attach credentials for connections to remote clusters. This [comment](https://github.com/elastic/elasticsearch/pull/102798/files#r1417708553) also provides more context on some lower level details. 
Relates: https://github.com/elastic/elasticsearch/pull/98120 Relates: ES-6764 --- docs/changelog/102798.yaml | 5 + .../transport/ProxyConnectionStrategy.java | 2 +- .../transport/RemoteClusterConnection.java | 25 +- .../RemoteClusterCredentialsManager.java | 52 +++ .../transport/RemoteClusterService.java | 27 +- .../transport/RemoteConnectionManager.java | 61 +++- .../transport/SniffConnectionStrategy.java | 6 +- .../ProxyConnectionStrategyTests.java | 54 ++- .../RemoteClusterConnectionTests.java | 69 +++- .../RemoteClusterCredentialsManagerTests.java | 43 +++ .../RemoteConnectionManagerTests.java | 32 +- .../RemoteConnectionStrategyTests.java | 18 +- .../SniffConnectionStrategyTests.java | 66 +++- .../ReloadRemoteClusterCredentialsAction.java | 50 +++ .../authz/privilege/SystemPrivilege.java | 4 +- .../xpack/security/operator/Constants.java | 1 + .../ReloadRemoteClusterCredentialsIT.java | 314 ++++++++++++++++++ .../xpack/security/Security.java | 104 ++++-- ...tReloadRemoteClusterCredentialsAction.java | 54 +++ .../RemoteClusterCredentialsResolver.java | 51 --- .../SecurityServerTransportInterceptor.java | 45 +-- .../xpack/security/LocalStateSecurity.java | 14 +- .../xpack/security/SecurityTests.java | 102 ++++++ ...RemoteClusterCredentialsResolverTests.java | 38 --- ...curityServerTransportInterceptorTests.java | 77 ++--- 25 files changed, 1054 insertions(+), 260 deletions(-) create mode 100644 docs/changelog/102798.yaml create mode 100644 server/src/main/java/org/elasticsearch/transport/RemoteClusterCredentialsManager.java create mode 100644 server/src/test/java/org/elasticsearch/transport/RemoteClusterCredentialsManagerTests.java create mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/settings/ReloadRemoteClusterCredentialsAction.java create mode 100644 x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/ReloadRemoteClusterCredentialsIT.java create mode 100644 
x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/settings/TransportReloadRemoteClusterCredentialsAction.java delete mode 100644 x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/RemoteClusterCredentialsResolver.java delete mode 100644 x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/RemoteClusterCredentialsResolverTests.java diff --git a/docs/changelog/102798.yaml b/docs/changelog/102798.yaml new file mode 100644 index 0000000000000..986ad99f96a19 --- /dev/null +++ b/docs/changelog/102798.yaml @@ -0,0 +1,5 @@ +pr: 102798 +summary: Hot-reloadable remote cluster credentials +area: Security +type: enhancement +issues: [] diff --git a/server/src/main/java/org/elasticsearch/transport/ProxyConnectionStrategy.java b/server/src/main/java/org/elasticsearch/transport/ProxyConnectionStrategy.java index 320b9cfdbf7e6..cfb6f872ce748 100644 --- a/server/src/main/java/org/elasticsearch/transport/ProxyConnectionStrategy.java +++ b/server/src/main/java/org/elasticsearch/transport/ProxyConnectionStrategy.java @@ -179,7 +179,7 @@ public class ProxyConnectionStrategy extends RemoteConnectionStrategy { RemoteConnectionManager.wrapConnectionWithRemoteClusterInfo( newConnection, clusterAlias, - actualProfile.getTransportProfile() + connectionManager.getCredentialsManager() ), actualProfile.getHandshakeTimeout(), cn -> true, diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java index a055e4122257f..3c74e46851504 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java @@ -57,15 +57,28 @@ final class RemoteClusterConnection implements Closeable { * @param settings the nodes settings object * @param clusterAlias the configured alias of the cluster to connect to * @param 
transportService the local nodes transport service - * @param credentialsProtected Whether the remote cluster is protected by a credentials, i.e. it has a credentials configured - * via secure setting. This means the remote cluster uses the new configurable access RCS model - * (as opposed to the basic model). + * @param credentialsManager object to look up remote cluster credentials by cluster alias. If a cluster is protected by a credential, + * i.e. it has a credential configured via secure setting. + * This means the remote cluster uses the advanced RCS model (as opposed to the basic model). */ - RemoteClusterConnection(Settings settings, String clusterAlias, TransportService transportService, boolean credentialsProtected) { + RemoteClusterConnection( + Settings settings, + String clusterAlias, + TransportService transportService, + RemoteClusterCredentialsManager credentialsManager + ) { this.transportService = transportService; this.clusterAlias = clusterAlias; - ConnectionProfile profile = RemoteConnectionStrategy.buildConnectionProfile(clusterAlias, settings, credentialsProtected); - this.remoteConnectionManager = new RemoteConnectionManager(clusterAlias, createConnectionManager(profile, transportService)); + ConnectionProfile profile = RemoteConnectionStrategy.buildConnectionProfile( + clusterAlias, + settings, + credentialsManager.hasCredentials(clusterAlias) + ); + this.remoteConnectionManager = new RemoteConnectionManager( + clusterAlias, + credentialsManager, + createConnectionManager(profile, transportService) + ); this.connectionStrategy = RemoteConnectionStrategy.buildStrategy(clusterAlias, transportService, remoteConnectionManager, settings); // we register the transport service here as a listener to make sure we notify handlers on disconnect etc. 
this.remoteConnectionManager.addListener(transportService); diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterCredentialsManager.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterCredentialsManager.java new file mode 100644 index 0000000000000..32a5e196c3a0b --- /dev/null +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterCredentialsManager.java @@ -0,0 +1,52 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.transport; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.Nullable; + +import java.util.Map; + +import static org.elasticsearch.transport.RemoteClusterService.REMOTE_CLUSTER_CREDENTIALS; + +public class RemoteClusterCredentialsManager { + + private static final Logger logger = LogManager.getLogger(RemoteClusterCredentialsManager.class); + + private volatile Map clusterCredentials; + + public RemoteClusterCredentialsManager(Settings settings) { + updateClusterCredentials(settings); + } + + public void updateClusterCredentials(Settings settings) { + clusterCredentials = REMOTE_CLUSTER_CREDENTIALS.getAsMap(settings); + logger.debug( + () -> Strings.format( + "Updated remote cluster credentials for clusters: [%s]", + Strings.collectionToCommaDelimitedString(clusterCredentials.keySet()) + ) + ); + } + + @Nullable + public SecureString resolveCredentials(String clusterAlias) { + return clusterCredentials.get(clusterAlias); + } + + public boolean 
hasCredentials(String clusterAlias) { + return clusterCredentials.containsKey(clusterAlias); + } + + public static final RemoteClusterCredentialsManager EMPTY = new RemoteClusterCredentialsManager(Settings.EMPTY); +} diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java index c38f4b26c665f..6bfbb95cbcfe9 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java @@ -147,15 +147,14 @@ public boolean isRemoteClusterServerEnabled() { private final TransportService transportService; private final Map remoteClusters = ConcurrentCollections.newConcurrentMap(); - private final Set credentialsProtectedRemoteClusters; + private final RemoteClusterCredentialsManager remoteClusterCredentialsManager; RemoteClusterService(Settings settings, TransportService transportService) { super(settings); this.enabled = DiscoveryNode.isRemoteClusterClient(settings); this.remoteClusterServerEnabled = REMOTE_CLUSTER_SERVER_ENABLED.get(settings); this.transportService = transportService; - this.credentialsProtectedRemoteClusters = REMOTE_CLUSTER_CREDENTIALS.getAsMap(settings).keySet(); - + this.remoteClusterCredentialsManager = new RemoteClusterCredentialsManager(settings); if (remoteClusterServerEnabled) { registerRemoteClusterHandshakeRequestHandler(transportService); } @@ -305,6 +304,14 @@ private synchronized void updateSkipUnavailable(String clusterAlias, Boolean ski } } + public void updateRemoteClusterCredentials(Settings settings) { + remoteClusterCredentialsManager.updateClusterCredentials(settings); + } + + public RemoteClusterCredentialsManager getRemoteClusterCredentialsManager() { + return remoteClusterCredentialsManager; + } + @Override protected void updateRemoteCluster(String clusterAlias, Settings settings) { CountDownLatch latch = new CountDownLatch(1); @@ 
-363,12 +370,7 @@ synchronized void updateRemoteCluster( if (remote == null) { // this is a new cluster we have to add a new representation Settings finalSettings = Settings.builder().put(this.settings, false).put(newSettings, false).build(); - remote = new RemoteClusterConnection( - finalSettings, - clusterAlias, - transportService, - credentialsProtectedRemoteClusters.contains(clusterAlias) - ); + remote = new RemoteClusterConnection(finalSettings, clusterAlias, transportService, remoteClusterCredentialsManager); remoteClusters.put(clusterAlias, remote); remote.ensureConnected(listener.map(ignored -> RemoteClusterConnectionStatus.CONNECTED)); } else if (remote.shouldRebuildConnection(newSettings)) { @@ -380,12 +382,7 @@ synchronized void updateRemoteCluster( } remoteClusters.remove(clusterAlias); Settings finalSettings = Settings.builder().put(this.settings, false).put(newSettings, false).build(); - remote = new RemoteClusterConnection( - finalSettings, - clusterAlias, - transportService, - credentialsProtectedRemoteClusters.contains(clusterAlias) - ); + remote = new RemoteClusterConnection(finalSettings, clusterAlias, transportService, remoteClusterCredentialsManager); remoteClusters.put(clusterAlias, remote); remote.ensureConnected(listener.map(ignored -> RemoteClusterConnectionStatus.RECONNECTED)); } else { diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteConnectionManager.java b/server/src/main/java/org/elasticsearch/transport/RemoteConnectionManager.java index b16734b273376..3b531d54fb033 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteConnectionManager.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteConnectionManager.java @@ -12,6 +12,7 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.util.CollectionUtils; import 
org.elasticsearch.core.Nullable; import org.elasticsearch.core.Releasable; @@ -25,18 +26,19 @@ import java.util.Set; import java.util.concurrent.atomic.AtomicLong; -import static org.elasticsearch.transport.RemoteClusterPortSettings.REMOTE_CLUSTER_PROFILE; import static org.elasticsearch.transport.RemoteClusterService.REMOTE_CLUSTER_HANDSHAKE_ACTION_NAME; public class RemoteConnectionManager implements ConnectionManager { private final String clusterAlias; + private final RemoteClusterCredentialsManager credentialsManager; private final ConnectionManager delegate; private final AtomicLong counter = new AtomicLong(); private volatile List connectedNodes = Collections.emptyList(); - RemoteConnectionManager(String clusterAlias, ConnectionManager delegate) { + RemoteConnectionManager(String clusterAlias, RemoteClusterCredentialsManager credentialsManager, ConnectionManager delegate) { this.clusterAlias = clusterAlias; + this.credentialsManager = credentialsManager; this.delegate = delegate; this.delegate.addListener(new TransportConnectionListener() { @Override @@ -51,6 +53,10 @@ public void onNodeDisconnected(DiscoveryNode node, Transport.Connection connecti }); } + public RemoteClusterCredentialsManager getCredentialsManager() { + return credentialsManager; + } + /** * Remote cluster connections have a different lifecycle from intra-cluster connections. Use {@link #connectToRemoteClusterNode} * instead of this method. @@ -95,13 +101,7 @@ public void openConnection(DiscoveryNode node, @Nullable ConnectionProfile profi node, profile, listener.delegateFailureAndWrap( - (l, connection) -> l.onResponse( - new InternalRemoteConnection( - connection, - clusterAlias, - profile != null ? 
profile.getTransportProfile() : getConnectionProfile().getTransportProfile() - ) - ) + (l, connection) -> l.onResponse(wrapConnectionWithRemoteClusterInfo(connection, clusterAlias, credentialsManager)) ) ); } @@ -182,16 +182,35 @@ public void closeNoBlock() { * @return a cluster alias if the connection target a node in the remote cluster, otherwise an empty result */ public static Optional resolveRemoteClusterAlias(Transport.Connection connection) { + return resolveRemoteClusterAliasWithCredentials(connection).map(RemoteClusterAliasWithCredentials::clusterAlias); + } + + public record RemoteClusterAliasWithCredentials(String clusterAlias, @Nullable SecureString credentials) { + @Override + public String toString() { + return "RemoteClusterAliasWithCredentials{clusterAlias='" + clusterAlias + "', credentials='::es_redacted::'}"; + } + } + + /** + * This method returns information (alias and credentials) for remote cluster for the given transport connection. + * Either or both of alias and credentials can be null depending on the connection. 
+ * + * @param connection the transport connection for which to resolve a remote cluster alias + */ + public static Optional resolveRemoteClusterAliasWithCredentials(Transport.Connection connection) { Transport.Connection unwrapped = TransportService.unwrapConnection(connection); if (unwrapped instanceof InternalRemoteConnection remoteConnection) { - return Optional.of(remoteConnection.getClusterAlias()); + return Optional.of( + new RemoteClusterAliasWithCredentials(remoteConnection.getClusterAlias(), remoteConnection.getClusterCredentials()) + ); } return Optional.empty(); } private Transport.Connection getConnectionInternal(DiscoveryNode node) throws NodeNotConnectedException { Transport.Connection connection = delegate.getConnection(node); - return new InternalRemoteConnection(connection, clusterAlias, getConnectionProfile().getTransportProfile()); + return wrapConnectionWithRemoteClusterInfo(connection, clusterAlias, credentialsManager); } private synchronized void addConnectedNode(DiscoveryNode addedNode) { @@ -297,21 +316,27 @@ private static final class InternalRemoteConnection implements Transport.Connect private static final Logger logger = LogManager.getLogger(InternalRemoteConnection.class); private final Transport.Connection connection; private final String clusterAlias; - private final boolean isRemoteClusterProfile; + @Nullable + private final SecureString clusterCredentials; - InternalRemoteConnection(Transport.Connection connection, String clusterAlias, String transportProfile) { + private InternalRemoteConnection(Transport.Connection connection, String clusterAlias, @Nullable SecureString clusterCredentials) { assert false == connection instanceof InternalRemoteConnection : "should not double wrap"; assert false == connection instanceof ProxyConnection : "proxy connection should wrap internal remote connection, not the other way around"; - this.clusterAlias = Objects.requireNonNull(clusterAlias); this.connection = 
Objects.requireNonNull(connection); - this.isRemoteClusterProfile = REMOTE_CLUSTER_PROFILE.equals(Objects.requireNonNull(transportProfile)); + this.clusterAlias = Objects.requireNonNull(clusterAlias); + this.clusterCredentials = clusterCredentials; } public String getClusterAlias() { return clusterAlias; } + @Nullable + public SecureString getClusterCredentials() { + return clusterCredentials; + } + @Override public DiscoveryNode getNode() { return connection.getNode(); @@ -321,7 +346,7 @@ public DiscoveryNode getNode() { public void sendRequest(long requestId, String action, TransportRequest request, TransportRequestOptions options) throws IOException, TransportException { final String effectiveAction; - if (isRemoteClusterProfile && TransportService.HANDSHAKE_ACTION_NAME.equals(action)) { + if (clusterCredentials != null && TransportService.HANDSHAKE_ACTION_NAME.equals(action)) { logger.trace("sending remote cluster specific handshake to node [{}] of remote cluster [{}]", getNode(), clusterAlias); effectiveAction = REMOTE_CLUSTER_HANDSHAKE_ACTION_NAME; } else { @@ -389,8 +414,8 @@ public boolean hasReferences() { static InternalRemoteConnection wrapConnectionWithRemoteClusterInfo( Transport.Connection connection, String clusterAlias, - String transportProfile + RemoteClusterCredentialsManager credentialsManager ) { - return new InternalRemoteConnection(connection, clusterAlias, transportProfile); + return new InternalRemoteConnection(connection, clusterAlias, credentialsManager.resolveCredentials(clusterAlias)); } } diff --git a/server/src/main/java/org/elasticsearch/transport/SniffConnectionStrategy.java b/server/src/main/java/org/elasticsearch/transport/SniffConnectionStrategy.java index 0dcad9cf6864c..0f68a58faf463 100644 --- a/server/src/main/java/org/elasticsearch/transport/SniffConnectionStrategy.java +++ b/server/src/main/java/org/elasticsearch/transport/SniffConnectionStrategy.java @@ -357,7 +357,11 @@ private ConnectionManager.ConnectionValidator 
getConnectionValidator(DiscoveryNo : "transport profile must be consistent between the connection manager and the actual profile"; transportService.connectionValidator(node) .validate( - RemoteConnectionManager.wrapConnectionWithRemoteClusterInfo(connection, clusterAlias, profile.getTransportProfile()), + RemoteConnectionManager.wrapConnectionWithRemoteClusterInfo( + connection, + clusterAlias, + connectionManager.getCredentialsManager() + ), profile, listener ); diff --git a/server/src/test/java/org/elasticsearch/transport/ProxyConnectionStrategyTests.java b/server/src/test/java/org/elasticsearch/transport/ProxyConnectionStrategyTests.java index ead43d0bac05e..b3c7c5adac95d 100644 --- a/server/src/test/java/org/elasticsearch/transport/ProxyConnectionStrategyTests.java +++ b/server/src/test/java/org/elasticsearch/transport/ProxyConnectionStrategyTests.java @@ -130,7 +130,11 @@ public void testProxyStrategyWillOpenExpectedNumberOfConnectionsToAddress() { ); int numOfConnections = randomIntBetween(4, 8); try ( - RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); + RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager( + clusterAlias, + RemoteClusterCredentialsManager.EMPTY, + connectionManager + ); ProxyConnectionStrategy strategy = new ProxyConnectionStrategy( clusterAlias, localService, @@ -188,7 +192,11 @@ public void testProxyStrategyWillOpenNewConnectionsOnDisconnect() throws Excepti AtomicBoolean useAddress1 = new AtomicBoolean(true); try ( - RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); + RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager( + clusterAlias, + RemoteClusterCredentialsManager.EMPTY, + connectionManager + ); ProxyConnectionStrategy strategy = new ProxyConnectionStrategy( clusterAlias, localService, @@ -263,7 +271,11 @@ public void testConnectFailsWithIncompatibleNodes() { 
); int numOfConnections = randomIntBetween(4, 8); try ( - RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); + RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager( + clusterAlias, + RemoteClusterCredentialsManager.EMPTY, + connectionManager + ); ProxyConnectionStrategy strategy = new ProxyConnectionStrategy( clusterAlias, localService, @@ -328,7 +340,11 @@ public void testConnectFailsWithNonRetryableException() { ); int numOfConnections = randomIntBetween(4, 8); try ( - RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); + RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager( + clusterAlias, + RemoteClusterCredentialsManager.EMPTY, + connectionManager + ); ProxyConnectionStrategy strategy = new ProxyConnectionStrategy( clusterAlias, localService, @@ -388,7 +404,11 @@ public void testClusterNameValidationPreventConnectingToDifferentClusters() thro AtomicBoolean useAddress1 = new AtomicBoolean(true); try ( - RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); + RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager( + clusterAlias, + RemoteClusterCredentialsManager.EMPTY, + connectionManager + ); ProxyConnectionStrategy strategy = new ProxyConnectionStrategy( clusterAlias, localService, @@ -459,7 +479,11 @@ public void testProxyStrategyWillResolveAddressesEachConnect() throws Exception ); int numOfConnections = randomIntBetween(4, 8); try ( - RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); + RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager( + clusterAlias, + RemoteClusterCredentialsManager.EMPTY, + connectionManager + ); ProxyConnectionStrategy strategy = new ProxyConnectionStrategy( clusterAlias, localService, @@ -511,7 
+535,11 @@ public void onNodeConnected(DiscoveryNode node, Transport.Connection connection) }); try ( - var remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); + var remoteConnectionManager = new RemoteConnectionManager( + clusterAlias, + RemoteClusterCredentialsManager.EMPTY, + connectionManager + ); var strategy = new ProxyConnectionStrategy( clusterAlias, localService, @@ -554,7 +582,11 @@ public void testProxyStrategyWillNeedToBeRebuiltIfNumOfSocketsOrAddressesOrServe ); int numOfConnections = randomIntBetween(4, 8); try ( - RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); + RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager( + clusterAlias, + RemoteClusterCredentialsManager.EMPTY, + connectionManager + ); ProxyConnectionStrategy strategy = new ProxyConnectionStrategy( clusterAlias, localService, @@ -672,7 +704,11 @@ public void testServerNameAttributes() { ); int numOfConnections = randomIntBetween(4, 8); try ( - RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); + RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager( + clusterAlias, + RemoteClusterCredentialsManager.EMPTY, + connectionManager + ); ProxyConnectionStrategy strategy = new ProxyConnectionStrategy( clusterAlias, localService, diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java index d4f03f1027838..cbe15cc9664f4 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java @@ -62,6 +62,7 @@ import java.util.Arrays; import java.util.Collections; import java.util.List; +import java.util.Objects; import 
java.util.concurrent.BrokenBarrierException; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CountDownLatch; @@ -252,7 +253,14 @@ public void run() { AtomicReference exceptionReference = new AtomicReference<>(); String clusterAlias = "test-cluster"; Settings settings = buildRandomSettings(clusterAlias, addresses(seedNode)); - try (RemoteClusterConnection connection = new RemoteClusterConnection(settings, clusterAlias, service, randomBoolean())) { + try ( + RemoteClusterConnection connection = new RemoteClusterConnection( + settings, + clusterAlias, + service, + randomFrom(RemoteClusterCredentialsManager.EMPTY, buildCredentialsManager(clusterAlias)) + ) + ) { ActionListener listener = ActionListener.wrap(x -> { listenerCalled.countDown(); fail("expected exception"); @@ -322,7 +330,14 @@ public void testCloseWhileConcurrentlyConnecting() throws IOException, Interrupt service.acceptIncomingRequests(); String clusterAlias = "test-cluster"; Settings settings = buildRandomSettings(clusterAlias, seedNodes); - try (RemoteClusterConnection connection = new RemoteClusterConnection(settings, clusterAlias, service, false)) { + try ( + RemoteClusterConnection connection = new RemoteClusterConnection( + settings, + clusterAlias, + service, + RemoteClusterCredentialsManager.EMPTY + ) + ) { int numThreads = randomIntBetween(4, 10); Thread[] threads = new Thread[numThreads]; CyclicBarrier barrier = new CyclicBarrier(numThreads + 1); @@ -470,7 +485,12 @@ private void doTestGetConnectionInfo(boolean hasClusterCredentials) throws Excep settings = Settings.builder().put(settings).setSecureSettings(secureSettings).build(); } try ( - RemoteClusterConnection connection = new RemoteClusterConnection(settings, clusterAlias, service, hasClusterCredentials) + RemoteClusterConnection connection = new RemoteClusterConnection( + settings, + clusterAlias, + service, + hasClusterCredentials ? 
buildCredentialsManager(clusterAlias) : RemoteClusterCredentialsManager.EMPTY + ) ) { // test no nodes connected RemoteConnectionInfo remoteConnectionInfo = assertSerialization(connection.getConnectionInfo()); @@ -662,7 +682,12 @@ private void doTestCollectNodes(boolean hasClusterCredentials) throws Exception } try ( - RemoteClusterConnection connection = new RemoteClusterConnection(settings, clusterAlias, service, hasClusterCredentials) + RemoteClusterConnection connection = new RemoteClusterConnection( + settings, + clusterAlias, + service, + hasClusterCredentials ? buildCredentialsManager(clusterAlias) : RemoteClusterCredentialsManager.EMPTY + ) ) { CountDownLatch responseLatch = new CountDownLatch(1); AtomicReference> reference = new AtomicReference<>(); @@ -713,7 +738,14 @@ public void testNoChannelsExceptREG() throws Exception { String clusterAlias = "test-cluster"; Settings settings = buildRandomSettings(clusterAlias, addresses(seedNode)); - try (RemoteClusterConnection connection = new RemoteClusterConnection(settings, clusterAlias, service, false)) { + try ( + RemoteClusterConnection connection = new RemoteClusterConnection( + settings, + clusterAlias, + service, + RemoteClusterCredentialsManager.EMPTY + ) + ) { PlainActionFuture plainActionFuture = new PlainActionFuture<>(); connection.ensureConnected(plainActionFuture); plainActionFuture.get(10, TimeUnit.SECONDS); @@ -779,7 +811,14 @@ public void testConnectedNodesConcurrentAccess() throws IOException, Interrupted String clusterAlias = "test-cluster"; Settings settings = buildRandomSettings(clusterAlias, seedNodes); - try (RemoteClusterConnection connection = new RemoteClusterConnection(settings, clusterAlias, service, randomBoolean())) { + try ( + RemoteClusterConnection connection = new RemoteClusterConnection( + settings, + clusterAlias, + service, + randomFrom(RemoteClusterCredentialsManager.EMPTY, buildCredentialsManager(clusterAlias)) + ) + ) { final int numGetThreads = randomIntBetween(4, 10); 
final Thread[] getThreads = new Thread[numGetThreads]; final int numModifyingThreads = randomIntBetween(4, 10); @@ -873,7 +912,14 @@ public void testGetConnection() throws Exception { service.acceptIncomingRequests(); String clusterAlias = "test-cluster"; Settings settings = buildRandomSettings(clusterAlias, addresses(seedNode)); - try (RemoteClusterConnection connection = new RemoteClusterConnection(settings, clusterAlias, service, false)) { + try ( + RemoteClusterConnection connection = new RemoteClusterConnection( + settings, + clusterAlias, + service, + RemoteClusterCredentialsManager.EMPTY + ) + ) { PlainActionFuture.get(fut -> connection.ensureConnected(fut.map(x -> null))); for (int i = 0; i < 10; i++) { // always a direct connection as the remote node is already connected @@ -921,4 +967,13 @@ private static Settings buildSniffSettings(String clusterAlias, List see ); return builder.build(); } + + private static RemoteClusterCredentialsManager buildCredentialsManager(String clusterAlias) { + Objects.requireNonNull(clusterAlias); + final Settings.Builder builder = Settings.builder(); + final MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString("cluster.remote." + clusterAlias + ".credentials", randomAlphaOfLength(20)); + builder.setSecureSettings(secureSettings); + return new RemoteClusterCredentialsManager(builder.build()); + } } diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterCredentialsManagerTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterCredentialsManagerTests.java new file mode 100644 index 0000000000000..f02148a40e47e --- /dev/null +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterCredentialsManagerTests.java @@ -0,0 +1,43 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.transport; + +import org.elasticsearch.common.settings.MockSecureSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ESTestCase; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; + +public class RemoteClusterCredentialsManagerTests extends ESTestCase { + public void testResolveRemoteClusterCredentials() { + final String clusterAlias = randomAlphaOfLength(9); + final String otherClusterAlias = randomAlphaOfLength(10); + + final String secret = randomAlphaOfLength(20); + final Settings settings = buildSettingsWithCredentials(clusterAlias, secret); + RemoteClusterCredentialsManager credentialsManager = new RemoteClusterCredentialsManager(settings); + assertThat(credentialsManager.resolveCredentials(clusterAlias).toString(), equalTo(secret)); + assertThat(credentialsManager.hasCredentials(otherClusterAlias), is(false)); + + final String updatedSecret = randomAlphaOfLength(21); + credentialsManager.updateClusterCredentials(buildSettingsWithCredentials(clusterAlias, updatedSecret)); + assertThat(credentialsManager.resolveCredentials(clusterAlias).toString(), equalTo(updatedSecret)); + + credentialsManager.updateClusterCredentials(Settings.EMPTY); + assertThat(credentialsManager.hasCredentials(clusterAlias), is(false)); + } + + private Settings buildSettingsWithCredentials(String clusterAlias, String secret) { + final Settings.Builder builder = Settings.builder(); + final MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString("cluster.remote." 
+ clusterAlias + ".credentials", secret); + return builder.setSecureSettings(secureSettings).build(); + } +} diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteConnectionManagerTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteConnectionManagerTests.java index 839138d3c7c34..b1ffda669e6a1 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteConnectionManagerTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteConnectionManagerTests.java @@ -13,6 +13,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeUtils; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.util.concurrent.ThreadContext; @@ -23,17 +24,20 @@ import java.io.IOException; import java.net.InetAddress; import java.util.HashSet; +import java.util.Optional; import java.util.Set; import java.util.concurrent.ExecutionException; import static org.elasticsearch.transport.RemoteClusterService.REMOTE_CLUSTER_HANDSHAKE_ACTION_NAME; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; import static org.hamcrest.core.IsInstanceOf.instanceOf; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; public class RemoteConnectionManagerTests extends ESTestCase { @@ -49,6 +53,7 @@ public void setUp() throws Exception { transport = mock(Transport.class); remoteConnectionManager = new RemoteConnectionManager( "remote-cluster", + RemoteClusterCredentialsManager.EMPTY, new ClusterConnectionManager(Settings.EMPTY, transport, new ThreadContext(Settings.EMPTY)) ); @@ -120,10 
+125,13 @@ public void testResolveRemoteClusterAlias() throws ExecutionException, Interrupt public void testRewriteHandshakeAction() throws IOException { final Transport.Connection connection = mock(Transport.Connection.class); + final String clusterAlias = randomAlphaOfLengthBetween(3, 8); + final RemoteClusterCredentialsManager credentialsResolver = mock(RemoteClusterCredentialsManager.class); + when(credentialsResolver.resolveCredentials(clusterAlias)).thenReturn(new SecureString(randomAlphaOfLength(42))); final Transport.Connection wrappedConnection = RemoteConnectionManager.wrapConnectionWithRemoteClusterInfo( connection, - randomAlphaOfLengthBetween(3, 8), - RemoteClusterPortSettings.REMOTE_CLUSTER_PROFILE + clusterAlias, + credentialsResolver ); final long requestId = randomLong(); final TransportRequest request = mock(TransportRequest.class); @@ -142,6 +150,26 @@ public void testRewriteHandshakeAction() throws IOException { verify(connection).sendRequest(requestId, anotherAction, request, options); } + public void testWrapAndResolveConnectionRoundTrip() { + final Transport.Connection connection = mock(Transport.Connection.class); + final String clusterAlias = randomAlphaOfLengthBetween(3, 8); + final RemoteClusterCredentialsManager credentialsResolver = mock(RemoteClusterCredentialsManager.class); + final SecureString credentials = new SecureString(randomAlphaOfLength(42)); + // second credential will never be resolved + when(credentialsResolver.resolveCredentials(clusterAlias)).thenReturn(credentials, (SecureString) null); + final Transport.Connection wrappedConnection = RemoteConnectionManager.wrapConnectionWithRemoteClusterInfo( + connection, + clusterAlias, + credentialsResolver + ); + + final Optional actual = RemoteConnectionManager + .resolveRemoteClusterAliasWithCredentials(wrappedConnection); + + assertThat(actual.isPresent(), is(true)); + assertThat(actual.get(), equalTo(new RemoteConnectionManager.RemoteClusterAliasWithCredentials(clusterAlias, 
credentials))); + } + private static class TestRemoteConnection extends CloseableConnection { private final DiscoveryNode node; diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteConnectionStrategyTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteConnectionStrategyTests.java index 5d461e906a266..ca9986ba5eb1f 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteConnectionStrategyTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteConnectionStrategyTests.java @@ -26,7 +26,11 @@ public void testStrategyChangeMeansThatStrategyMustBeRebuilt() { mock(Transport.class), threadContext ); - RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager("cluster-alias", connectionManager); + RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager( + "cluster-alias", + RemoteClusterCredentialsManager.EMPTY, + connectionManager + ); FakeConnectionStrategy first = new FakeConnectionStrategy( "cluster-alias", mock(TransportService.class), @@ -46,7 +50,11 @@ public void testSameStrategyChangeMeansThatStrategyDoesNotNeedToBeRebuilt() { mock(Transport.class), threadContext ); - RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager("cluster-alias", connectionManager); + RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager( + "cluster-alias", + RemoteClusterCredentialsManager.EMPTY, + connectionManager + ); FakeConnectionStrategy first = new FakeConnectionStrategy( "cluster-alias", mock(TransportService.class), @@ -69,7 +77,11 @@ public void testChangeInConnectionProfileMeansTheStrategyMustBeRebuilt() { assertEquals(TimeValue.MINUS_ONE, connectionManager.getConnectionProfile().getPingInterval()); assertEquals(Compression.Enabled.INDEXING_DATA, connectionManager.getConnectionProfile().getCompressionEnabled()); assertEquals(Compression.Scheme.LZ4, connectionManager.getConnectionProfile().getCompressionScheme()); - 
RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager("cluster-alias", connectionManager); + RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager( + "cluster-alias", + RemoteClusterCredentialsManager.EMPTY, + connectionManager + ); FakeConnectionStrategy first = new FakeConnectionStrategy( "cluster-alias", mock(TransportService.class), diff --git a/server/src/test/java/org/elasticsearch/transport/SniffConnectionStrategyTests.java b/server/src/test/java/org/elasticsearch/transport/SniffConnectionStrategyTests.java index 3c955258d45c8..ddee1ff4d690a 100644 --- a/server/src/test/java/org/elasticsearch/transport/SniffConnectionStrategyTests.java +++ b/server/src/test/java/org/elasticsearch/transport/SniffConnectionStrategyTests.java @@ -192,7 +192,11 @@ public void testSniffStrategyWillConnectToAndDiscoverNodes() { threadPool.getThreadContext() ); try ( - RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); + RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager( + clusterAlias, + hasClusterCredentials ? 
new RemoteClusterCredentialsManager(clientSettings) : RemoteClusterCredentialsManager.EMPTY, + connectionManager + ); SniffConnectionStrategy strategy = new SniffConnectionStrategy( clusterAlias, localService, @@ -262,7 +266,11 @@ public void testSniffStrategyWillResolveDiscoveryNodesEachConnect() throws Excep threadPool.getThreadContext() ); try ( - RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); + RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager( + clusterAlias, + RemoteClusterCredentialsManager.EMPTY, + connectionManager + ); SniffConnectionStrategy strategy = new SniffConnectionStrategy( clusterAlias, localService, @@ -336,7 +344,11 @@ public void testSniffStrategyWillConnectToMaxAllowedNodesAndOpenNewConnectionsOn threadPool.getThreadContext() ); try ( - RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); + RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager( + clusterAlias, + RemoteClusterCredentialsManager.EMPTY, + connectionManager + ); SniffConnectionStrategy strategy = new SniffConnectionStrategy( clusterAlias, localService, @@ -424,7 +436,11 @@ public void testDiscoverWithSingleIncompatibleSeedNode() { threadPool.getThreadContext() ); try ( - RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); + RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager( + clusterAlias, + RemoteClusterCredentialsManager.EMPTY, + connectionManager + ); SniffConnectionStrategy strategy = new SniffConnectionStrategy( clusterAlias, localService, @@ -486,7 +502,11 @@ public void testConnectFailsWithIncompatibleNodes() { threadPool.getThreadContext() ); try ( - RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); + RemoteConnectionManager remoteConnectionManager = 
new RemoteConnectionManager( + clusterAlias, + RemoteClusterCredentialsManager.EMPTY, + connectionManager + ); SniffConnectionStrategy strategy = new SniffConnectionStrategy( clusterAlias, localService, @@ -549,7 +569,11 @@ public void testFilterNodesWithNodePredicate() { threadPool.getThreadContext() ); try ( - RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); + RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager( + clusterAlias, + RemoteClusterCredentialsManager.EMPTY, + connectionManager + ); SniffConnectionStrategy strategy = new SniffConnectionStrategy( clusterAlias, localService, @@ -617,7 +641,11 @@ public void testConnectFailsIfNoConnectionsOpened() { threadPool.getThreadContext() ); try ( - RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); + RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager( + clusterAlias, + RemoteClusterCredentialsManager.EMPTY, + connectionManager + ); SniffConnectionStrategy strategy = new SniffConnectionStrategy( clusterAlias, localService, @@ -694,7 +722,11 @@ public void testClusterNameValidationPreventConnectingToDifferentClusters() thro threadPool.getThreadContext() ); try ( - RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); + RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager( + clusterAlias, + RemoteClusterCredentialsManager.EMPTY, + connectionManager + ); SniffConnectionStrategy strategy = new SniffConnectionStrategy( clusterAlias, localService, @@ -783,7 +815,11 @@ public void testMultipleCallsToConnectEnsuresConnection() { threadPool.getThreadContext() ); try ( - RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); + RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager( + 
clusterAlias, + RemoteClusterCredentialsManager.EMPTY, + connectionManager + ); SniffConnectionStrategy strategy = new SniffConnectionStrategy( clusterAlias, localService, @@ -895,7 +931,11 @@ public void testConfiguredProxyAddressModeWillReplaceNodeAddress() { threadPool.getThreadContext() ); try ( - RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); + RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager( + clusterAlias, + RemoteClusterCredentialsManager.EMPTY, + connectionManager + ); SniffConnectionStrategy strategy = new SniffConnectionStrategy( clusterAlias, localService, @@ -964,7 +1004,11 @@ public void testSniffStrategyWillNeedToBeRebuiltIfNumOfConnectionsOrSeedsOrProxy threadPool.getThreadContext() ); try ( - RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager(clusterAlias, connectionManager); + RemoteConnectionManager remoteConnectionManager = new RemoteConnectionManager( + clusterAlias, + RemoteClusterCredentialsManager.EMPTY, + connectionManager + ); SniffConnectionStrategy strategy = new SniffConnectionStrategy( clusterAlias, localService, diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/settings/ReloadRemoteClusterCredentialsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/settings/ReloadRemoteClusterCredentialsAction.java new file mode 100644 index 0000000000000..27b9460dd67cb --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/settings/ReloadRemoteClusterCredentialsAction.java @@ -0,0 +1,50 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.core.security.action.settings; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.ActionType; +import org.elasticsearch.action.support.TransportAction; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.settings.Settings; + +import java.io.IOException; + +public class ReloadRemoteClusterCredentialsAction extends ActionType { + public static final String NAME = "cluster:admin/xpack/security/remote_cluster_credentials/reload"; + public static final ReloadRemoteClusterCredentialsAction INSTANCE = new ReloadRemoteClusterCredentialsAction(); + + private ReloadRemoteClusterCredentialsAction() { + super(NAME, Writeable.Reader.localOnly()); + } + + public static class Request extends ActionRequest { + private final Settings settings; + + public Request(Settings settings) { + this.settings = settings; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + public Settings getSettings() { + return settings; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + TransportAction.localOnly(); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/SystemPrivilege.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/SystemPrivilege.java index bc42632507256..4d24a757537e2 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/SystemPrivilege.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/SystemPrivilege.java @@ -12,6 +12,7 @@ import org.elasticsearch.index.seqno.RetentionLeaseSyncAction; import 
org.elasticsearch.persistent.CompletionPersistentTaskAction; import org.elasticsearch.transport.TransportActionProxy; +import org.elasticsearch.xpack.core.security.action.settings.ReloadRemoteClusterCredentialsAction; import org.elasticsearch.xpack.core.security.support.StringMatcher; import java.util.Collections; @@ -43,7 +44,8 @@ public final class SystemPrivilege extends Privilege { "indices:data/read/*", // needed for SystemIndexMigrator "indices:admin/refresh", // needed for SystemIndexMigrator "indices:admin/aliases", // needed for SystemIndexMigrator - TransportSearchShardsAction.TYPE.name() // added so this API can be called with the system user by other APIs + TransportSearchShardsAction.TYPE.name(), // added so this API can be called with the system user by other APIs + ReloadRemoteClusterCredentialsAction.NAME // needed for Security plugin reload of remote cluster credentials ); private static final Predicate PREDICATE = (action) -> { diff --git a/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java b/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java index 6e78eb2fb5b83..ea27eb9406d09 100644 --- a/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java +++ b/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java @@ -255,6 +255,7 @@ public class Constants { "cluster:admin/xpack/security/profile/suggest", "cluster:admin/xpack/security/profile/set_enabled", "cluster:admin/xpack/security/realm/cache/clear", + "cluster:admin/xpack/security/remote_cluster_credentials/reload", "cluster:admin/xpack/security/role/delete", "cluster:admin/xpack/security/role/get", "cluster:admin/xpack/security/role/put", diff --git 
a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/ReloadRemoteClusterCredentialsIT.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/ReloadRemoteClusterCredentialsIT.java new file mode 100644 index 0000000000000..6042d0072270d --- /dev/null +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/ReloadRemoteClusterCredentialsIT.java @@ -0,0 +1,314 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.security; + +import org.apache.lucene.search.TotalHits; +import org.elasticsearch.TransportVersion; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.cluster.node.reload.NodesReloadSecureSettingsResponse; +import org.elasticsearch.action.admin.cluster.remote.RemoteClusterNodesAction; +import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; +import org.elasticsearch.action.admin.cluster.state.ClusterStateAction; +import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; +import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.SearchShardsRequest; +import org.elasticsearch.action.search.SearchShardsResponse; +import org.elasticsearch.action.search.ShardSearchFailure; +import org.elasticsearch.action.search.TransportSearchAction; +import org.elasticsearch.action.search.TransportSearchShardsAction; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.node.VersionInformation; +import 
org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.KeyStoreWrapper; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.env.Environment; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.SearchHits; +import org.elasticsearch.search.aggregations.InternalAggregations; +import org.elasticsearch.search.internal.InternalSearchResponse; +import org.elasticsearch.test.SecuritySingleNodeTestCase; +import org.elasticsearch.test.transport.MockTransportService; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.RemoteClusterCredentialsManager; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.security.authc.ApiKeyService; +import org.elasticsearch.xpack.security.authc.CrossClusterAccessHeaders; +import org.junit.BeforeClass; + +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; + +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasKey; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; + +public class ReloadRemoteClusterCredentialsIT extends SecuritySingleNodeTestCase { + private static final String CLUSTER_ALIAS = "my_remote_cluster"; + + @BeforeClass + public static void 
disableInFips() { + assumeFalse( + "Cannot run in FIPS mode since the keystore will be password protected and sending a password in the reload" + + "settings api call, require TLS to be configured for the transport layer", + inFipsJvm() + ); + } + + @Override + public String configRoles() { + return org.elasticsearch.core.Strings.format(""" + user: + cluster: [ "ALL" ] + indices: + - names: '*' + privileges: [ "ALL" ] + remote_indices: + - names: '*' + privileges: [ "ALL" ] + clusters: ["*"] + """); + } + + @Override + public void tearDown() throws Exception { + try { + clearRemoteCluster(); + super.tearDown(); + } finally { + ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS); + } + } + + private final ThreadPool threadPool = new TestThreadPool(getClass().getName()); + + public void testReloadRemoteClusterCredentials() throws Exception { + final String credentials = randomAlphaOfLength(42); + writeCredentialsToKeyStore(credentials); + final RemoteClusterCredentialsManager clusterCredentialsManager = getInstanceFromNode(TransportService.class) + .getRemoteClusterService() + .getRemoteClusterCredentialsManager(); + // Until we reload, credentials written to keystore are not loaded into the credentials manager + assertThat(clusterCredentialsManager.hasCredentials(CLUSTER_ALIAS), is(false)); + reloadSecureSettings(); + assertThat(clusterCredentialsManager.resolveCredentials(CLUSTER_ALIAS), equalTo(credentials)); + + // Check that credentials get used for a remote connection, once we configure it + final BlockingQueue> capturedHeaders = ConcurrentCollections.newBlockingQueue(); + try (MockTransportService remoteTransport = startTransport("remoteNodeA", threadPool, capturedHeaders)) { + final TransportAddress remoteAddress = remoteTransport.getOriginalTransport() + .profileBoundAddresses() + .get("_remote_cluster") + .publishAddress(); + + configureRemoteCluster(remoteAddress); + + // Run search to trigger header capturing on the receiving side + client().search(new 
SearchRequest(CLUSTER_ALIAS + ":index-a")).get(); + + assertHeadersContainCredentialsThenClear(credentials, capturedHeaders); + + // Update credentials and ensure they are used + final String updatedCredentials = randomAlphaOfLength(41); + writeCredentialsToKeyStore(updatedCredentials); + reloadSecureSettings(); + + client().search(new SearchRequest(CLUSTER_ALIAS + ":index-a")).get(); + + assertHeadersContainCredentialsThenClear(updatedCredentials, capturedHeaders); + } + } + + private void assertHeadersContainCredentialsThenClear(String credentials, BlockingQueue> capturedHeaders) { + assertThat(capturedHeaders, is(not(empty()))); + for (Map actualHeaders : capturedHeaders) { + assertThat(actualHeaders, hasKey(CrossClusterAccessHeaders.CROSS_CLUSTER_ACCESS_CREDENTIALS_HEADER_KEY)); + assertThat( + actualHeaders.get(CrossClusterAccessHeaders.CROSS_CLUSTER_ACCESS_CREDENTIALS_HEADER_KEY), + equalTo(ApiKeyService.withApiKeyPrefix(credentials)) + ); + } + capturedHeaders.clear(); + assertThat(capturedHeaders, is(empty())); + } + + private void clearRemoteCluster() throws InterruptedException, ExecutionException { + final var builder = Settings.builder() + .putNull("cluster.remote." + CLUSTER_ALIAS + ".mode") + .putNull("cluster.remote." + CLUSTER_ALIAS + ".seeds") + .putNull("cluster.remote." + CLUSTER_ALIAS + ".proxy_address"); + clusterAdmin().updateSettings(new ClusterUpdateSettingsRequest().persistentSettings(builder)).get(); + } + + @Override + protected Settings nodeSettings() { + return Settings.builder().put(super.nodeSettings()).put("xpack.security.remote_cluster_client.ssl.enabled", false).build(); + } + + private void configureRemoteCluster(TransportAddress remoteAddress) throws InterruptedException, ExecutionException { + final Settings.Builder builder = Settings.builder(); + if (randomBoolean()) { + builder.put("cluster.remote." + CLUSTER_ALIAS + ".mode", "sniff") + .put("cluster.remote." 
+ CLUSTER_ALIAS + ".seeds", remoteAddress.toString()) + .putNull("cluster.remote." + CLUSTER_ALIAS + ".proxy_address"); + } else { + builder.put("cluster.remote." + CLUSTER_ALIAS + ".mode", "proxy") + .put("cluster.remote." + CLUSTER_ALIAS + ".proxy_address", remoteAddress.toString()) + .putNull("cluster.remote." + CLUSTER_ALIAS + ".seeds"); + } + clusterAdmin().updateSettings(new ClusterUpdateSettingsRequest().persistentSettings(builder)).get(); + } + + private void writeCredentialsToKeyStore(String credentials) throws Exception { + final Environment environment = getInstanceFromNode(Environment.class); + final KeyStoreWrapper keyStoreWrapper = KeyStoreWrapper.create(); + keyStoreWrapper.setString("cluster.remote." + CLUSTER_ALIAS + ".credentials", credentials.toCharArray()); + keyStoreWrapper.save(environment.configFile(), new char[0], false); + } + + public static MockTransportService startTransport( + final String nodeName, + final ThreadPool threadPool, + final BlockingQueue> capturedHeaders + ) { + boolean success = false; + final Settings settings = Settings.builder() + .put("node.name", nodeName) + .put("remote_cluster_server.enabled", "true") + .put("remote_cluster.port", "0") + .put("xpack.security.remote_cluster_server.ssl.enabled", "false") + .build(); + final MockTransportService service = MockTransportService.createNewService( + settings, + VersionInformation.CURRENT, + TransportVersion.current(), + threadPool, + null + ); + try { + service.registerRequestHandler( + ClusterStateAction.NAME, + EsExecutors.DIRECT_EXECUTOR_SERVICE, + ClusterStateRequest::new, + (request, channel, task) -> { + capturedHeaders.add(Map.copyOf(threadPool.getThreadContext().getHeaders())); + channel.sendResponse( + new ClusterStateResponse(ClusterName.DEFAULT, ClusterState.builder(ClusterName.DEFAULT).build(), false) + ); + } + ); + service.registerRequestHandler( + RemoteClusterNodesAction.TYPE.name(), + EsExecutors.DIRECT_EXECUTOR_SERVICE, + 
RemoteClusterNodesAction.Request::new, + (request, channel, task) -> { + capturedHeaders.add(Map.copyOf(threadPool.getThreadContext().getHeaders())); + channel.sendResponse(new RemoteClusterNodesAction.Response(List.of())); + } + ); + service.registerRequestHandler( + TransportSearchShardsAction.TYPE.name(), + EsExecutors.DIRECT_EXECUTOR_SERVICE, + SearchShardsRequest::new, + (request, channel, task) -> { + capturedHeaders.add(Map.copyOf(threadPool.getThreadContext().getHeaders())); + channel.sendResponse(new SearchShardsResponse(List.of(), List.of(), Collections.emptyMap())); + } + ); + service.registerRequestHandler( + TransportSearchAction.TYPE.name(), + EsExecutors.DIRECT_EXECUTOR_SERVICE, + SearchRequest::new, + (request, channel, task) -> { + capturedHeaders.add(Map.copyOf(threadPool.getThreadContext().getHeaders())); + channel.sendResponse( + new SearchResponse( + new InternalSearchResponse( + new SearchHits(new SearchHit[0], new TotalHits(0, TotalHits.Relation.EQUAL_TO), Float.NaN), + InternalAggregations.EMPTY, + null, + null, + false, + null, + 1 + ), + null, + 1, + 1, + 0, + 100, + ShardSearchFailure.EMPTY_ARRAY, + SearchResponse.Clusters.EMPTY + ) + ); + } + ); + service.start(); + service.acceptIncomingRequests(); + success = true; + return service; + } finally { + if (success == false) { + service.close(); + } + } + } + + private void reloadSecureSettings() throws InterruptedException { + final AtomicReference reloadSettingsError = new AtomicReference<>(); + final CountDownLatch latch = new CountDownLatch(1); + final SecureString emptyPassword = randomBoolean() ? 
new SecureString(new char[0]) : null; + clusterAdmin().prepareReloadSecureSettings() + .setSecureStorePassword(emptyPassword) + .setNodesIds(Strings.EMPTY_ARRAY) + .execute(new ActionListener<>() { + @Override + public void onResponse(NodesReloadSecureSettingsResponse nodesReloadResponse) { + try { + assertThat(nodesReloadResponse, notNullValue()); + final Map nodesMap = nodesReloadResponse.getNodesMap(); + assertThat(nodesMap.size(), equalTo(1)); + for (final NodesReloadSecureSettingsResponse.NodeResponse nodeResponse : nodesReloadResponse.getNodes()) { + assertThat(nodeResponse.reloadException(), nullValue()); + } + } catch (final AssertionError e) { + reloadSettingsError.set(e); + } finally { + latch.countDown(); + } + } + + @Override + public void onFailure(Exception e) { + reloadSettingsError.set(new AssertionError("Nodes request failed", e)); + latch.countDown(); + } + }); + latch.await(); + if (reloadSettingsError.get() != null) { + throw reloadSettingsError.get(); + } + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java index 6d7f6fcd3822b..349cebe7f705f 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java @@ -13,6 +13,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.util.SetOnce; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchSecurityException; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.TransportVersion; @@ -156,6 +157,7 @@ import org.elasticsearch.xpack.core.security.action.service.GetServiceAccountCredentialsAction; import org.elasticsearch.xpack.core.security.action.service.GetServiceAccountNodesCredentialsAction; import 
org.elasticsearch.xpack.core.security.action.settings.GetSecuritySettingsAction; +import org.elasticsearch.xpack.core.security.action.settings.ReloadRemoteClusterCredentialsAction; import org.elasticsearch.xpack.core.security.action.settings.UpdateSecuritySettingsAction; import org.elasticsearch.xpack.core.security.action.token.CreateTokenAction; import org.elasticsearch.xpack.core.security.action.token.InvalidateTokenAction; @@ -247,6 +249,7 @@ import org.elasticsearch.xpack.security.action.service.TransportGetServiceAccountCredentialsAction; import org.elasticsearch.xpack.security.action.service.TransportGetServiceAccountNodesCredentialsAction; import org.elasticsearch.xpack.security.action.settings.TransportGetSecuritySettingsAction; +import org.elasticsearch.xpack.security.action.settings.TransportReloadRemoteClusterCredentialsAction; import org.elasticsearch.xpack.security.action.settings.TransportUpdateSecuritySettingsAction; import org.elasticsearch.xpack.security.action.token.TransportCreateTokenAction; import org.elasticsearch.xpack.security.action.token.TransportInvalidateTokenAction; @@ -367,7 +370,6 @@ import org.elasticsearch.xpack.security.support.CacheInvalidatorRegistry; import org.elasticsearch.xpack.security.support.ExtensionComponents; import org.elasticsearch.xpack.security.support.SecuritySystemIndices; -import org.elasticsearch.xpack.security.transport.RemoteClusterCredentialsResolver; import org.elasticsearch.xpack.security.transport.SecurityHttpSettings; import org.elasticsearch.xpack.security.transport.SecurityServerTransportInterceptor; import org.elasticsearch.xpack.security.transport.filter.IPFilter; @@ -557,6 +559,7 @@ public class Security extends Plugin private final SetOnce reservedRoleMappingAction = new SetOnce<>(); private final SetOnce workflowService = new SetOnce<>(); private final SetOnce realms = new SetOnce<>(); + private final SetOnce client = new SetOnce<>(); public Security(Settings settings) { this(settings, 
Collections.emptyList()); @@ -576,25 +579,30 @@ public Security(Settings settings) { runStartupChecks(settings); Automatons.updateConfiguration(settings); } else { - final List remoteClusterCredentialsSettingKeys = RemoteClusterService.REMOTE_CLUSTER_CREDENTIALS.getAllConcreteSettings( - settings - ).map(Setting::getKey).sorted().toList(); - if (false == remoteClusterCredentialsSettingKeys.isEmpty()) { - throw new IllegalArgumentException( - format( - "Found [%s] remote clusters with credentials [%s]. Security [%s] must be enabled to connect to them. " - + "Please either enable security or remove these settings from the keystore.", - remoteClusterCredentialsSettingKeys.size(), - Strings.collectionToCommaDelimitedString(remoteClusterCredentialsSettingKeys), - XPackSettings.SECURITY_ENABLED.getKey() - ) - ); - } + ensureNoRemoteClusterCredentialsOnDisabledSecurity(settings); this.bootstrapChecks.set(Collections.emptyList()); } this.securityExtensions.addAll(extensions); } + private void ensureNoRemoteClusterCredentialsOnDisabledSecurity(Settings settings) { + assert false == enabled; + final List remoteClusterCredentialsSettingKeys = RemoteClusterService.REMOTE_CLUSTER_CREDENTIALS.getAllConcreteSettings( + settings + ).map(Setting::getKey).sorted().toList(); + if (false == remoteClusterCredentialsSettingKeys.isEmpty()) { + throw new IllegalArgumentException( + format( + "Found [%s] remote clusters with credentials [%s]. Security [%s] must be enabled to connect to them. 
" + + "Please either enable security or remove these settings from the keystore.", + remoteClusterCredentialsSettingKeys.size(), + Strings.collectionToCommaDelimitedString(remoteClusterCredentialsSettingKeys), + XPackSettings.SECURITY_ENABLED.getKey() + ) + ); + } + } + private static void runStartupChecks(Settings settings) { validateRealmSettings(settings); if (XPackSettings.FIPS_MODE_ENABLED.get(settings)) { @@ -619,6 +627,14 @@ protected XPackLicenseState getLicenseState() { return XPackPlugin.getSharedLicenseState(); } + protected Client getClient() { + return client.get(); + } + + protected Realms getRealms() { + return realms.get(); + } + @Override public Collection createComponents(PluginServices services) { try { @@ -657,6 +673,8 @@ Collection createComponents( return Collections.singletonList(new SecurityUsageServices(null, null, null, null, null, null)); } + this.client.set(client); + // The settings in `environment` may have additional values over what was provided during construction // See Plugin#additionalSettings() this.settings = environment.settings(); @@ -983,8 +1001,6 @@ Collection createComponents( ipFilter.set(new IPFilter(settings, auditTrailService, clusterService.getClusterSettings(), getLicenseState())); components.add(ipFilter.get()); - final RemoteClusterCredentialsResolver remoteClusterCredentialsResolver = new RemoteClusterCredentialsResolver(settings); - DestructiveOperations destructiveOperations = new DestructiveOperations(settings, clusterService.getClusterSettings()); crossClusterAccessAuthcService.set(new CrossClusterAccessAuthenticationService(clusterService, apiKeyService, authcService.get())); components.add(crossClusterAccessAuthcService.get()); @@ -998,7 +1014,6 @@ Collection createComponents( securityContext.get(), destructiveOperations, crossClusterAccessAuthcService.get(), - remoteClusterCredentialsResolver, getLicenseState() ) ); @@ -1351,6 +1366,7 @@ public void onIndexModule(IndexModule module) { new 
ActionHandler<>(SetProfileEnabledAction.INSTANCE, TransportSetProfileEnabledAction.class), new ActionHandler<>(GetSecuritySettingsAction.INSTANCE, TransportGetSecuritySettingsAction.class), new ActionHandler<>(UpdateSecuritySettingsAction.INSTANCE, TransportUpdateSecuritySettingsAction.class), + new ActionHandler<>(ReloadRemoteClusterCredentialsAction.INSTANCE, TransportReloadRemoteClusterCredentialsAction.class), usageAction, infoAction ).filter(Objects::nonNull).toList(); @@ -1890,16 +1906,54 @@ public BiConsumer getJoinValidator() { @Override public void reload(Settings settings) throws Exception { if (enabled) { - realms.get().stream().filter(r -> JwtRealmSettings.TYPE.equals(r.realmRef().getType())).forEach(realm -> { - if (realm instanceof JwtRealm jwtRealm) { - jwtRealm.rotateClientSecret( - CLIENT_AUTHENTICATION_SHARED_SECRET.getConcreteSettingForNamespace(realm.realmRef().getName()).get(settings) - ); - } - }); + final List reloadExceptions = new ArrayList<>(); + try { + reloadRemoteClusterCredentials(settings); + } catch (Exception ex) { + reloadExceptions.add(ex); + } + + try { + reloadSharedSecretsForJwtRealms(settings); + } catch (Exception ex) { + reloadExceptions.add(ex); + } + + if (false == reloadExceptions.isEmpty()) { + final var combinedException = new ElasticsearchException( + "secure settings reload failed for one or more security components" + ); + reloadExceptions.forEach(combinedException::addSuppressed); + throw combinedException; + } + } else { + ensureNoRemoteClusterCredentialsOnDisabledSecurity(settings); } } + private void reloadSharedSecretsForJwtRealms(Settings settingsWithKeystore) { + getRealms().stream().filter(r -> JwtRealmSettings.TYPE.equals(r.realmRef().getType())).forEach(realm -> { + if (realm instanceof JwtRealm jwtRealm) { + jwtRealm.rotateClientSecret( + CLIENT_AUTHENTICATION_SHARED_SECRET.getConcreteSettingForNamespace(realm.realmRef().getName()).get(settingsWithKeystore) + ); + } + }); + } + + /** + * This method uses a 
transport action internally to access classes that are injectable but not part of the plugin contract. + * See {@link TransportReloadRemoteClusterCredentialsAction} for more context. + */ + private void reloadRemoteClusterCredentials(Settings settingsWithKeystore) { + // Accepting a blocking call here since the underlying action is local-only and only performs fast in-memory ops + // (extracts a subset of passed in `settingsWithKeystore` and stores them in a map) + getClient().execute( + ReloadRemoteClusterCredentialsAction.INSTANCE, + new ReloadRemoteClusterCredentialsAction.Request(settingsWithKeystore) + ).actionGet(); + } + static final class ValidateLicenseForFIPS implements BiConsumer { private final boolean inFipsMode; private final LicenseService licenseService; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/settings/TransportReloadRemoteClusterCredentialsAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/settings/TransportReloadRemoteClusterCredentialsAction.java new file mode 100644 index 0000000000000..de696a3e0353f --- /dev/null +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/settings/TransportReloadRemoteClusterCredentialsAction.java @@ -0,0 +1,54 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.security.action.settings; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.TransportAction; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.tasks.Task; +import org.elasticsearch.transport.RemoteClusterService; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.security.action.settings.ReloadRemoteClusterCredentialsAction; +import org.elasticsearch.xpack.security.Security; + +/** + * This is a local-only action which updates remote cluster credentials for remote cluster connections, from keystore settings reloaded via + * a call to {@link org.elasticsearch.rest.action.admin.cluster.RestReloadSecureSettingsAction}. + * + * It's invoked as part of the {@link Security#reload(Settings)} call. + * + * This action is largely an implementation detail to work around the fact that Security is a plugin without direct access to many core + * classes, including the {@link RemoteClusterService} which is required for credentials update. A transport action gives us access to + * the {@link RemoteClusterService} which is injectable but not part of the plugin contract. 
+ */ +public class TransportReloadRemoteClusterCredentialsAction extends TransportAction< + ReloadRemoteClusterCredentialsAction.Request, + ActionResponse.Empty> { + + private final RemoteClusterService remoteClusterService; + + @Inject + public TransportReloadRemoteClusterCredentialsAction(TransportService transportService, ActionFilters actionFilters) { + super(ReloadRemoteClusterCredentialsAction.NAME, actionFilters, transportService.getTaskManager()); + this.remoteClusterService = transportService.getRemoteClusterService(); + } + + @Override + protected void doExecute( + Task task, + ReloadRemoteClusterCredentialsAction.Request request, + ActionListener listener + ) { + // We avoid stashing and marking context as system to keep the action as minimal as possible (i.e., avoid copying context) + remoteClusterService.updateRemoteClusterCredentials(request.getSettings()); + listener.onResponse(ActionResponse.Empty.INSTANCE); + } +} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/RemoteClusterCredentialsResolver.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/RemoteClusterCredentialsResolver.java deleted file mode 100644 index 93735a700bf92..0000000000000 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/RemoteClusterCredentialsResolver.java +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. 
- */ - -package org.elasticsearch.xpack.security.transport; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.settings.SecureString; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.xpack.security.authc.ApiKeyService; - -import java.util.Map; -import java.util.Optional; - -import static org.elasticsearch.transport.RemoteClusterService.REMOTE_CLUSTER_CREDENTIALS; - -public class RemoteClusterCredentialsResolver { - - private static final Logger logger = LogManager.getLogger(RemoteClusterCredentialsResolver.class); - - private final Map clusterCredentials; - - public RemoteClusterCredentialsResolver(final Settings settings) { - this.clusterCredentials = REMOTE_CLUSTER_CREDENTIALS.getAsMap(settings); - logger.debug( - "Read cluster credentials for remote clusters [{}]", - Strings.collectionToCommaDelimitedString(clusterCredentials.keySet()) - ); - } - - public Optional resolve(final String clusterAlias) { - final SecureString apiKey = clusterCredentials.get(clusterAlias); - if (apiKey == null) { - return Optional.empty(); - } else { - return Optional.of(new RemoteClusterCredentials(clusterAlias, ApiKeyService.withApiKeyPrefix(apiKey.toString()))); - } - } - - record RemoteClusterCredentials(String clusterAlias, String credentials) { - @Override - public String toString() { - return "RemoteClusterCredentials{clusterAlias='" + clusterAlias + "', credentials='::es_redacted::'}"; - } - } -} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptor.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptor.java index 53dd31fe46793..162cabf5297ce 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptor.java +++ 
b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptor.java @@ -12,6 +12,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.state.ClusterStateAction; import org.elasticsearch.action.support.DestructiveOperations; +import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.ssl.SslConfiguration; import org.elasticsearch.common.util.Maps; @@ -24,6 +25,7 @@ import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.RemoteConnectionManager; +import org.elasticsearch.transport.RemoteConnectionManager.RemoteClusterAliasWithCredentials; import org.elasticsearch.transport.SendRequestTransportException; import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportChannel; @@ -46,6 +48,7 @@ import org.elasticsearch.xpack.core.ssl.SSLService; import org.elasticsearch.xpack.security.Security; import org.elasticsearch.xpack.security.audit.AuditUtil; +import org.elasticsearch.xpack.security.authc.ApiKeyService; import org.elasticsearch.xpack.security.authc.AuthenticationService; import org.elasticsearch.xpack.security.authc.CrossClusterAccessAuthenticationService; import org.elasticsearch.xpack.security.authc.CrossClusterAccessHeaders; @@ -63,7 +66,6 @@ import static org.elasticsearch.transport.RemoteClusterPortSettings.REMOTE_CLUSTER_PROFILE; import static org.elasticsearch.transport.RemoteClusterPortSettings.REMOTE_CLUSTER_SERVER_ENABLED; import static org.elasticsearch.transport.RemoteClusterPortSettings.TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY; -import static org.elasticsearch.xpack.security.transport.RemoteClusterCredentialsResolver.RemoteClusterCredentials; public class SecurityServerTransportInterceptor implements TransportInterceptor { @@ -85,8 +87,7 @@ public class 
SecurityServerTransportInterceptor implements TransportInterceptor private final Settings settings; private final SecurityContext securityContext; private final CrossClusterAccessAuthenticationService crossClusterAccessAuthcService; - private final RemoteClusterCredentialsResolver remoteClusterCredentialsResolver; - private final Function> remoteClusterAliasResolver; + private final Function> remoteClusterCredentialsResolver; private final XPackLicenseState licenseState; public SecurityServerTransportInterceptor( @@ -98,7 +99,6 @@ public SecurityServerTransportInterceptor( SecurityContext securityContext, DestructiveOperations destructiveOperations, CrossClusterAccessAuthenticationService crossClusterAccessAuthcService, - RemoteClusterCredentialsResolver remoteClusterCredentialsResolver, XPackLicenseState licenseState ) { this( @@ -110,9 +110,8 @@ public SecurityServerTransportInterceptor( securityContext, destructiveOperations, crossClusterAccessAuthcService, - remoteClusterCredentialsResolver, licenseState, - RemoteConnectionManager::resolveRemoteClusterAlias + RemoteConnectionManager::resolveRemoteClusterAliasWithCredentials ); } @@ -125,10 +124,9 @@ public SecurityServerTransportInterceptor( SecurityContext securityContext, DestructiveOperations destructiveOperations, CrossClusterAccessAuthenticationService crossClusterAccessAuthcService, - RemoteClusterCredentialsResolver remoteClusterCredentialsResolver, XPackLicenseState licenseState, // Inject for simplified testing - Function> remoteClusterAliasResolver + Function> remoteClusterCredentialsResolver ) { this.settings = settings; this.threadPool = threadPool; @@ -139,7 +137,6 @@ public SecurityServerTransportInterceptor( this.crossClusterAccessAuthcService = crossClusterAccessAuthcService; this.licenseState = licenseState; this.remoteClusterCredentialsResolver = remoteClusterCredentialsResolver; - this.remoteClusterAliasResolver = remoteClusterAliasResolver; this.profileFilters = 
initializeProfileFilters(destructiveOperations); } @@ -159,7 +156,8 @@ public void sendRequest( TransportResponseHandler handler ) { assertNoCrossClusterAccessHeadersInContext(); - final Optional remoteClusterAlias = remoteClusterAliasResolver.apply(connection); + final Optional remoteClusterAlias = remoteClusterCredentialsResolver.apply(connection) + .map(RemoteClusterAliasWithCredentials::clusterAlias); if (PreAuthorizationUtils.shouldRemoveParentAuthorizationFromThreadContext(remoteClusterAlias, action, securityContext)) { securityContext.executeAfterRemovingParentAuthorization(original -> { sendRequestInner( @@ -278,22 +276,23 @@ public void sendRequest( * Returns cluster credentials if the connection is remote, and cluster credentials are set up for the target cluster. */ private Optional getRemoteClusterCredentials(Transport.Connection connection) { - final Optional optionalRemoteClusterAlias = remoteClusterAliasResolver.apply(connection); - if (optionalRemoteClusterAlias.isEmpty()) { + final Optional remoteClusterAliasWithCredentials = remoteClusterCredentialsResolver + .apply(connection); + if (remoteClusterAliasWithCredentials.isEmpty()) { logger.trace("Connection is not remote"); return Optional.empty(); } - final String remoteClusterAlias = optionalRemoteClusterAlias.get(); - final Optional remoteClusterCredentials = remoteClusterCredentialsResolver.resolve( - remoteClusterAlias - ); - if (remoteClusterCredentials.isEmpty()) { + final String remoteClusterAlias = remoteClusterAliasWithCredentials.get().clusterAlias(); + final SecureString remoteClusterCredentials = remoteClusterAliasWithCredentials.get().credentials(); + if (remoteClusterCredentials == null) { logger.trace("No cluster credentials are configured for remote cluster [{}]", remoteClusterAlias); return Optional.empty(); } - return remoteClusterCredentials; + return Optional.of( + new RemoteClusterCredentials(remoteClusterAlias, 
ApiKeyService.withApiKeyPrefix(remoteClusterCredentials.toString())) + ); } private void sendWithCrossClusterAccessHeaders( @@ -442,7 +441,7 @@ private void sendWithUser( throw new IllegalStateException("there should always be a user when sending a message for action [" + action + "]"); } - assert securityContext.getParentAuthorization() == null || remoteClusterAliasResolver.apply(connection).isPresent() == false + assert securityContext.getParentAuthorization() == null || remoteClusterCredentialsResolver.apply(connection).isEmpty() : "parent authorization header should not be set for remote cluster requests"; try { @@ -663,4 +662,12 @@ public void onFailure(Exception e) { } } } + + record RemoteClusterCredentials(String clusterAlias, String credentials) { + + @Override + public String toString() { + return "RemoteClusterCredentials{clusterAlias='" + clusterAlias + "', credentials='::es_redacted::'}"; + } + } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/LocalStateSecurity.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/LocalStateSecurity.java index d44e7c27d760e..a2aa04e0f56c3 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/LocalStateSecurity.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/LocalStateSecurity.java @@ -16,6 +16,7 @@ import org.elasticsearch.license.LicenseService; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.ReloadablePlugin; import org.elasticsearch.protocol.xpack.XPackInfoRequest; import org.elasticsearch.protocol.xpack.XPackInfoResponse; import org.elasticsearch.protocol.xpack.XPackUsageRequest; @@ -36,7 +37,7 @@ import java.util.Collections; import java.util.List; -public class LocalStateSecurity extends LocalStateCompositeXPackPlugin { +public class LocalStateSecurity extends LocalStateCompositeXPackPlugin implements 
ReloadablePlugin { public static class SecurityTransportXPackUsageAction extends TransportXPackUsageAction { @Inject @@ -130,4 +131,15 @@ protected Class> public List plugins() { return plugins; } + + @Override + public void reload(Settings settings) throws Exception { + plugins.stream().filter(p -> p instanceof ReloadablePlugin).forEach(p -> { + try { + ((ReloadablePlugin) p).reload(settings); + } catch (Exception e) { + throw new RuntimeException(e); + } + }); + } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java index 6773da137ac96..75e134691225d 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java @@ -9,10 +9,13 @@ import org.apache.logging.log4j.Level; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchSecurityException; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionModule; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; @@ -72,6 +75,7 @@ import org.elasticsearch.xpack.core.security.SecurityContext; import org.elasticsearch.xpack.core.security.SecurityExtension; import org.elasticsearch.xpack.core.security.SecurityField; +import org.elasticsearch.xpack.core.security.action.settings.ReloadRemoteClusterCredentialsAction; import org.elasticsearch.xpack.core.security.authc.Authentication; import org.elasticsearch.xpack.core.security.authc.AuthenticationTestHelper; import 
org.elasticsearch.xpack.core.security.authc.Realm; @@ -116,6 +120,7 @@ import java.util.function.Function; import java.util.function.Predicate; import java.util.stream.Collectors; +import java.util.stream.Stream; import static java.util.Collections.emptyMap; import static org.elasticsearch.xpack.core.security.authc.RealmSettings.getFullSettingKey; @@ -133,7 +138,9 @@ import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; public class SecurityTests extends ESTestCase { @@ -877,6 +884,23 @@ public void testSecurityMustBeEnableToConnectRemoteClusterWithCredentials() { + "Please either enable security or remove these settings from the keystore." ) ); + + // Security off, remote cluster with credentials on reload call + final MockSecureSettings secureSettings5 = new MockSecureSettings(); + secureSettings5.setString("cluster.remote.my1.credentials", randomAlphaOfLength(20)); + secureSettings5.setString("cluster.remote.my2.credentials", randomAlphaOfLength(20)); + final Settings.Builder builder5 = Settings.builder().setSecureSettings(secureSettings5); + // Use builder with security disabled to construct valid Security instance + final var security = new Security(builder2.build()); + final IllegalArgumentException e5 = expectThrows(IllegalArgumentException.class, () -> security.reload(builder5.build())); + assertThat( + e5.getMessage(), + containsString( + "Found [2] remote clusters with credentials [cluster.remote.my1.credentials,cluster.remote.my2.credentials]. " + + "Security [xpack.security.enabled] must be enabled to connect to them. " + + "Please either enable security or remove these settings from the keystore." 
+ ) + ); } public void testLoadExtensions() throws Exception { @@ -905,6 +929,84 @@ public List loadExtensions(Class extensionPointType) { assertThat(registry, instanceOf(DummyOperatorOnlyRegistry.class)); } + public void testReload() throws Exception { + final Settings settings = Settings.builder().put("xpack.security.enabled", true).put("path.home", createTempDir()).build(); + + final PlainActionFuture value = new PlainActionFuture<>(); + value.onResponse(ActionResponse.Empty.INSTANCE); + final Client mockedClient = mock(Client.class); + + final Realms mockedRealms = mock(Realms.class); + when(mockedRealms.stream()).thenReturn(Stream.of()); + when(mockedClient.execute(eq(ReloadRemoteClusterCredentialsAction.INSTANCE), any())).thenReturn(value); + security = new Security(settings, Collections.emptyList()) { + @Override + protected Client getClient() { + return mockedClient; + } + + @Override + protected Realms getRealms() { + return mockedRealms; + } + }; + + final Settings inputSettings = Settings.EMPTY; + security.reload(inputSettings); + + verify(mockedClient).execute(eq(ReloadRemoteClusterCredentialsAction.INSTANCE), any()); + verify(mockedRealms).stream(); + } + + public void testReloadWithFailures() { + final Settings settings = Settings.builder().put("xpack.security.enabled", true).put("path.home", createTempDir()).build(); + + final boolean failRemoteClusterCredentialsReload = randomBoolean(); + final PlainActionFuture value = new PlainActionFuture<>(); + if (failRemoteClusterCredentialsReload) { + value.onFailure(new RuntimeException("failed remote cluster credentials reload")); + } else { + value.onResponse(ActionResponse.Empty.INSTANCE); + } + final Client mockedClient = mock(Client.class); + when(mockedClient.execute(eq(ReloadRemoteClusterCredentialsAction.INSTANCE), any())).thenReturn(value); + + final Realms mockedRealms = mock(Realms.class); + final boolean failRealmsReload = (false == failRemoteClusterCredentialsReload) || randomBoolean(); + if 
(failRealmsReload) { + when(mockedRealms.stream()).thenThrow(new RuntimeException("failed jwt realms reload")); + } else { + when(mockedRealms.stream()).thenReturn(Stream.of()); + } + security = new Security(settings, Collections.emptyList()) { + @Override + protected Client getClient() { + return mockedClient; + } + + @Override + protected Realms getRealms() { + return mockedRealms; + } + }; + + final Settings inputSettings = Settings.EMPTY; + final var exception = expectThrows(ElasticsearchException.class, () -> security.reload(inputSettings)); + + assertThat(exception.getMessage(), containsString("secure settings reload failed for one or more security component")); + if (failRemoteClusterCredentialsReload) { + assertThat(exception.getSuppressed()[0].getMessage(), containsString("failed remote cluster credentials reload")); + if (failRealmsReload) { + assertThat(exception.getSuppressed()[1].getMessage(), containsString("failed jwt realms reload")); + } + } else { + assertThat(exception.getSuppressed()[0].getMessage(), containsString("failed jwt realms reload")); + } + // Verify both called despite failure + verify(mockedClient).execute(eq(ReloadRemoteClusterCredentialsAction.INSTANCE), any()); + verify(mockedRealms).stream(); + } + public void testLoadNoExtensions() throws Exception { Settings settings = Settings.builder() .put("xpack.security.enabled", true) diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/RemoteClusterCredentialsResolverTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/RemoteClusterCredentialsResolverTests.java deleted file mode 100644 index debb50384e217..0000000000000 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/RemoteClusterCredentialsResolverTests.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. 
Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ -package org.elasticsearch.xpack.security.transport; - -import org.elasticsearch.common.settings.MockSecureSettings; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xpack.security.authc.ApiKeyService; - -import java.util.Optional; - -import static org.elasticsearch.xpack.security.transport.RemoteClusterCredentialsResolver.RemoteClusterCredentials; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.is; - -public class RemoteClusterCredentialsResolverTests extends ESTestCase { - - public void testResolveRemoteClusterCredentials() { - final String clusterNameA = "clusterA"; - final String clusterDoesNotExist = randomAlphaOfLength(10); - final Settings.Builder builder = Settings.builder(); - - final String secret = randomAlphaOfLength(20); - final MockSecureSettings secureSettings = new MockSecureSettings(); - secureSettings.setString("cluster.remote." 
+ clusterNameA + ".credentials", secret); - final Settings settings = builder.setSecureSettings(secureSettings).build(); - RemoteClusterCredentialsResolver remoteClusterAuthorizationResolver = new RemoteClusterCredentialsResolver(settings); - final Optional remoteClusterCredentials = remoteClusterAuthorizationResolver.resolve(clusterNameA); - assertThat(remoteClusterCredentials.isPresent(), is(true)); - assertThat(remoteClusterCredentials.get().clusterAlias(), equalTo(clusterNameA)); - assertThat(remoteClusterCredentials.get().credentials(), equalTo(ApiKeyService.withApiKeyPrefix(secret))); - assertThat(remoteClusterAuthorizationResolver.resolve(clusterDoesNotExist), is(Optional.empty())); - } -} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptorTests.java index 9bd5d416940d3..822c04be4363f 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptorTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptorTests.java @@ -18,6 +18,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.ssl.SslClientAuthenticationMode; import org.elasticsearch.common.ssl.SslConfiguration; @@ -33,6 +34,7 @@ import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.RemoteClusterPortSettings; +import org.elasticsearch.transport.RemoteConnectionManager.RemoteClusterAliasWithCredentials; import 
org.elasticsearch.transport.SendRequestTransportException; import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.Transport.Connection; @@ -77,6 +79,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Consumer; +import java.util.function.Function; import static org.elasticsearch.cluster.metadata.DataStreamLifecycle.DATA_STREAM_LIFECYCLE_ORIGIN; import static org.elasticsearch.test.ActionListenerUtils.anyActionListener; @@ -87,7 +90,6 @@ import static org.elasticsearch.xpack.core.security.authc.CrossClusterAccessSubjectInfo.CROSS_CLUSTER_ACCESS_SUBJECT_INFO_HEADER_KEY; import static org.elasticsearch.xpack.core.security.authz.RoleDescriptorTests.randomUniquelyNamedRoleDescriptors; import static org.elasticsearch.xpack.security.authc.CrossClusterAccessHeaders.CROSS_CLUSTER_ACCESS_CREDENTIALS_HEADER_KEY; -import static org.elasticsearch.xpack.security.transport.RemoteClusterCredentialsResolver.RemoteClusterCredentials; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.equalTo; @@ -153,7 +155,6 @@ public void testSendAsync() throws Exception { new ClusterSettings(Settings.EMPTY, Collections.singleton(DestructiveOperations.REQUIRES_NAME_SETTING)) ), mock(CrossClusterAccessAuthenticationService.class), - new RemoteClusterCredentialsResolver(settings), mockLicenseState ); ClusterServiceUtils.setState(clusterService, clusterService.state()); // force state update to trigger listener @@ -205,7 +206,6 @@ public void testSendAsyncSwitchToSystem() throws Exception { new ClusterSettings(Settings.EMPTY, Collections.singleton(DestructiveOperations.REQUIRES_NAME_SETTING)) ), mock(CrossClusterAccessAuthenticationService.class), - new RemoteClusterCredentialsResolver(settings), mockLicenseState ); ClusterServiceUtils.setState(clusterService, clusterService.state()); // force state update to 
trigger listener @@ -250,7 +250,6 @@ public void testSendWithoutUser() throws Exception { new ClusterSettings(Settings.EMPTY, Collections.singleton(DestructiveOperations.REQUIRES_NAME_SETTING)) ), mock(CrossClusterAccessAuthenticationService.class), - new RemoteClusterCredentialsResolver(settings), mockLicenseState ) { @Override @@ -313,7 +312,6 @@ public void testSendToNewerVersionSetsCorrectVersion() throws Exception { new ClusterSettings(Settings.EMPTY, Collections.singleton(DestructiveOperations.REQUIRES_NAME_SETTING)) ), mock(CrossClusterAccessAuthenticationService.class), - new RemoteClusterCredentialsResolver(settings), mockLicenseState ); ClusterServiceUtils.setState(clusterService, clusterService.state()); // force state update to trigger listener @@ -382,7 +380,6 @@ public void testSendToOlderVersionSetsCorrectVersion() throws Exception { new ClusterSettings(Settings.EMPTY, Collections.singleton(DestructiveOperations.REQUIRES_NAME_SETTING)) ), mock(CrossClusterAccessAuthenticationService.class), - new RemoteClusterCredentialsResolver(settings), mockLicenseState ); ClusterServiceUtils.setState(clusterService, clusterService.state()); // force state update to trigger listener @@ -449,7 +446,6 @@ public void testSetUserBasedOnActionOrigin() { new ClusterSettings(Settings.EMPTY, Collections.singleton(DestructiveOperations.REQUIRES_NAME_SETTING)) ), mock(CrossClusterAccessAuthenticationService.class), - new RemoteClusterCredentialsResolver(settings), mockLicenseState ); @@ -604,7 +600,6 @@ public void testSendWithCrossClusterAccessHeadersWithUnsupportedLicense() throws AuthenticationTestHelper.builder().build().writeToContext(threadContext); final String remoteClusterAlias = randomAlphaOfLengthBetween(5, 10); - final RemoteClusterCredentialsResolver remoteClusterCredentialsResolver = mockRemoteClusterCredentialsResolver(remoteClusterAlias); final SecurityServerTransportInterceptor interceptor = new SecurityServerTransportInterceptor( settings, @@ -618,9 +613,8 
@@ public void testSendWithCrossClusterAccessHeadersWithUnsupportedLicense() throws new ClusterSettings(Settings.EMPTY, Collections.singleton(DestructiveOperations.REQUIRES_NAME_SETTING)) ), mock(CrossClusterAccessAuthenticationService.class), - remoteClusterCredentialsResolver, unsupportedLicenseState, - ignored -> Optional.of(remoteClusterAlias) + mockRemoteClusterCredentialsResolver(remoteClusterAlias) ); final AsyncSender sender = interceptor.interceptSender(mock(AsyncSender.class, ignored -> { @@ -661,18 +655,16 @@ public TransportResponse read(StreamInput in) { actualException.get().getCause().getMessage(), equalTo("current license is non-compliant for [" + Security.ADVANCED_REMOTE_CLUSTER_SECURITY_FEATURE.getName() + "]") ); - verify(remoteClusterCredentialsResolver, times(1)).resolve(eq(remoteClusterAlias)); assertThat(securityContext.getThreadContext().getHeader(CROSS_CLUSTER_ACCESS_SUBJECT_INFO_HEADER_KEY), nullValue()); assertThat(securityContext.getThreadContext().getHeader(CROSS_CLUSTER_ACCESS_CREDENTIALS_HEADER_KEY), nullValue()); } - private RemoteClusterCredentialsResolver mockRemoteClusterCredentialsResolver(String remoteClusterAlias) { - final RemoteClusterCredentialsResolver remoteClusterCredentialsResolver = mock(RemoteClusterCredentialsResolver.class); - final String remoteClusterCredential = ApiKeyService.withApiKeyPrefix(randomAlphaOfLengthBetween(10, 42)); - when(remoteClusterCredentialsResolver.resolve(any())).thenReturn( - Optional.of(new RemoteClusterCredentials(remoteClusterAlias, remoteClusterCredential)) + private Function> mockRemoteClusterCredentialsResolver( + String remoteClusterAlias + ) { + return connection -> Optional.of( + new RemoteClusterAliasWithCredentials(remoteClusterAlias, new SecureString(randomAlphaOfLengthBetween(10, 42).toCharArray())) ); - return remoteClusterCredentialsResolver; } public void testSendWithCrossClusterAccessHeadersForSystemUserRegularAction() throws Exception { @@ -736,12 +728,9 @@ private void 
doTestSendWithCrossClusterAccessHeaders( ) throws IOException { authentication.writeToContext(threadContext); final String expectedRequestId = AuditUtil.getOrGenerateRequestId(threadContext); - final RemoteClusterCredentialsResolver remoteClusterCredentialsResolver = mock(RemoteClusterCredentialsResolver.class); final String remoteClusterAlias = randomAlphaOfLengthBetween(5, 10); - final String remoteClusterCredential = ApiKeyService.withApiKeyPrefix(randomAlphaOfLengthBetween(10, 42)); - when(remoteClusterCredentialsResolver.resolve(any())).thenReturn( - Optional.of(new RemoteClusterCredentials(remoteClusterAlias, remoteClusterCredential)) - ); + final String encodedApiKey = randomAlphaOfLengthBetween(10, 42); + final String remoteClusterCredential = ApiKeyService.withApiKeyPrefix(encodedApiKey); final AuthorizationService authzService = mock(AuthorizationService.class); // We capture the listener so that we can complete the full flow, by calling onResponse further down @SuppressWarnings("unchecked") @@ -760,9 +749,8 @@ private void doTestSendWithCrossClusterAccessHeaders( new ClusterSettings(Settings.EMPTY, Collections.singleton(DestructiveOperations.REQUIRES_NAME_SETTING)) ), mock(CrossClusterAccessAuthenticationService.class), - remoteClusterCredentialsResolver, mockLicenseState, - ignored -> Optional.of(remoteClusterAlias) + ignored -> Optional.of(new RemoteClusterAliasWithCredentials(remoteClusterAlias, new SecureString(encodedApiKey.toCharArray()))) ); final AtomicBoolean calledWrappedSender = new AtomicBoolean(false); @@ -861,7 +849,6 @@ public TransportResponse read(StreamInput in) { } assertThat(sentCredential.get(), equalTo(remoteClusterCredential)); verify(securityContext, never()).executeAsInternalUser(any(), any(), anyConsumer()); - verify(remoteClusterCredentialsResolver, times(1)).resolve(eq(remoteClusterAlias)); assertThat(securityContext.getThreadContext().getHeader(CROSS_CLUSTER_ACCESS_SUBJECT_INFO_HEADER_KEY), nullValue()); 
assertThat(securityContext.getThreadContext().getHeader(CROSS_CLUSTER_ACCESS_CREDENTIALS_HEADER_KEY), nullValue()); assertThat(AuditUtil.extractRequestId(securityContext.getThreadContext()), equalTo(expectedRequestId)); @@ -874,15 +861,9 @@ public void testSendWithUserIfCrossClusterAccessHeadersConditionNotMet() throws if (false == (notRemoteConnection || noCredential)) { noCredential = true; } + final boolean finalNoCredential = noCredential; final String remoteClusterAlias = randomAlphaOfLengthBetween(5, 10); - final RemoteClusterCredentialsResolver remoteClusterCredentialsResolver = mock(RemoteClusterCredentialsResolver.class); - when(remoteClusterCredentialsResolver.resolve(any())).thenReturn( - noCredential - ? Optional.empty() - : Optional.of( - new RemoteClusterCredentials(remoteClusterAlias, ApiKeyService.withApiKeyPrefix(randomAlphaOfLengthBetween(10, 42))) - ) - ); + final String encodedApiKey = randomAlphaOfLengthBetween(10, 42); final AuthenticationTestHelper.AuthenticationTestBuilder builder = AuthenticationTestHelper.builder(); final Authentication authentication = randomFrom( builder.apiKey().build(), @@ -904,9 +885,12 @@ public void testSendWithUserIfCrossClusterAccessHeadersConditionNotMet() throws new ClusterSettings(Settings.EMPTY, Collections.singleton(DestructiveOperations.REQUIRES_NAME_SETTING)) ), mock(CrossClusterAccessAuthenticationService.class), - remoteClusterCredentialsResolver, mockLicenseState, - ignored -> notRemoteConnection ? Optional.empty() : Optional.of(remoteClusterAlias) + ignored -> notRemoteConnection + ? Optional.empty() + : (finalNoCredential + ? 
Optional.of(new RemoteClusterAliasWithCredentials(remoteClusterAlias, null)) + : Optional.of(new RemoteClusterAliasWithCredentials(remoteClusterAlias, new SecureString(encodedApiKey.toCharArray())))) ); final AtomicBoolean calledWrappedSender = new AtomicBoolean(false); @@ -944,12 +928,9 @@ public void testSendWithCrossClusterAccessHeadersThrowsOnOldConnection() throws .realm() .build(); authentication.writeToContext(threadContext); - final RemoteClusterCredentialsResolver remoteClusterCredentialsResolver = mock(RemoteClusterCredentialsResolver.class); final String remoteClusterAlias = randomAlphaOfLengthBetween(5, 10); - final String remoteClusterCredential = ApiKeyService.withApiKeyPrefix(randomAlphaOfLengthBetween(10, 42)); - when(remoteClusterCredentialsResolver.resolve(any())).thenReturn( - Optional.of(new RemoteClusterCredentials(remoteClusterAlias, remoteClusterCredential)) - ); + final String encodedApiKey = randomAlphaOfLengthBetween(10, 42); + final String remoteClusterCredential = ApiKeyService.withApiKeyPrefix(encodedApiKey); final SecurityServerTransportInterceptor interceptor = new SecurityServerTransportInterceptor( settings, @@ -963,9 +944,8 @@ public void testSendWithCrossClusterAccessHeadersThrowsOnOldConnection() throws new ClusterSettings(Settings.EMPTY, Collections.singleton(DestructiveOperations.REQUIRES_NAME_SETTING)) ), mock(CrossClusterAccessAuthenticationService.class), - remoteClusterCredentialsResolver, mockLicenseState, - ignored -> Optional.of(remoteClusterAlias) + ignored -> Optional.of(new RemoteClusterAliasWithCredentials(remoteClusterAlias, new SecureString(encodedApiKey.toCharArray()))) ); final AsyncSender sender = interceptor.interceptSender(new AsyncSender() { @@ -1029,7 +1009,6 @@ public TransportResponse read(StreamInput in) { + "] does not support receiving them" ) ); - verify(remoteClusterCredentialsResolver, times(1)).resolve(eq(remoteClusterAlias)); 
assertThat(securityContext.getThreadContext().getHeader(CROSS_CLUSTER_ACCESS_SUBJECT_INFO_HEADER_KEY), nullValue()); assertThat(securityContext.getThreadContext().getHeader(CROSS_CLUSTER_ACCESS_CREDENTIALS_HEADER_KEY), nullValue()); } @@ -1040,12 +1019,9 @@ public void testSendRemoteRequestFailsIfUserHasNoRemoteIndicesPrivileges() throw .realm() .build(); authentication.writeToContext(threadContext); - final RemoteClusterCredentialsResolver remoteClusterCredentialsResolver = mock(RemoteClusterCredentialsResolver.class); final String remoteClusterAlias = randomAlphaOfLengthBetween(5, 10); - final String remoteClusterCredential = ApiKeyService.withApiKeyPrefix(randomAlphaOfLengthBetween(10, 42)); - when(remoteClusterCredentialsResolver.resolve(any())).thenReturn( - Optional.of(new RemoteClusterCredentials(remoteClusterAlias, remoteClusterCredential)) - ); + final String encodedApiKey = randomAlphaOfLengthBetween(10, 42); + final String remoteClusterCredential = ApiKeyService.withApiKeyPrefix(encodedApiKey); final AuthorizationService authzService = mock(AuthorizationService.class); doAnswer(invocation -> { @@ -1067,9 +1043,8 @@ public void testSendRemoteRequestFailsIfUserHasNoRemoteIndicesPrivileges() throw new ClusterSettings(Settings.EMPTY, Collections.singleton(DestructiveOperations.REQUIRES_NAME_SETTING)) ), mock(CrossClusterAccessAuthenticationService.class), - remoteClusterCredentialsResolver, mockLicenseState, - ignored -> Optional.of(remoteClusterAlias) + ignored -> Optional.of(new RemoteClusterAliasWithCredentials(remoteClusterAlias, new SecureString(encodedApiKey.toCharArray()))) ); final AsyncSender sender = interceptor.interceptSender(new AsyncSender() { @@ -1171,7 +1146,6 @@ public void testProfileFiltersCreatedDifferentlyForDifferentTransportAndRemoteCl new ClusterSettings(Settings.EMPTY, Collections.singleton(DestructiveOperations.REQUIRES_NAME_SETTING)) ), mock(CrossClusterAccessAuthenticationService.class), - new 
RemoteClusterCredentialsResolver(settings), mockLicenseState ); @@ -1225,7 +1199,6 @@ public void testNoProfileFilterForRemoteClusterWhenTheFeatureIsDisabled() { new ClusterSettings(Settings.EMPTY, Collections.singleton(DestructiveOperations.REQUIRES_NAME_SETTING)) ), mock(CrossClusterAccessAuthenticationService.class), - new RemoteClusterCredentialsResolver(settings), mockLicenseState ); From 18691593e850bba997d0c7127379e478e7a974e4 Mon Sep 17 00:00:00 2001 From: Stuart Tettemer Date: Fri, 8 Dec 2023 09:22:22 -0600 Subject: [PATCH 12/14] Metrics: Allow AsyncCounters to switch providers (#103025) {Double,Long}AsyncCounters were not in the registrar list so they were not included when switching providers. Other changes: * Moved DoubleAsyncCounter's register and get to match the order of the rest of APMMeterRegistry. * Fixed RecordingOtelMeter's Histogram and DoubleUpDownCounter * Fixed String format in RecordingMeter's asserts. --- docs/changelog/103025.yaml | 5 ++ .../telemetry/apm/APMMeterRegistry.java | 61 ++++++++------- .../telemetry/apm/APMMeterRegistryTests.java | 77 ++++++++++++++++++- .../telemetry/apm/RecordingOtelMeter.java | 10 ++- .../telemetry/MetricRecorder.java | 4 +- 5 files changed, 124 insertions(+), 33 deletions(-) create mode 100644 docs/changelog/103025.yaml diff --git a/docs/changelog/103025.yaml b/docs/changelog/103025.yaml new file mode 100644 index 0000000000000..856a7c022d5dd --- /dev/null +++ b/docs/changelog/103025.yaml @@ -0,0 +1,5 @@ +pr: 103025 +summary: "Metrics: Allow `AsyncCounters` to switch providers" +area: Infra/Core +type: bug +issues: [] diff --git a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/APMMeterRegistry.java b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/APMMeterRegistry.java index 51f008db646fa..cd6d3d209b3ed 100644 --- a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/APMMeterRegistry.java +++ b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/APMMeterRegistry.java @@ 
-48,12 +48,12 @@ */ public class APMMeterRegistry implements MeterRegistry { private final Registrar doubleCounters = new Registrar<>(); + private final Registrar doubleAsynchronousCounters = new Registrar<>(); private final Registrar doubleUpDownCounters = new Registrar<>(); private final Registrar doubleGauges = new Registrar<>(); private final Registrar doubleHistograms = new Registrar<>(); private final Registrar longCounters = new Registrar<>(); private final Registrar longAsynchronousCounters = new Registrar<>(); - private final Registrar doubleAsynchronousCounters = new Registrar<>(); private final Registrar longUpDownCounters = new Registrar<>(); private final Registrar longGauges = new Registrar<>(); private final Registrar longHistograms = new Registrar<>(); @@ -66,10 +66,12 @@ public APMMeterRegistry(Meter meter) { private final List> registrars = List.of( doubleCounters, + doubleAsynchronousCounters, doubleUpDownCounters, doubleGauges, doubleHistograms, longCounters, + longAsynchronousCounters, longUpDownCounters, longGauges, longHistograms @@ -81,7 +83,7 @@ public APMMeterRegistry(Meter meter) { @Override public DoubleCounter registerDoubleCounter(String name, String description, String unit) { try (ReleasableLock lock = registerLock.acquire()) { - return doubleCounters.register(new DoubleCounterAdapter(meter, name, description, unit)); + return register(doubleCounters, new DoubleCounterAdapter(meter, name, description, unit)); } } @@ -90,10 +92,27 @@ public DoubleCounter getDoubleCounter(String name) { return doubleCounters.get(name); } + @Override + public DoubleAsyncCounter registerDoubleAsyncCounter( + String name, + String description, + String unit, + Supplier observer + ) { + try (ReleasableLock lock = registerLock.acquire()) { + return register(doubleAsynchronousCounters, new DoubleAsyncCounterAdapter(meter, name, description, unit, observer)); + } + } + + @Override + public DoubleAsyncCounter getDoubleAsyncCounter(String name) { + return 
doubleAsynchronousCounters.get(name); + } + @Override public DoubleUpDownCounter registerDoubleUpDownCounter(String name, String description, String unit) { try (ReleasableLock lock = registerLock.acquire()) { - return doubleUpDownCounters.register(new DoubleUpDownCounterAdapter(meter, name, description, unit)); + return register(doubleUpDownCounters, new DoubleUpDownCounterAdapter(meter, name, description, unit)); } } @@ -105,7 +124,7 @@ public DoubleUpDownCounter getDoubleUpDownCounter(String name) { @Override public DoubleGauge registerDoubleGauge(String name, String description, String unit, Supplier observer) { try (ReleasableLock lock = registerLock.acquire()) { - return doubleGauges.register(new DoubleGaugeAdapter(meter, name, description, unit, observer)); + return register(doubleGauges, new DoubleGaugeAdapter(meter, name, description, unit, observer)); } } @@ -117,7 +136,7 @@ public DoubleGauge getDoubleGauge(String name) { @Override public DoubleHistogram registerDoubleHistogram(String name, String description, String unit) { try (ReleasableLock lock = registerLock.acquire()) { - return doubleHistograms.register(new DoubleHistogramAdapter(meter, name, description, unit)); + return register(doubleHistograms, new DoubleHistogramAdapter(meter, name, description, unit)); } } @@ -129,14 +148,14 @@ public DoubleHistogram getDoubleHistogram(String name) { @Override public LongCounter registerLongCounter(String name, String description, String unit) { try (ReleasableLock lock = registerLock.acquire()) { - return longCounters.register(new LongCounterAdapter(meter, name, description, unit)); + return register(longCounters, new LongCounterAdapter(meter, name, description, unit)); } } @Override public LongAsyncCounter registerLongAsyncCounter(String name, String description, String unit, Supplier observer) { try (ReleasableLock lock = registerLock.acquire()) { - return longAsynchronousCounters.register(new LongAsyncCounterAdapter(meter, name, description, unit, 
observer)); + return register(longAsynchronousCounters, new LongAsyncCounterAdapter(meter, name, description, unit, observer)); } } @@ -145,23 +164,6 @@ public LongAsyncCounter getLongAsyncCounter(String name) { return longAsynchronousCounters.get(name); } - @Override - public DoubleAsyncCounter registerDoubleAsyncCounter( - String name, - String description, - String unit, - Supplier observer - ) { - try (ReleasableLock lock = registerLock.acquire()) { - return doubleAsynchronousCounters.register(new DoubleAsyncCounterAdapter(meter, name, description, unit, observer)); - } - } - - @Override - public DoubleAsyncCounter getDoubleAsyncCounter(String name) { - return doubleAsynchronousCounters.get(name); - } - @Override public LongCounter getLongCounter(String name) { return longCounters.get(name); @@ -170,7 +172,7 @@ public LongCounter getLongCounter(String name) { @Override public LongUpDownCounter registerLongUpDownCounter(String name, String description, String unit) { try (ReleasableLock lock = registerLock.acquire()) { - return longUpDownCounters.register(new LongUpDownCounterAdapter(meter, name, description, unit)); + return register(longUpDownCounters, new LongUpDownCounterAdapter(meter, name, description, unit)); } } @@ -182,7 +184,7 @@ public LongUpDownCounter getLongUpDownCounter(String name) { @Override public LongGauge registerLongGauge(String name, String description, String unit, Supplier observer) { try (ReleasableLock lock = registerLock.acquire()) { - return longGauges.register(new LongGaugeAdapter(meter, name, description, unit, observer)); + return register(longGauges, new LongGaugeAdapter(meter, name, description, unit, observer)); } } @@ -194,7 +196,7 @@ public LongGauge getLongGauge(String name) { @Override public LongHistogram registerLongHistogram(String name, String description, String unit) { try (ReleasableLock lock = registerLock.acquire()) { - return longHistograms.register(new LongHistogramAdapter(meter, name, description, unit)); + 
return register(longHistograms, new LongHistogramAdapter(meter, name, description, unit)); } } @@ -203,6 +205,11 @@ public LongHistogram getLongHistogram(String name) { return longHistograms.get(name); } + private > T register(Registrar registrar, T adapter) { + assert registrars.contains(registrar) : "usage of unknown registrar"; + return registrar.register(adapter); + } + public void setProvider(Meter meter) { try (ReleasableLock lock = registerLock.acquire()) { this.meter = meter; diff --git a/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/APMMeterRegistryTests.java b/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/APMMeterRegistryTests.java index 82dd911d1b821..778ca108dc5fe 100644 --- a/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/APMMeterRegistryTests.java +++ b/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/APMMeterRegistryTests.java @@ -12,19 +12,37 @@ import io.opentelemetry.api.metrics.Meter; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.telemetry.Measurement; import org.elasticsearch.telemetry.apm.internal.APMAgentSettings; import org.elasticsearch.telemetry.apm.internal.APMMeterService; import org.elasticsearch.telemetry.apm.internal.TestAPMMeterService; +import org.elasticsearch.telemetry.metric.DoubleAsyncCounter; import org.elasticsearch.telemetry.metric.DoubleCounter; +import org.elasticsearch.telemetry.metric.DoubleGauge; +import org.elasticsearch.telemetry.metric.DoubleHistogram; +import org.elasticsearch.telemetry.metric.DoubleUpDownCounter; +import org.elasticsearch.telemetry.metric.DoubleWithAttributes; +import org.elasticsearch.telemetry.metric.Instrument; +import org.elasticsearch.telemetry.metric.LongAsyncCounter; import org.elasticsearch.telemetry.metric.LongCounter; +import org.elasticsearch.telemetry.metric.LongGauge; +import org.elasticsearch.telemetry.metric.LongHistogram; +import org.elasticsearch.telemetry.metric.LongUpDownCounter; +import 
org.elasticsearch.telemetry.metric.LongWithAttributes; import org.elasticsearch.test.ESTestCase; +import java.util.Collections; +import java.util.List; +import java.util.function.Supplier; + import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.sameInstance; public class APMMeterRegistryTests extends ESTestCase { - Meter testOtel = new RecordingOtelMeter(); + RecordingOtelMeter testOtel = new RecordingOtelMeter(); Meter noopOtel = OpenTelemetry.noop().getMeter("noop"); @@ -97,4 +115,61 @@ public void testMaxNameLength() { ); assertThat(iae.getMessage(), containsString("exceeds maximum length [255]")); } + + public void testAllInstrumentsSwitchProviders() { + TestAPMMeterService apmMeter = new TestAPMMeterService( + Settings.builder().put(APMAgentSettings.TELEMETRY_METRICS_ENABLED_SETTING.getKey(), false).build(), + () -> testOtel, + () -> noopOtel + ); + APMMeterRegistry registry = apmMeter.getMeterRegistry(); + + Supplier doubleObserver = () -> new DoubleWithAttributes(1.5, Collections.emptyMap()); + DoubleCounter dc = registry.registerDoubleCounter("dc", "", ""); + DoubleUpDownCounter dudc = registry.registerDoubleUpDownCounter("dudc", "", ""); + DoubleHistogram dh = registry.registerDoubleHistogram("dh", "", ""); + DoubleAsyncCounter dac = registry.registerDoubleAsyncCounter("dac", "", "", doubleObserver); + DoubleGauge dg = registry.registerDoubleGauge("dg", "", "", doubleObserver); + + Supplier longObserver = () -> new LongWithAttributes(100, Collections.emptyMap()); + LongCounter lc = registry.registerLongCounter("lc", "", ""); + LongUpDownCounter ludc = registry.registerLongUpDownCounter("ludc", "", ""); + LongHistogram lh = registry.registerLongHistogram("lh", "", ""); + LongAsyncCounter lac = registry.registerLongAsyncCounter("lac", "", "", longObserver); + LongGauge lg = 
registry.registerLongGauge("lg", "", "", longObserver); + + apmMeter.setEnabled(true); + + testOtel.collectMetrics(); + dc.increment(); + dudc.add(1.0); + dh.record(1.0); + lc.increment(); + ludc.add(1); + lh.record(1); + + hasOneDouble(dc, 1.0d); + hasOneDouble(dudc, 1.0d); + hasOneDouble(dh, 1.0d); + hasOneDouble(dac, 1.5d); + hasOneDouble(dg, 1.5d); + + hasOneLong(lc, 1L); + hasOneLong(ludc, 1L); + hasOneLong(lh, 1L); + hasOneLong(lac, 100L); + hasOneLong(lg, 100L); + } + + private void hasOneDouble(Instrument instrument, double value) { + List measurements = testOtel.getRecorder().getMeasurements(instrument); + assertThat(measurements, hasSize(1)); + assertThat(measurements.get(0).getDouble(), equalTo(value)); + } + + private void hasOneLong(Instrument instrument, long value) { + List measurements = testOtel.getRecorder().getMeasurements(instrument); + assertThat(measurements, hasSize(1)); + assertThat(measurements.get(0).getLong(), equalTo(value)); + } } diff --git a/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/RecordingOtelMeter.java b/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/RecordingOtelMeter.java index 94627c1e53813..793803fb8c277 100644 --- a/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/RecordingOtelMeter.java +++ b/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/RecordingOtelMeter.java @@ -355,7 +355,7 @@ public ObservableDoubleMeasurement buildObserver() { private class DoubleUpDownRecorder extends AbstractInstrument implements DoubleUpDownCounter, OtelInstrument { DoubleUpDownRecorder(String name) { - super(name, InstrumentType.LONG_UP_DOWN_COUNTER); + super(name, InstrumentType.DOUBLE_UP_DOWN_COUNTER); } protected DoubleUpDownRecorder(String name, InstrumentType instrument) { @@ -616,7 +616,9 @@ public LongHistogramBuilder ofLongs() { @Override public DoubleHistogram build() { - return new DoubleHistogramRecorder(name); + DoubleHistogramRecorder histogram = new DoubleHistogramRecorder(name); + 
recorder.register(histogram, histogram.getInstrument(), name, description, unit); + return histogram; } } @@ -661,7 +663,9 @@ public LongHistogramBuilder setUnit(String unit) { @Override public LongHistogram build() { - return new LongHistogramRecorder(name); + LongHistogramRecorder histogram = new LongHistogramRecorder(name); + recorder.register(histogram, histogram.getInstrument(), name, description, unit); + return histogram; } } diff --git a/test/framework/src/main/java/org/elasticsearch/telemetry/MetricRecorder.java b/test/framework/src/main/java/org/elasticsearch/telemetry/MetricRecorder.java index cf8f23f06155d..aa14d0067b68e 100644 --- a/test/framework/src/main/java/org/elasticsearch/telemetry/MetricRecorder.java +++ b/test/framework/src/main/java/org/elasticsearch/telemetry/MetricRecorder.java @@ -39,7 +39,7 @@ private record RegisteredMetric( ) { void register(String name, String description, String unit, I instrument) { assert registered.containsKey(name) == false - : Strings.format("unexpected [{}]: [{}][{}], already registered[{}]", name, description, unit, registered.get(name)); + : Strings.format("unexpected [%s]: [%s][%s], already registered[%s]", name, description, unit, registered.get(name)); registered.put(name, new Registration(name, description, unit)); instruments.put(name, instrument); if (instrument instanceof Runnable callback) { @@ -48,7 +48,7 @@ void register(String name, String description, String unit, I instrument) { } void call(String name, Measurement call) { - assert registered.containsKey(name) : Strings.format("call for unregistered metric [{}]: [{}]", name, call); + assert registered.containsKey(name) : Strings.format("call for unregistered metric [%s]: [%s]", name, call); called.computeIfAbsent(Objects.requireNonNull(name), k -> new CopyOnWriteArrayList<>()).add(call); } From ab12d4e3620dc9deb89f4129d1f4c104b0e17913 Mon Sep 17 00:00:00 2001 From: Stuart Tettemer Date: Fri, 8 Dec 2023 09:24:10 -0600 Subject: [PATCH 13/14] 
Metrics: Handle null observations in observers (#103091) Avoid null pointer error when an observation is null [0]. * To get the tests working, replace the this-escape buildInstrument with a Builder. * Removes locking around close --- docs/changelog/103091.yaml | 5 ++ .../telemetry/apm/AbstractInstrument.java | 61 ++++++++++--------- .../metrics/DoubleAsyncCounterAdapter.java | 45 ++++++-------- .../metrics/DoubleCounterAdapter.java | 22 ++++--- .../internal/metrics/DoubleGaugeAdapter.java | 48 ++++++--------- .../metrics/DoubleHistogramAdapter.java | 19 +++--- .../metrics/DoubleUpDownCounterAdapter.java | 23 +++---- .../metrics/LongAsyncCounterAdapter.java | 44 ++++++------- .../internal/metrics/LongCounterAdapter.java | 20 +++--- .../internal/metrics/LongGaugeAdapter.java | 50 ++++++--------- .../metrics/LongHistogramAdapter.java | 24 ++++---- .../metrics/LongUpDownCounterAdapter.java | 19 +++--- .../apm/internal/metrics/OtelHelper.java | 45 ++++++++++++++ .../metrics/AsyncCountersAdapterTests.java | 22 +++++++ .../internal/metrics/GaugeAdapterTests.java | 12 ++++ 15 files changed, 262 insertions(+), 197 deletions(-) create mode 100644 docs/changelog/103091.yaml diff --git a/docs/changelog/103091.yaml b/docs/changelog/103091.yaml new file mode 100644 index 0000000000000..ae4ac11933d4e --- /dev/null +++ b/docs/changelog/103091.yaml @@ -0,0 +1,5 @@ +pr: 103091 +summary: "Metrics: Handle null observations in observers" +area: Infra/Core +type: bug +issues: [] diff --git a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/AbstractInstrument.java b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/AbstractInstrument.java index bbeaba0f6f088..72c6ccf905873 100644 --- a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/AbstractInstrument.java +++ b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/AbstractInstrument.java @@ -17,6 +17,7 @@ import java.security.PrivilegedAction; import java.util.Objects; import 
java.util.concurrent.atomic.AtomicReference; +import java.util.function.Function; /** * An instrument that contains the name, description and unit. The delegate may be replaced when @@ -25,27 +26,14 @@ * @param delegated instrument */ public abstract class AbstractInstrument implements Instrument { - private static final int MAX_NAME_LENGTH = 255; - private final AtomicReference delegate; + private final AtomicReference delegate = new AtomicReference<>(); private final String name; - private final String description; - private final String unit; + private final Function instrumentBuilder; - @SuppressWarnings("this-escape") - public AbstractInstrument(Meter meter, String name, String description, String unit) { - this.name = Objects.requireNonNull(name); - if (name.length() > MAX_NAME_LENGTH) { - throw new IllegalArgumentException( - "Instrument name [" + name + "] with length [" + name.length() + "] exceeds maximum length [" + MAX_NAME_LENGTH + "]" - ); - } - this.description = Objects.requireNonNull(description); - this.unit = Objects.requireNonNull(unit); - this.delegate = new AtomicReference<>(doBuildInstrument(meter)); - } - - private T doBuildInstrument(Meter meter) { - return AccessController.doPrivileged((PrivilegedAction) () -> buildInstrument(meter)); + public AbstractInstrument(Meter meter, Builder builder) { + this.name = builder.getName(); + this.instrumentBuilder = m -> AccessController.doPrivileged((PrivilegedAction) () -> builder.build(m)); + this.delegate.set(this.instrumentBuilder.apply(meter)); } @Override @@ -53,21 +41,36 @@ public String getName() { return name; } - public String getUnit() { - return unit.toString(); - } - protected T getInstrument() { return delegate.get(); } - protected String getDescription() { - return description; - } - void setProvider(@Nullable Meter meter) { - delegate.set(doBuildInstrument(Objects.requireNonNull(meter))); + delegate.set(instrumentBuilder.apply(Objects.requireNonNull(meter))); } - protected abstract T 
buildInstrument(Meter meter); + protected abstract static class Builder { + private static final int MAX_NAME_LENGTH = 255; + + protected final String name; + protected final String description; + protected final String unit; + + public Builder(String name, String description, String unit) { + if (name.length() > MAX_NAME_LENGTH) { + throw new IllegalArgumentException( + "Instrument name [" + name + "] with length [" + name.length() + "] exceeds maximum length [" + MAX_NAME_LENGTH + "]" + ); + } + this.name = Objects.requireNonNull(name); + this.description = Objects.requireNonNull(description); + this.unit = Objects.requireNonNull(unit); + } + + public String getName() { + return name; + } + + public abstract T build(Meter meter); + } } diff --git a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/DoubleAsyncCounterAdapter.java b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/DoubleAsyncCounterAdapter.java index a1ea9f33e31fb..6b17a83619ef7 100644 --- a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/DoubleAsyncCounterAdapter.java +++ b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/DoubleAsyncCounterAdapter.java @@ -11,47 +11,40 @@ import io.opentelemetry.api.metrics.Meter; import io.opentelemetry.api.metrics.ObservableDoubleCounter; -import org.elasticsearch.common.util.concurrent.ReleasableLock; import org.elasticsearch.telemetry.apm.AbstractInstrument; import org.elasticsearch.telemetry.metric.DoubleAsyncCounter; import org.elasticsearch.telemetry.metric.DoubleWithAttributes; import java.util.Objects; -import java.util.concurrent.locks.ReentrantLock; import java.util.function.Supplier; public class DoubleAsyncCounterAdapter extends AbstractInstrument implements DoubleAsyncCounter { - private final Supplier observer; - private final ReleasableLock closedLock = new ReleasableLock(new ReentrantLock()); - private boolean closed = false; public 
DoubleAsyncCounterAdapter(Meter meter, String name, String description, String unit, Supplier observer) { - super(meter, name, description, unit); - this.observer = observer; + super(meter, new Builder(name, description, unit, observer)); } @Override - protected ObservableDoubleCounter buildInstrument(Meter meter) { - var builder = Objects.requireNonNull(meter).counterBuilder(getName()); - return builder.setDescription(getDescription()).setUnit(getUnit()).ofDoubles().buildWithCallback(measurement -> { - DoubleWithAttributes observation; - try { - observation = observer.get(); - } catch (RuntimeException err) { - assert false : "observer must not throw [" + err.getMessage() + "]"; - return; - } - measurement.record(observation.value(), OtelHelper.fromMap(observation.attributes())); - }); + public void close() throws Exception { + getInstrument().close(); } - @Override - public void close() throws Exception { - try (ReleasableLock lock = closedLock.acquire()) { - if (closed == false) { - getInstrument().close(); - } - closed = true; + private static class Builder extends AbstractInstrument.Builder { + private final Supplier observer; + + private Builder(String name, String description, String unit, Supplier observer) { + super(name, description, unit); + this.observer = Objects.requireNonNull(observer); + } + + @Override + public ObservableDoubleCounter build(Meter meter) { + return Objects.requireNonNull(meter) + .counterBuilder(name) + .setDescription(description) + .setUnit(unit) + .ofDoubles() + .buildWithCallback(OtelHelper.doubleMeasurementCallback(observer)); } } } diff --git a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/DoubleCounterAdapter.java b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/DoubleCounterAdapter.java index faba8c2e3e67e..398419f6c3f69 100644 --- a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/DoubleCounterAdapter.java +++ 
b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/DoubleCounterAdapter.java @@ -22,16 +22,7 @@ public class DoubleCounterAdapter extends AbstractInstrument implements org.elasticsearch.telemetry.metric.DoubleCounter { public DoubleCounterAdapter(Meter meter, String name, String description, String unit) { - super(meter, name, description, unit); - } - - protected io.opentelemetry.api.metrics.DoubleCounter buildInstrument(Meter meter) { - return Objects.requireNonNull(meter) - .counterBuilder(getName()) - .ofDoubles() - .setDescription(getDescription()) - .setUnit(getUnit()) - .build(); + super(meter, new Builder(name, description, unit)); } @Override @@ -50,4 +41,15 @@ public void incrementBy(double inc, Map attributes) { assert inc >= 0; getInstrument().add(inc, OtelHelper.fromMap(attributes)); } + + private static class Builder extends AbstractInstrument.Builder { + private Builder(String name, String description, String unit) { + super(name, description, unit); + } + + @Override + public DoubleCounter build(Meter meter) { + return Objects.requireNonNull(meter).counterBuilder(name).ofDoubles().setDescription(description).setUnit(unit).build(); + } + } } diff --git a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/DoubleGaugeAdapter.java b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/DoubleGaugeAdapter.java index faef8bd723fcf..ed6ecee66d696 100644 --- a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/DoubleGaugeAdapter.java +++ b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/DoubleGaugeAdapter.java @@ -11,12 +11,10 @@ import io.opentelemetry.api.metrics.Meter; import io.opentelemetry.api.metrics.ObservableDoubleGauge; -import org.elasticsearch.common.util.concurrent.ReleasableLock; import org.elasticsearch.telemetry.apm.AbstractInstrument; import org.elasticsearch.telemetry.metric.DoubleWithAttributes; import 
java.util.Objects; -import java.util.concurrent.locks.ReentrantLock; import java.util.function.Supplier; /** @@ -26,40 +24,30 @@ public class DoubleGaugeAdapter extends AbstractInstrument observer; - private final ReleasableLock closedLock = new ReleasableLock(new ReentrantLock()); - private boolean closed = false; - public DoubleGaugeAdapter(Meter meter, String name, String description, String unit, Supplier observer) { - super(meter, name, description, unit); - this.observer = observer; + super(meter, new Builder(name, description, unit, observer)); } @Override - protected io.opentelemetry.api.metrics.ObservableDoubleGauge buildInstrument(Meter meter) { - return Objects.requireNonNull(meter) - .gaugeBuilder(getName()) - .setDescription(getDescription()) - .setUnit(getUnit()) - .buildWithCallback(measurement -> { - DoubleWithAttributes observation; - try { - observation = observer.get(); - } catch (RuntimeException err) { - assert false : "observer must not throw [" + err.getMessage() + "]"; - return; - } - measurement.record(observation.value(), OtelHelper.fromMap(observation.attributes())); - }); + public void close() throws Exception { + getInstrument().close(); } - @Override - public void close() throws Exception { - try (ReleasableLock lock = closedLock.acquire()) { - if (closed == false) { - getInstrument().close(); - } - closed = true; + private static class Builder extends AbstractInstrument.Builder { + private final Supplier observer; + + private Builder(String name, String description, String unit, Supplier observer) { + super(name, description, unit); + this.observer = Objects.requireNonNull(observer); + } + + @Override + public ObservableDoubleGauge build(Meter meter) { + return Objects.requireNonNull(meter) + .gaugeBuilder(name) + .setDescription(description) + .setUnit(unit) + .buildWithCallback(OtelHelper.doubleMeasurementCallback(observer)); } } } diff --git 
a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/DoubleHistogramAdapter.java b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/DoubleHistogramAdapter.java index e126aa6af7cf0..47b54a84d0aa9 100644 --- a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/DoubleHistogramAdapter.java +++ b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/DoubleHistogramAdapter.java @@ -24,13 +24,7 @@ public class DoubleHistogramAdapter extends AbstractInstrument org.elasticsearch.telemetry.metric.DoubleHistogram { public DoubleHistogramAdapter(Meter meter, String name, String description, String unit) { - super(meter, name, description, unit); - } - - @Override - protected io.opentelemetry.api.metrics.DoubleHistogram buildInstrument(Meter meter) { - var builder = Objects.requireNonNull(meter).histogramBuilder(getName()); - return builder.setDescription(getDescription()).setUnit(getUnit()).build(); + super(meter, new Builder(name, description, unit)); } @Override @@ -42,4 +36,15 @@ public void record(double value) { public void record(double value, Map attributes) { getInstrument().record(value, OtelHelper.fromMap(attributes)); } + + private static class Builder extends AbstractInstrument.Builder { + private Builder(String name, String description, String unit) { + super(name, description, unit); + } + + @Override + public DoubleHistogram build(Meter meter) { + return Objects.requireNonNull(meter).histogramBuilder(name).setDescription(description).setUnit(unit).build(); + } + } } diff --git a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/DoubleUpDownCounterAdapter.java b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/DoubleUpDownCounterAdapter.java index a204627a04f1e..7cb7100ba2631 100644 --- a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/DoubleUpDownCounterAdapter.java +++ 
b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/DoubleUpDownCounterAdapter.java @@ -24,17 +24,7 @@ public class DoubleUpDownCounterAdapter extends AbstractInstrument attributes) { getInstrument().add(inc, OtelHelper.fromMap(attributes)); } + + private static class Builder extends AbstractInstrument.Builder { + private Builder(String name, String description, String unit) { + super(name, description, unit); + } + + @Override + public DoubleUpDownCounter build(Meter meter) { + return Objects.requireNonNull(meter).upDownCounterBuilder(name).ofDoubles().setDescription(description).setUnit(unit).build(); + } + } } diff --git a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/LongAsyncCounterAdapter.java b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/LongAsyncCounterAdapter.java index 126cca1964283..14c58139d03e1 100644 --- a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/LongAsyncCounterAdapter.java +++ b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/LongAsyncCounterAdapter.java @@ -11,47 +11,39 @@ import io.opentelemetry.api.metrics.Meter; import io.opentelemetry.api.metrics.ObservableLongCounter; -import org.elasticsearch.common.util.concurrent.ReleasableLock; import org.elasticsearch.telemetry.apm.AbstractInstrument; import org.elasticsearch.telemetry.metric.LongAsyncCounter; import org.elasticsearch.telemetry.metric.LongWithAttributes; import java.util.Objects; -import java.util.concurrent.locks.ReentrantLock; import java.util.function.Supplier; public class LongAsyncCounterAdapter extends AbstractInstrument implements LongAsyncCounter { - private final Supplier observer; - private final ReleasableLock closedLock = new ReleasableLock(new ReentrantLock()); - private boolean closed = false; public LongAsyncCounterAdapter(Meter meter, String name, String description, String unit, Supplier observer) { - super(meter, name, 
description, unit); - this.observer = observer; + super(meter, new Builder(name, description, unit, observer)); } @Override - protected ObservableLongCounter buildInstrument(Meter meter) { - var builder = Objects.requireNonNull(meter).counterBuilder(getName()); - return builder.setDescription(getDescription()).setUnit(getUnit()).buildWithCallback(measurement -> { - LongWithAttributes observation; - try { - observation = observer.get(); - } catch (RuntimeException err) { - assert false : "observer must not throw [" + err.getMessage() + "]"; - return; - } - measurement.record(observation.value(), OtelHelper.fromMap(observation.attributes())); - }); + public void close() throws Exception { + getInstrument().close(); } - @Override - public void close() throws Exception { - try (ReleasableLock lock = closedLock.acquire()) { - if (closed == false) { - getInstrument().close(); - } - closed = true; + private static class Builder extends AbstractInstrument.Builder { + private final Supplier observer; + + private Builder(String name, String description, String unit, Supplier observer) { + super(name, description, unit); + this.observer = Objects.requireNonNull(observer); + } + + @Override + public ObservableLongCounter build(Meter meter) { + return Objects.requireNonNull(meter) + .counterBuilder(name) + .setDescription(description) + .setUnit(unit) + .buildWithCallback(OtelHelper.longMeasurementCallback(observer)); } } } diff --git a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/LongCounterAdapter.java b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/LongCounterAdapter.java index 9b46b8c97994a..54a7c3e3cab29 100644 --- a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/LongCounterAdapter.java +++ b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/LongCounterAdapter.java @@ -20,15 +20,8 @@ * LongCounterAdapter wraps an otel LongCounter */ public class 
LongCounterAdapter extends AbstractInstrument implements org.elasticsearch.telemetry.metric.LongCounter { - public LongCounterAdapter(Meter meter, String name, String description, String unit) { - super(meter, name, description, unit); - } - - @Override - protected io.opentelemetry.api.metrics.LongCounter buildInstrument(Meter meter) { - var builder = Objects.requireNonNull(meter).counterBuilder(getName()); - return builder.setDescription(getDescription()).setUnit(getUnit()).build(); + super(meter, new Builder(name, description, unit)); } @Override @@ -47,4 +40,15 @@ public void incrementBy(long inc, Map attributes) { assert inc >= 0; getInstrument().add(inc, OtelHelper.fromMap(attributes)); } + + private static class Builder extends AbstractInstrument.Builder { + private Builder(String name, String description, String unit) { + super(name, description, unit); + } + + @Override + public LongCounter build(Meter meter) { + return Objects.requireNonNull(meter).counterBuilder(name).setDescription(description).setUnit(unit).build(); + } + } } diff --git a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/LongGaugeAdapter.java b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/LongGaugeAdapter.java index e297ba7ee963a..52c19c80c284f 100644 --- a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/LongGaugeAdapter.java +++ b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/LongGaugeAdapter.java @@ -11,53 +11,41 @@ import io.opentelemetry.api.metrics.Meter; import io.opentelemetry.api.metrics.ObservableLongGauge; -import org.elasticsearch.common.util.concurrent.ReleasableLock; import org.elasticsearch.telemetry.apm.AbstractInstrument; import org.elasticsearch.telemetry.metric.LongWithAttributes; import java.util.Objects; -import java.util.concurrent.locks.ReentrantLock; import java.util.function.Supplier; /** * LongGaugeAdapter wraps an otel ObservableLongGauge */ public 
class LongGaugeAdapter extends AbstractInstrument implements org.elasticsearch.telemetry.metric.LongGauge { - private final Supplier observer; - private final ReleasableLock closedLock = new ReleasableLock(new ReentrantLock()); - private boolean closed = false; - public LongGaugeAdapter(Meter meter, String name, String description, String unit, Supplier observer) { - super(meter, name, description, unit); - this.observer = observer; + super(meter, new Builder(name, description, unit, observer)); } @Override - protected io.opentelemetry.api.metrics.ObservableLongGauge buildInstrument(Meter meter) { - return Objects.requireNonNull(meter) - .gaugeBuilder(getName()) - .ofLongs() - .setDescription(getDescription()) - .setUnit(getUnit()) - .buildWithCallback(measurement -> { - LongWithAttributes observation; - try { - observation = observer.get(); - } catch (RuntimeException err) { - assert false : "observer must not throw [" + err.getMessage() + "]"; - return; - } - measurement.record(observation.value(), OtelHelper.fromMap(observation.attributes())); - }); + public void close() throws Exception { + getInstrument().close(); } - @Override - public void close() throws Exception { - try (ReleasableLock lock = closedLock.acquire()) { - if (closed == false) { - getInstrument().close(); - } - closed = true; + private static class Builder extends AbstractInstrument.Builder { + private final Supplier observer; + + private Builder(String name, String description, String unit, Supplier observer) { + super(name, description, unit); + this.observer = Objects.requireNonNull(observer); + } + + @Override + public ObservableLongGauge build(Meter meter) { + return Objects.requireNonNull(meter) + .gaugeBuilder(name) + .ofLongs() + .setDescription(description) + .setUnit(unit) + .buildWithCallback(OtelHelper.longMeasurementCallback(observer)); } } } diff --git a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/LongHistogramAdapter.java 
b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/LongHistogramAdapter.java index 2b8e76df0dd0e..286922027ce2a 100644 --- a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/LongHistogramAdapter.java +++ b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/LongHistogramAdapter.java @@ -20,19 +20,8 @@ * LongHistogramAdapter wraps an otel LongHistogram */ public class LongHistogramAdapter extends AbstractInstrument implements org.elasticsearch.telemetry.metric.LongHistogram { - public LongHistogramAdapter(Meter meter, String name, String description, String unit) { - super(meter, name, description, unit); - } - - @Override - protected io.opentelemetry.api.metrics.LongHistogram buildInstrument(Meter meter) { - return Objects.requireNonNull(meter) - .histogramBuilder(getName()) - .ofLongs() - .setDescription(getDescription()) - .setUnit(getUnit()) - .build(); + super(meter, new Builder(name, description, unit)); } @Override @@ -44,4 +33,15 @@ public void record(long value) { public void record(long value, Map attributes) { getInstrument().record(value, OtelHelper.fromMap(attributes)); } + + private static class Builder extends AbstractInstrument.Builder { + private Builder(String name, String description, String unit) { + super(name, description, unit); + } + + @Override + public LongHistogram build(Meter meter) { + return Objects.requireNonNull(meter).histogramBuilder(name).ofLongs().setDescription(description).setUnit(unit).build(); + } + } } diff --git a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/LongUpDownCounterAdapter.java b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/LongUpDownCounterAdapter.java index a59a114bc2264..fa09740085aa3 100644 --- a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/LongUpDownCounterAdapter.java +++ 
b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/LongUpDownCounterAdapter.java @@ -24,13 +24,7 @@ public class LongUpDownCounterAdapter extends AbstractInstrument attributes) { getInstrument().add(inc, OtelHelper.fromMap(attributes)); } + + private static class Builder extends AbstractInstrument.Builder { + private Builder(String name, String description, String unit) { + super(name, description, unit); + } + + @Override + public LongUpDownCounter build(Meter meter) { + return Objects.requireNonNull(meter).upDownCounterBuilder(name).setDescription(description).setUnit(unit).build(); + } + } } diff --git a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/OtelHelper.java b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/OtelHelper.java index 18bf66cee5391..3e8ab415bd25e 100644 --- a/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/OtelHelper.java +++ b/modules/apm/src/main/java/org/elasticsearch/telemetry/apm/internal/metrics/OtelHelper.java @@ -9,10 +9,21 @@ package org.elasticsearch.telemetry.apm.internal.metrics; import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.metrics.ObservableDoubleMeasurement; +import io.opentelemetry.api.metrics.ObservableLongMeasurement; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.telemetry.metric.DoubleWithAttributes; +import org.elasticsearch.telemetry.metric.LongWithAttributes; import java.util.Map; +import java.util.function.Consumer; +import java.util.function.Supplier; class OtelHelper { + private static final Logger logger = LogManager.getLogger(OtelHelper.class); + static Attributes fromMap(Map attributes) { if (attributes == null || attributes.isEmpty()) { return Attributes.empty(); @@ -41,4 +52,38 @@ static Attributes fromMap(Map attributes) { }); return builder.build(); } + + static Consumer doubleMeasurementCallback(Supplier 
observer) { + return measurement -> { + DoubleWithAttributes observation; + try { + observation = observer.get(); + } catch (RuntimeException err) { + assert false : "observer must not throw [" + err.getMessage() + "]"; + logger.error("doubleMeasurementCallback observer unexpected error", err); + return; + } + if (observation == null) { + return; + } + measurement.record(observation.value(), OtelHelper.fromMap(observation.attributes())); + }; + } + + static Consumer longMeasurementCallback(Supplier observer) { + return measurement -> { + LongWithAttributes observation; + try { + observation = observer.get(); + } catch (RuntimeException err) { + assert false : "observer must not throw [" + err.getMessage() + "]"; + logger.error("longMeasurementCallback observer unexpected error", err); + return; + } + if (observation == null) { + return; + } + measurement.record(observation.value(), OtelHelper.fromMap(observation.attributes())); + }; + } } diff --git a/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/metrics/AsyncCountersAdapterTests.java b/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/metrics/AsyncCountersAdapterTests.java index fa8706deee870..3e23b741e01e5 100644 --- a/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/metrics/AsyncCountersAdapterTests.java +++ b/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/metrics/AsyncCountersAdapterTests.java @@ -99,4 +99,26 @@ public void testDoubleAsyncAdapter() throws Exception { metrics = otelMeter.getRecorder().getMeasurements(doubleAsyncCounter); assertThat(metrics, hasSize(0)); } + + public void testNullGaugeRecord() throws Exception { + DoubleAsyncCounter dcounter = registry.registerDoubleAsyncCounter( + "name", + "desc", + "unit", + new AtomicReference()::get + ); + otelMeter.collectMetrics(); + List metrics = otelMeter.getRecorder().getMeasurements(dcounter); + assertThat(metrics, hasSize(0)); + + LongAsyncCounter lcounter = 
registry.registerLongAsyncCounter( + "name", + "desc", + "unit", + new AtomicReference()::get + ); + otelMeter.collectMetrics(); + metrics = otelMeter.getRecorder().getMeasurements(lcounter); + assertThat(metrics, hasSize(0)); + } } diff --git a/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/metrics/GaugeAdapterTests.java b/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/metrics/GaugeAdapterTests.java index e8cd18521f842..10f2d58768d48 100644 --- a/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/metrics/GaugeAdapterTests.java +++ b/modules/apm/src/test/java/org/elasticsearch/telemetry/apm/internal/metrics/GaugeAdapterTests.java @@ -100,4 +100,16 @@ public void testDoubleGaugeRecord() throws Exception { metrics = otelMeter.getRecorder().getMeasurements(gauge); assertThat(metrics, hasSize(0)); } + + public void testNullGaugeRecord() throws Exception { + DoubleGauge dgauge = registry.registerDoubleGauge("name", "desc", "unit", new AtomicReference()::get); + otelMeter.collectMetrics(); + List metrics = otelMeter.getRecorder().getMeasurements(dgauge); + assertThat(metrics, hasSize(0)); + + LongGauge lgauge = registry.registerLongGauge("name", "desc", "unit", new AtomicReference()::get); + otelMeter.collectMetrics(); + metrics = otelMeter.getRecorder().getMeasurements(lgauge); + assertThat(metrics, hasSize(0)); + } } From ff1e2dfb2926fda6c3d1a216e23153125e331d24 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Fri, 8 Dec 2023 07:24:42 -0800 Subject: [PATCH 14/14] Deep copy term returned from ordinal lookup (#103167) We should perform a deep copy of the term returned from the ordinal lookup before passing it to the constant block since that BytesRef may be reused in subsequent lookups. 
Closes #103106 --- .../index/mapper/BlockDocValuesReader.java | 3 +- .../ValuesSourceReaderOperatorTests.java | 65 +++++++++++++++---- 2 files changed, 55 insertions(+), 13 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/mapper/BlockDocValuesReader.java b/server/src/main/java/org/elasticsearch/index/mapper/BlockDocValuesReader.java index 2160f52cbec02..a9781aeb9e2db 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/BlockDocValuesReader.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/BlockDocValuesReader.java @@ -558,7 +558,8 @@ private static class SingletonOrdinals extends BlockDocValuesReader { private BlockLoader.Block readSingleDoc(BlockFactory factory, int docId) throws IOException { if (ordinals.advanceExact(docId)) { BytesRef v = ordinals.lookupOrd(ordinals.ordValue()); - return factory.constantBytes(v); + // the returned BytesRef can be reused + return factory.constantBytes(BytesRef.deepCopyOf(v)); } else { return factory.constantNulls(); } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperatorTests.java index 424f88413af8f..03815dcdaea78 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperatorTests.java @@ -78,6 +78,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Collections; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Set; @@ -111,6 +112,7 @@ public class ValuesSourceReaderOperatorTests extends OperatorTestCase { private Directory directory = newDirectory(); private IndexReader reader; + private static final Map keyToTags = new HashMap<>(); @After public void closeIndex() throws IOException 
{ @@ -144,12 +146,14 @@ static Operator.OperatorFactory factory(IndexReader reader, String name, BlockLo @Override protected SourceOperator simpleInput(BlockFactory blockFactory, int size) { - // The test wants more than one segment. We shoot for 10. - int commitEvery = Math.max(1, (int) Math.ceil((double) size / 10)); - return simpleInput(driverContext(), size, commitEvery); + return simpleInput(driverContext(), size, commitEvery(size), randomPageSize()); } - private SourceOperator simpleInput(DriverContext context, int size, int commitEvery) { + private int commitEvery(int numDocs) { + return Math.max(1, (int) Math.ceil((double) numDocs / 10)); + } + + private SourceOperator simpleInput(DriverContext context, int size, int commitEvery, int pageSize) { try { initIndex(size, commitEvery); } catch (IOException e) { @@ -160,13 +164,14 @@ private SourceOperator simpleInput(DriverContext context, int size, int commitEv ctx -> new MatchAllDocsQuery(), DataPartitioning.SHARD, randomIntBetween(1, 10), - size, + pageSize, LuceneOperator.NO_LIMIT ); return luceneFactory.get(context); } private void initIndex(int size, int commitEvery) throws IOException { + keyToTags.clear(); try ( IndexWriter writer = new IndexWriter( directory, @@ -181,9 +186,8 @@ private void initIndex(int size, int commitEvery) throws IOException { doc.add(new SortedNumericDocValuesField("short", (short) d)); doc.add(new SortedNumericDocValuesField("byte", (byte) d)); doc.add(new SortedNumericDocValuesField("long", d)); - doc.add( - new KeywordFieldMapper.KeywordField("kwd", new BytesRef(Integer.toString(d)), KeywordFieldMapper.Defaults.FIELD_TYPE) - ); + String tag = keyToTags.computeIfAbsent(d, k -> "tag-" + randomIntBetween(1, 5)); + doc.add(new KeywordFieldMapper.KeywordField("kwd", new BytesRef(tag), KeywordFieldMapper.Defaults.FIELD_TYPE)); doc.add(new StoredField("stored_kwd", new BytesRef(Integer.toString(d)))); doc.add(new StoredField("stored_text", Integer.toString(d))); doc.add(new 
SortedNumericDocValuesField("bool", d % 2 == 0 ? 1 : 0)); @@ -295,6 +299,37 @@ public void testLoadAllInOnePage() { ); } + public void testManySingleDocPages() { + DriverContext driverContext = driverContext(); + int numDocs = between(10, 100); + List input = CannedSourceOperator.collectPages(simpleInput(driverContext, numDocs, between(1, numDocs), 1)); + Randomness.shuffle(input); + List operators = new ArrayList<>(); + Checks checks = new Checks(Block.MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING); + FieldCase testCase = new FieldCase( + new KeywordFieldMapper.KeywordFieldType("kwd"), + checks::tags, + StatusChecks::keywordsFromDocValues + ); + operators.add( + new ValuesSourceReaderOperator.Factory( + List.of(testCase.info, fieldInfo(docValuesNumberField("key", NumberFieldMapper.NumberType.INTEGER))), + List.of(new ValuesSourceReaderOperator.ShardContext(reader, () -> SourceLoader.FROM_STORED_SOURCE)), + 0 + ).get(driverContext) + ); + List results = drive(operators, input.iterator(), driverContext); + assertThat(results, hasSize(input.size())); + for (Page page : results) { + assertThat(page.getBlockCount(), equalTo(3)); + IntVector keys = page.getBlock(2).asVector(); + for (int p = 0; p < page.getPositionCount(); p++) { + int key = keys.getInt(p); + testCase.checkResults.check(page.getBlock(1), p, key); + } + } + } + public void testEmpty() { DriverContext driverContext = driverContext(); loadSimpleAndAssert( @@ -440,7 +475,8 @@ public void testLoadAllStatusAllInOnePage() { private void testLoadAllStatus(boolean allInOnePage) { DriverContext driverContext = driverContext(); - List input = CannedSourceOperator.collectPages(simpleInput(driverContext.blockFactory(), between(100, 5000))); + int numDocs = between(100, 5000); + List input = CannedSourceOperator.collectPages(simpleInput(driverContext, numDocs, commitEvery(numDocs), numDocs)); assertThat(reader.leaves(), hasSize(10)); assertThat(input, hasSize(10)); List cases = 
infoAndChecksForEachType(Block.MvOrdering.DEDUPLICATED_AND_SORTED_ASCENDING); @@ -585,7 +621,7 @@ private List infoAndChecksForEachType(Block.MvOrdering docValuesMvOrd r.add(new FieldCase(new BooleanFieldMapper.BooleanFieldType("bool"), checks::bools, StatusChecks::boolFromDocValues)); r.add(new FieldCase(new BooleanFieldMapper.BooleanFieldType("mv_bool"), checks::mvBools, StatusChecks::mvBoolFromDocValues)); r.add(new FieldCase(new BooleanFieldMapper.BooleanFieldType("missing_bool"), checks::constantNulls, StatusChecks::constantNulls)); - r.add(new FieldCase(new KeywordFieldMapper.KeywordFieldType("kwd"), checks::strings, StatusChecks::keywordsFromDocValues)); + r.add(new FieldCase(new KeywordFieldMapper.KeywordFieldType("kwd"), checks::tags, StatusChecks::keywordsFromDocValues)); r.add( new FieldCase( new KeywordFieldMapper.KeywordFieldType("mv_kwd"), @@ -611,7 +647,7 @@ private List infoAndChecksForEachType(Block.MvOrdering docValuesMvOrd r.add( new FieldCase( textFieldWithDelegate("text_with_delegate", new KeywordFieldMapper.KeywordFieldType("kwd")), - checks::strings, + checks::tags, StatusChecks::textWithDelegate ) ); @@ -680,6 +716,11 @@ void strings(Block block, int position, int key) { assertThat(keywords.getBytesRef(position, new BytesRef()).utf8ToString(), equalTo(Integer.toString(key))); } + void tags(Block block, int position, int key) { + BytesRefVector keywords = ((BytesRefBlock) block).asVector(); + assertThat(keywords.getBytesRef(position, new BytesRef()).utf8ToString(), equalTo(keyToTags.get(key))); + } + void bools(Block block, int position, int key) { BooleanVector bools = ((BooleanBlock) block).asVector(); assertThat(bools.getBoolean(position), equalTo(key % 2 == 0)); @@ -1285,7 +1326,7 @@ public void testSequentialStoredFieldsBigEnough() { private void testSequentialStoredFields(boolean sequential, int docCount) { DriverContext driverContext = driverContext(); - List source = CannedSourceOperator.collectPages(simpleInput(driverContext, 
docCount, docCount)); + List source = CannedSourceOperator.collectPages(simpleInput(driverContext, docCount, docCount, docCount)); assertThat(source, hasSize(1)); // We want one page for simpler assertions, and we want them all in one segment assertTrue(source.get(0).getBlock(0).asVector().singleSegmentNonDecreasing()); Operator op = new ValuesSourceReaderOperator.Factory(