diff --git a/CHANGELOG.md b/CHANGELOG.md index d18a4cdd80d1c..6dea6a4589387 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -103,6 +103,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Add average concurrency metric for concurrent segment search ([#9670](https://github.com/opensearch-project/OpenSearch/issues/9670)) - [Remote state] Integrate remote cluster state in publish/commit flow ([#9665](https://github.com/opensearch-project/OpenSearch/pull/9665)) - [Segment Replication] Adding segment replication statistics rolled up at index, node and cluster level ([#9709](https://github.com/opensearch-project/OpenSearch/pull/9709)) +- [Remote Store] Changes to introduce repository registration during bootstrap via node attributes. ([#9105](https://github.com/opensearch-project/OpenSearch/pull/9105)) ### Dependencies - Bump `org.apache.logging.log4j:log4j-core` from 2.17.1 to 2.20.0 ([#8307](https://github.com/opensearch-project/OpenSearch/pull/8307)) diff --git a/distribution/src/config/opensearch.yml b/distribution/src/config/opensearch.yml index 3c4fe822005e0..de2d0e023a200 100644 --- a/distribution/src/config/opensearch.yml +++ b/distribution/src/config/opensearch.yml @@ -92,10 +92,10 @@ ${path.logs} # cluster.remote_store.enabled: true # # Repository to use for segment upload while enforcing remote store for an index -# cluster.remote_store.segment.repository: my-repo-1 +# node.attr.remote_store.segment.repository: my-repo-1 # # Repository to use for translog upload while enforcing remote store for an index -# cluster.remote_store.translog.repository: my-repo-1 +# node.attr.remote_store.translog.repository: my-repo-1 # # ---------------------------------- Experimental Features ----------------------------------- # diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/AbstractRemoteStoreMockRepositoryIntegTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/AbstractRemoteStoreMockRepositoryIntegTestCase.java index 6d2d8df106513..73f5278c175a2 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/AbstractRemoteStoreMockRepositoryIntegTestCase.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/AbstractRemoteStoreMockRepositoryIntegTestCase.java @@ -26,10 +26,15 @@ import java.nio.file.Path; import java.util.Arrays; import java.util.Collections; +import java.util.Locale; import java.util.Set; import java.util.stream.Collectors; -import static org.opensearch.remotestore.RemoteStoreBaseIntegTestCase.remoteStoreClusterSettings; +import static org.opensearch.indices.IndicesService.CLUSTER_REPLICATION_TYPE_SETTING; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; public abstract class AbstractRemoteStoreMockRepositoryIntegTestCase extends AbstractSnapshotIntegTestCase { @@ -47,7 +52,6 @@ protected Settings featureFlagSettings() { public void setup() { FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); 
FeatureFlagSetter.set(FeatureFlags.SEGMENT_REPLICATION_EXPERIMENTAL); - internalCluster().startClusterManagerOnlyNode(remoteStoreClusterSettings(REPOSITORY_NAME, TRANSLOG_REPOSITORY_NAME)); } @Override @@ -66,6 +70,43 @@ protected Settings remoteStoreIndexSettings(int numberOfReplicas) { .build(); } + public Settings buildRemoteStoreNodeAttributes(Path repoLocation, double ioFailureRate, String skipExceptionBlobList, long maxFailure) { + String segmentRepoTypeAttributeKey = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, + REPOSITORY_NAME + ); + String translogRepoTypeAttributeKey = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, + TRANSLOG_REPOSITORY_NAME + ); + String segmentRepoSettingsAttributeKeyPrefix = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, + REPOSITORY_NAME + ); + String translogRepoSettingsAttributeKeyPrefix = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, + TRANSLOG_REPOSITORY_NAME + ); + + return Settings.builder() + .put("node.attr." + REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY, REPOSITORY_NAME) + .put(segmentRepoTypeAttributeKey, "mock") + .put(segmentRepoSettingsAttributeKeyPrefix + "location", repoLocation) + .put(segmentRepoSettingsAttributeKeyPrefix + "random_control_io_exception_rate", ioFailureRate) + .put(segmentRepoSettingsAttributeKeyPrefix + "skip_exception_on_verification_file", true) + .put(segmentRepoSettingsAttributeKeyPrefix + "skip_exception_on_list_blobs", true) + .put(segmentRepoSettingsAttributeKeyPrefix + "skip_exception_on_blobs", skipExceptionBlobList) + .put(segmentRepoSettingsAttributeKeyPrefix + "max_failure_number", maxFailure) + .put("node.attr." + REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY, TRANSLOG_REPOSITORY_NAME) + .put(translogRepoTypeAttributeKey, "mock") + .put(translogRepoSettingsAttributeKeyPrefix + "location", repoLocation) + .build(); + } + protected void deleteRepo() { logger.info("--> Deleting the repository={}", REPOSITORY_NAME); assertAcked(clusterAdmin().prepareDeleteRepository(REPOSITORY_NAME)); @@ -74,26 +115,18 @@ protected void deleteRepo() { } protected String setup(Path repoLocation, double ioFailureRate, String skipExceptionBlobList, long maxFailure) { - logger.info("--> Creating repository={} at the path={}", REPOSITORY_NAME, repoLocation); // The random_control_io_exception_rate setting ensures that 10-25% of all operations to remote store results in /// IOException. skip_exception_on_verification_file & skip_exception_on_list_blobs settings ensures that the // repository creation can happen without failure. 
- createRepository( - REPOSITORY_NAME, - "mock", - Settings.builder() - .put("location", repoLocation) - .put("random_control_io_exception_rate", ioFailureRate) - .put("skip_exception_on_verification_file", true) - .put("skip_exception_on_list_blobs", true) - // Skipping is required for metadata as it is part of recovery - .put("skip_exception_on_blobs", skipExceptionBlobList) - .put("max_failure_number", maxFailure) - ); - logger.info("--> Creating repository={} at the path={}", TRANSLOG_REPOSITORY_NAME, repoLocation); - createRepository(TRANSLOG_REPOSITORY_NAME, "mock", Settings.builder().put("location", repoLocation)); + Settings.Builder settings = Settings.builder() + .put(buildRemoteStoreNodeAttributes(repoLocation, ioFailureRate, skipExceptionBlobList, maxFailure)); + + if (randomBoolean()) { + settings.put(CLUSTER_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.SEGMENT); + } - String dataNodeName = internalCluster().startDataOnlyNodes(1).get(0); + internalCluster().startClusterManagerOnlyNode(settings.build()); + String dataNodeName = internalCluster().startDataOnlyNode(settings.build()); createIndex(INDEX_NAME); logger.info("--> Created index={}", INDEX_NAME); ensureYellowAndNoInitializingShards(INDEX_NAME); diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/CreateRemoteIndexClusterDefaultDocRepIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/CreateRemoteIndexClusterDefaultDocRepIT.java index 37dab5faaeb57..e1ab101fddf55 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/CreateRemoteIndexClusterDefaultDocRepIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/CreateRemoteIndexClusterDefaultDocRepIT.java @@ -75,8 +75,8 @@ public void testDefaultRemoteStoreNoUserOverrideExceptReplicationTypeSegment() t verifyRemoteStoreIndexSettings( indexSettings, "true", - "my-segment-repo-1", - "my-translog-repo-1", + REPOSITORY_NAME, + REPOSITORY_2_NAME, ReplicationType.SEGMENT.toString(), IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL ); diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/CreateRemoteIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/CreateRemoteIndexIT.java index 9991126bb790c..d427a4db84ba2 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/CreateRemoteIndexIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/CreateRemoteIndexIT.java @@ -13,12 +13,9 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.IndexSettings; import org.opensearch.indices.replication.common.ReplicationType; -import org.opensearch.test.FeatureFlagSetter; import org.opensearch.test.OpenSearchIntegTestCase; -import org.junit.After; import org.junit.Before; import java.util.Locale; @@ -28,53 +25,15 @@ import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REPLICATION_TYPE; import static org.opensearch.index.IndexSettings.INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING; -import static org.opensearch.remotestore.RemoteStoreBaseIntegTestCase.remoteStoreClusterSettings; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.containsString; 
@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST) -public class CreateRemoteIndexIT extends OpenSearchIntegTestCase { - - @After - public void teardown() { - assertAcked(clusterAdmin().prepareDeleteRepository("my-segment-repo-1")); - assertAcked(clusterAdmin().prepareDeleteRepository("my-translog-repo-1")); - assertAcked(clusterAdmin().prepareDeleteRepository("my-custom-repo")); - } - - @Override - protected Settings nodeSettings(int nodeOriginal) { - Settings settings = super.nodeSettings(nodeOriginal); - Settings.Builder builder = Settings.builder() - .put(remoteStoreClusterSettings("my-segment-repo-1", "my-translog-repo-1")) - .put(settings); - return builder.build(); - } - - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.REMOTE_STORE, "true").build(); - } +public class CreateRemoteIndexIT extends RemoteStoreBaseIntegTestCase { @Before - public void setup() { - FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); - internalCluster().startClusterManagerOnlyNode(); - assertAcked( - clusterAdmin().preparePutRepository("my-segment-repo-1") - .setType("fs") - .setSettings(Settings.builder().put("location", randomRepoPath().toAbsolutePath())) - ); - assertAcked( - clusterAdmin().preparePutRepository("my-translog-repo-1") - .setType("fs") - .setSettings(Settings.builder().put("location", randomRepoPath().toAbsolutePath())) - ); - assertAcked( - clusterAdmin().preparePutRepository("my-custom-repo") - .setType("fs") - .setSettings(Settings.builder().put("location", randomRepoPath().toAbsolutePath())) - ); + public void setup() throws Exception { + internalCluster().startNodes(2); } public void testDefaultRemoteStoreNoUserOverride() throws Exception { @@ -91,8 +50,8 @@ public void testDefaultRemoteStoreNoUserOverride() throws Exception { verifyRemoteStoreIndexSettings( indexSettings, "true", - "my-segment-repo-1", - "my-translog-repo-1", + REPOSITORY_NAME, + REPOSITORY_2_NAME, ReplicationType.SEGMENT.toString(), IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL ); diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/PrimaryTermValidationIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/PrimaryTermValidationIT.java index f72d107a367de..e14a4062f7775 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/PrimaryTermValidationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/PrimaryTermValidationIT.java @@ -27,7 +27,9 @@ import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.disruption.NetworkDisruption; import org.opensearch.test.transport.MockTransportService; +import org.junit.Before; +import java.nio.file.Path; import java.util.Arrays; import java.util.Collection; import java.util.HashSet; @@ -35,22 +37,28 @@ import java.util.stream.Collectors; import java.util.stream.Stream; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; import static org.hamcrest.Matchers.equalTo; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) - public class PrimaryTermValidationIT extends RemoteStoreBaseIntegTestCase { private static final String INDEX_NAME = "remote-store-test-idx-1"; + protected Path absolutePath; + protected Path absolutePath2; 
@Override protected Collection> nodePlugins() { return Arrays.asList(MockTransportService.TestPlugin.class); } + @Before + public void setup() { + absolutePath = randomRepoPath().toAbsolutePath(); + absolutePath2 = randomRepoPath().toAbsolutePath(); + } + public void testPrimaryTermValidation() throws Exception { // Follower checker interval is lower compared to leader checker so that the cluster manager can remove the node // with network partition faster. The follower check retry count is also kept 1. @@ -61,20 +69,12 @@ public void testPrimaryTermValidation() throws Exception { .put(FollowersChecker.FOLLOWER_CHECK_TIMEOUT_SETTING.getKey(), "1s") .put(FollowersChecker.FOLLOWER_CHECK_INTERVAL_SETTING.getKey(), "1s") .put(FollowersChecker.FOLLOWER_CHECK_RETRY_COUNT_SETTING.getKey(), 1) - .put(remoteStoreClusterSettings(REPOSITORY_NAME, REPOSITORY_2_NAME, true)) + .put(remoteStoreClusterSettings(REPOSITORY_NAME, absolutePath, REPOSITORY_2_NAME, absolutePath2)) .build(); internalCluster().startClusterManagerOnlyNode(clusterSettings); - - // Create repository - absolutePath = randomRepoPath().toAbsolutePath(); - assertAcked( - clusterAdmin().preparePutRepository(REPOSITORY_NAME).setType("fs").setSettings(Settings.builder().put("location", absolutePath)) - ); - absolutePath2 = randomRepoPath().toAbsolutePath(); - putRepository(absolutePath2, REPOSITORY_2_NAME); - - // Start data nodes and create index internalCluster().startDataOnlyNodes(2, clusterSettings); + + // Create index createIndex(INDEX_NAME, remoteStoreIndexSettings(1)); ensureYellowAndNoInitializingShards(INDEX_NAME); ensureGreen(INDEX_NAME); @@ -156,6 +156,7 @@ public void testPrimaryTermValidation() throws Exception { // received the following exception. ShardNotFoundException exception = assertThrows(ShardNotFoundException.class, () -> indexSameDoc(primaryNode, INDEX_NAME)); assertTrue(exception.getMessage().contains("no such shard")); + internalCluster().clearDisruptionScheme(); ensureStableCluster(3); ensureGreen(INDEX_NAME); } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexPrimaryRelocationIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexPrimaryRelocationIT.java index 97c44280f4429..b45b19c43b3b3 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexPrimaryRelocationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexPrimaryRelocationIT.java @@ -18,7 +18,6 @@ import java.nio.file.Path; import static org.opensearch.remotestore.RemoteStoreBaseIntegTestCase.remoteStoreClusterSettings; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST) public class RemoteIndexPrimaryRelocationIT extends IndexPrimaryRelocationIT { @@ -29,15 +28,12 @@ public class RemoteIndexPrimaryRelocationIT extends IndexPrimaryRelocationIT { public void setup() { absolutePath = randomRepoPath().toAbsolutePath(); - assertAcked( - clusterAdmin().preparePutRepository(REPOSITORY_NAME).setType("fs").setSettings(Settings.builder().put("location", absolutePath)) - ); } protected Settings nodeSettings(int nodeOrdinal) { return Settings.builder() .put(super.nodeSettings(nodeOrdinal)) - .put(remoteStoreClusterSettings(REPOSITORY_NAME, REPOSITORY_NAME, false)) + .put(remoteStoreClusterSettings(REPOSITORY_NAME, absolutePath)) .build(); } diff --git 
a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexRecoveryIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexRecoveryIT.java index 11942711b7c22..d11c09928a08f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexRecoveryIT.java @@ -31,11 +31,19 @@ public class RemoteIndexRecoveryIT extends IndexRecoveryIT { protected static final String REPOSITORY_NAME = "test-remote-store-repo"; - protected Path absolutePath; + protected Path repositoryPath; + + @Before + public void setup() { + repositoryPath = randomRepoPath().toAbsolutePath(); + } @Override protected Settings nodeSettings(int nodeOrdinal) { - return Settings.builder().put(super.nodeSettings(nodeOrdinal)).put(remoteStoreClusterSettings(REPOSITORY_NAME)).build(); + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(remoteStoreClusterSettings(REPOSITORY_NAME, repositoryPath)) + .build(); } @Override @@ -47,17 +55,6 @@ protected Settings featureFlagSettings() { .build(); } - @Before - @Override - public void setUp() throws Exception { - super.setUp(); - internalCluster().startClusterManagerOnlyNode(); - absolutePath = randomRepoPath().toAbsolutePath(); - assertAcked( - clusterAdmin().preparePutRepository(REPOSITORY_NAME).setType("fs").setSettings(Settings.builder().put("location", absolutePath)) - ); - } - @Override public Settings indexSettings() { return Settings.builder() diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java index 70e571604ca53..346e9d12410b7 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java @@ -28,6 +28,7 @@ import org.opensearch.snapshots.AbstractSnapshotIntegTestCase; import org.opensearch.snapshots.SnapshotState; import org.opensearch.test.InternalTestCluster; +import org.opensearch.test.OpenSearchIntegTestCase; import org.junit.After; import org.junit.Before; @@ -43,6 +44,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class RemoteRestoreSnapshotIT extends AbstractSnapshotIntegTestCase { private static final String BASE_REMOTE_REPO = "test-rs-repo" + TEST_REMOTE_STORE_REPO_SUFFIX; private Path remoteRepoPath; @@ -50,7 +52,6 @@ public class RemoteRestoreSnapshotIT extends AbstractSnapshotIntegTestCase { @Before public void setup() { remoteRepoPath = randomRepoPath().toAbsolutePath(); - createRepository(BASE_REMOTE_REPO, "fs", remoteRepoPath); } @After @@ -63,7 +64,7 @@ protected Settings nodeSettings(int nodeOrdinal) { return Settings.builder() .put(super.nodeSettings(nodeOrdinal)) .put(FeatureFlags.REMOTE_STORE, "true") - .put(remoteStoreClusterSettings(BASE_REMOTE_REPO)) + .put(remoteStoreClusterSettings(BASE_REMOTE_REPO, remoteRepoPath)) .build(); } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteSegmentStatsFromNodesStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteSegmentStatsFromNodesStatsIT.java index c2e79ea2de5ef..32f3b1066aacd 100644 --- 
a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteSegmentStatsFromNodesStatsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteSegmentStatsFromNodesStatsIT.java @@ -27,7 +27,6 @@ public class RemoteSegmentStatsFromNodesStatsIT extends RemoteStoreBaseIntegTest @Before public void setup() { setupCustomCluster(); - setupRepo(false); } private void setupCustomCluster() { diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java index 15d4b8d1de58a..7173fda89505c 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java @@ -16,13 +16,22 @@ import org.opensearch.action.index.IndexResponse; import org.opensearch.action.support.WriteRequest; import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.metadata.RepositoriesMetadata; +import org.opensearch.cluster.metadata.RepositoryMetadata; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.UUIDs; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.FeatureFlags; +import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.index.IndexModule; import org.opensearch.index.IndexSettings; import org.opensearch.index.mapper.MapperService; import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.node.Node; +import org.opensearch.node.remotestore.RemoteStoreNodeAttribute; +import org.opensearch.repositories.blobstore.BlobStoreRepository; +import org.opensearch.repositories.fs.FsRepository; import org.opensearch.test.OpenSearchIntegTestCase; import org.junit.After; @@ -34,13 +43,16 @@ import java.nio.file.attribute.BasicFileAttributes; import java.util.HashMap; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.concurrent.atomic.AtomicInteger; +import java.util.stream.Collectors; -import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_SEGMENT_STORE_REPOSITORY_SETTING; -import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_ENABLED_SETTING; -import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_TRANSLOG_REPOSITORY_SETTING; import static org.opensearch.indices.IndicesService.CLUSTER_REPLICATION_TYPE_SETTING; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; public class RemoteStoreBaseIntegTestCase extends OpenSearchIntegTestCase { @@ -53,8 +65,9 @@ public class RemoteStoreBaseIntegTestCase extends OpenSearchIntegTestCase { protected static final String MAX_SEQ_NO_TOTAL = "max-seq-no-total"; protected static final String MAX_SEQ_NO_REFRESHED_OR_FLUSHED = "max-seq-no-refreshed-or-flushed"; - protected Path absolutePath; - protected 
Path absolutePath2; + protected Path segmentRepoPath; + protected Path translogRepoPath; + protected boolean clusterSettingsSuppliedByTest = false; private final List documentKeys = List.of( randomAlphaOfLength(5), randomAlphaOfLength(5), @@ -108,10 +121,18 @@ protected boolean addMockInternalEngine() { @Override protected Settings nodeSettings(int nodeOrdinal) { - return Settings.builder() - .put(super.nodeSettings(nodeOrdinal)) - .put(remoteStoreClusterSettings(REPOSITORY_NAME, REPOSITORY_2_NAME, true)) - .build(); + if (segmentRepoPath == null || translogRepoPath == null) { + segmentRepoPath = randomRepoPath().toAbsolutePath(); + translogRepoPath = randomRepoPath().toAbsolutePath(); + } + if (clusterSettingsSuppliedByTest) { + return Settings.builder().put(super.nodeSettings(nodeOrdinal)).build(); + } else { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(remoteStoreClusterSettings(REPOSITORY_NAME, segmentRepoPath, REPOSITORY_2_NAME, translogRepoPath)) + .build(); + } } @Override @@ -153,34 +174,72 @@ protected BulkResponse indexBulk(String indexName, int numDocs) { return client().bulk(bulkRequest).actionGet(); } - public static Settings remoteStoreClusterSettings(String segmentRepoName) { - return remoteStoreClusterSettings(segmentRepoName, segmentRepoName); + public static Settings remoteStoreClusterSettings(String name, Path path) { + return remoteStoreClusterSettings(name, path, name, path); } public static Settings remoteStoreClusterSettings( String segmentRepoName, + Path segmentRepoPath, String translogRepoName, - boolean randomizeSameRepoForRSSAndRTS + Path translogRepoPath ) { - return remoteStoreClusterSettings( - segmentRepoName, - randomizeSameRepoForRSSAndRTS ? (randomBoolean() ? translogRepoName : segmentRepoName) : translogRepoName - ); - } - - public static Settings remoteStoreClusterSettings(String segmentRepoName, String translogRepoName) { - Settings.Builder settingsBuilder = Settings.builder() - .put(CLUSTER_REMOTE_STORE_ENABLED_SETTING.getKey(), true) - .put(CLUSTER_REMOTE_SEGMENT_STORE_REPOSITORY_SETTING.getKey(), segmentRepoName) - .put(CLUSTER_REMOTE_TRANSLOG_REPOSITORY_SETTING.getKey(), translogRepoName); + Settings.Builder settingsBuilder = Settings.builder(); if (randomBoolean()) { settingsBuilder.put(CLUSTER_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.SEGMENT); } + settingsBuilder.put(buildRemoteStoreNodeAttributes(segmentRepoName, segmentRepoPath, translogRepoName, translogRepoPath, false)); + return settingsBuilder.build(); } + public static Settings buildRemoteStoreNodeAttributes( + String segmentRepoName, + Path segmentRepoPath, + String translogRepoName, + Path translogRepoPath, + boolean withRateLimiterAttributes + ) { + String segmentRepoTypeAttributeKey = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, + segmentRepoName + ); + String segmentRepoSettingsAttributeKeyPrefix = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, + segmentRepoName + ); + String translogRepoTypeAttributeKey = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, + translogRepoName + ); + String translogRepoSettingsAttributeKeyPrefix = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, + translogRepoName + ); + + Settings.Builder settings = Settings.builder() + .put("node.attr." 
+ REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY, segmentRepoName) + .put(segmentRepoTypeAttributeKey, FsRepository.TYPE) + .put(segmentRepoSettingsAttributeKeyPrefix + "location", segmentRepoPath) + .put("node.attr." + REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY, translogRepoName) + .put(translogRepoTypeAttributeKey, FsRepository.TYPE) + .put(translogRepoSettingsAttributeKeyPrefix + "location", translogRepoPath); + + if (withRateLimiterAttributes) { + settings.put(segmentRepoSettingsAttributeKeyPrefix + "compress", randomBoolean()) + .put(segmentRepoSettingsAttributeKeyPrefix + "max_remote_download_bytes_per_sec", "2kb") + .put(segmentRepoSettingsAttributeKeyPrefix + "chunk_size", 200, ByteSizeUnit.BYTES); + } + + return settings.build(); + } + private Settings defaultIndexSettings() { return Settings.builder() .put(super.indexSettings()) @@ -212,32 +271,86 @@ protected Settings remoteStoreIndexSettings(int numberOfReplicas, long totalFiel .build(); } - protected void putRepository(Path path) { - putRepository(path, REPOSITORY_NAME); + @After + public void teardown() { + clusterSettingsSuppliedByTest = false; + assertRemoteStoreRepositoryOnAllNodes(REPOSITORY_NAME); + assertRemoteStoreRepositoryOnAllNodes(REPOSITORY_2_NAME); + assertAcked(clusterAdmin().prepareDeleteRepository(REPOSITORY_NAME)); + assertAcked(clusterAdmin().prepareDeleteRepository(REPOSITORY_2_NAME)); } - protected void putRepository(Path path, String repoName) { - assertAcked(clusterAdmin().preparePutRepository(repoName).setType("fs").setSettings(Settings.builder().put("location", path))); - } + public RepositoryMetadata buildRepositoryMetadata(DiscoveryNode node, String name) { + Map nodeAttributes = node.getAttributes(); + String type = nodeAttributes.get(String.format(Locale.getDefault(), REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, name)); + + String settingsAttributeKeyPrefix = String.format(Locale.getDefault(), REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, name); + Map settingsMap = node.getAttributes() + .keySet() + .stream() + .filter(key -> key.startsWith(settingsAttributeKeyPrefix)) + .collect(Collectors.toMap(key -> key.replace(settingsAttributeKeyPrefix, ""), key -> node.getAttributes().get(key))); + + Settings.Builder settings = Settings.builder(); + settingsMap.entrySet().forEach(entry -> settings.put(entry.getKey(), entry.getValue())); + settings.put(BlobStoreRepository.SYSTEM_REPOSITORY_SETTING.getKey(), true); - protected void setupRepo() { - setupRepo(true); + return new RepositoryMetadata(name, type, settings.build()); } - protected void setupRepo(boolean startDedicatedClusterManager) { - if (startDedicatedClusterManager) { - internalCluster().startClusterManagerOnlyNode(); + public void assertRemoteStoreRepositoryOnAllNodes(String repositoryName) { + RepositoriesMetadata repositories = internalCluster().getInstance(ClusterService.class, internalCluster().getNodeNames()[0]) + .state() + .metadata() + .custom(RepositoriesMetadata.TYPE); + RepositoryMetadata actualRepository = repositories.repository(repositoryName); + + for (String nodeName : internalCluster().getNodeNames()) { + ClusterService clusterService = internalCluster().getInstance(ClusterService.class, nodeName); + DiscoveryNode node = clusterService.localNode(); + RepositoryMetadata expectedRepository = buildRepositoryMetadata(node, repositoryName); + assertTrue(actualRepository.equalsIgnoreGenerations(expectedRepository)); } - absolutePath = randomRepoPath().toAbsolutePath(); - putRepository(absolutePath); - 
absolutePath2 = randomRepoPath().toAbsolutePath(); - putRepository(absolutePath2, REPOSITORY_2_NAME); } - @After - public void teardown() { - assertAcked(clusterAdmin().prepareDeleteRepository(REPOSITORY_NAME)); - assertAcked(clusterAdmin().prepareDeleteRepository(REPOSITORY_2_NAME)); + public Settings buildClusterSettingsWith() { + String segmentRepoTypeAttributeKey = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, + REPOSITORY_NAME + ); + String segmentRepoSettingsAttributeKeyPrefix = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, + REPOSITORY_NAME + ); + String translogRepoTypeAttributeKey = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, + REPOSITORY_2_NAME + ); + String translogRepoSettingsAttributeKeyPrefix = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, + REPOSITORY_2_NAME + ); + return Settings.builder() + .put( + Node.NODE_ATTRIBUTES.getKey() + RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY, + REPOSITORY_NAME + ) + .put(segmentRepoTypeAttributeKey, FsRepository.TYPE) + .put(segmentRepoSettingsAttributeKeyPrefix + "location", randomRepoPath()) + .put(segmentRepoSettingsAttributeKeyPrefix + "compress", randomBoolean()) + .put(segmentRepoSettingsAttributeKeyPrefix + "max_remote_download_bytes_per_sec", "2kb") + .put(segmentRepoSettingsAttributeKeyPrefix + "chunk_size", 200, ByteSizeUnit.BYTES) + .put( + Node.NODE_ATTRIBUTES.getKey() + RemoteStoreNodeAttribute.REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY, + REPOSITORY_2_NAME + ) + .put(translogRepoTypeAttributeKey, FsRepository.TYPE) + .put(translogRepoSettingsAttributeKeyPrefix + "location", randomRepoPath()) + .build(); } public static int getFileCount(Path path) throws Exception { @@ -257,5 +370,4 @@ public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) { return filesExisting.get(); } - } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreForceMergeIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreForceMergeIT.java index abc0f35d48eab..0bcde4b44c734 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreForceMergeIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreForceMergeIT.java @@ -16,7 +16,6 @@ import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.transport.MockTransportService; -import org.junit.Before; import java.io.IOException; import java.util.ArrayList; @@ -29,7 +28,7 @@ import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; -@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE, numDataNodes = 3) +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class RemoteStoreForceMergeIT extends RemoteStoreBaseIntegTestCase { private static final String INDEX_NAME = "remote-store-test-idx-1"; @@ -41,11 +40,6 @@ protected Collection> nodePlugins() { return Arrays.asList(MockTransportService.TestPlugin.class); } - @Before - public void setup() { - setupRepo(); - } - @Override public Settings indexSettings() { return remoteStoreIndexSettings(0); @@ -97,6 
+91,7 @@ private void verifyRestoredData(Map indexStats, long deletedDocs) private void testRestoreWithMergeFlow(int numberOfIterations, boolean invokeFlush, boolean flushAfterMerge, long deletedDocs) throws IOException { + internalCluster().startNodes(3); createIndex(INDEX_NAME, remoteStoreIndexSettings(0)); ensureYellowAndNoInitializingShards(INDEX_NAME); ensureGreen(INDEX_NAME); diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java index b87ccdb22d014..bd019693f01ff 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java @@ -29,7 +29,6 @@ import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.transport.MockTransportService; import org.hamcrest.MatcherAssert; -import org.junit.Before; import java.nio.file.Path; import java.util.Arrays; @@ -47,7 +46,7 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.oneOf; -@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE, numDataNodes = 0) +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class RemoteStoreIT extends RemoteStoreBaseIntegTestCase { protected final String INDEX_NAME = "remote-store-test-idx-1"; @@ -57,18 +56,13 @@ protected Collection> nodePlugins() { return Arrays.asList(MockTransportService.TestPlugin.class); } - @Before - public void setup() { - setupRepo(); - } - @Override public Settings indexSettings() { return remoteStoreIndexSettings(0); } private void testPeerRecovery(int numberOfIterations, boolean invokeFlush) throws Exception { - internalCluster().startDataOnlyNodes(3); + internalCluster().startNodes(3); createIndex(INDEX_NAME, remoteStoreIndexSettings(0)); ensureYellowAndNoInitializingShards(INDEX_NAME); ensureGreen(INDEX_NAME); @@ -129,7 +123,7 @@ public void testPeerRecoveryWithRemoteStoreAndRemoteTranslogRefresh() throws Exc } private void verifyRemoteStoreCleanup() throws Exception { - internalCluster().startDataOnlyNodes(3); + internalCluster().startNodes(3); createIndex(INDEX_NAME, remoteStoreIndexSettings(1)); indexData(5, randomBoolean(), INDEX_NAME); @@ -138,7 +132,7 @@ private void verifyRemoteStoreCleanup() throws Exception { .prepareGetSettings(INDEX_NAME) .get() .getSetting(INDEX_NAME, IndexMetadata.SETTING_INDEX_UUID); - Path indexPath = Path.of(String.valueOf(absolutePath), indexUUID); + Path indexPath = Path.of(String.valueOf(segmentRepoPath), indexUUID); assertTrue(getFileCount(indexPath) > 0); assertAcked(client().admin().indices().delete(new DeleteIndexRequest(INDEX_NAME)).get()); // Delete is async. 
Give time for it @@ -155,7 +149,7 @@ public void testRemoteTranslogCleanup() throws Exception { } public void testStaleCommitDeletionWithInvokeFlush() throws Exception { - internalCluster().startDataOnlyNodes(1); + internalCluster().startNode(); createIndex(INDEX_NAME, remoteStoreIndexSettings(1, 10000l, -1)); int numberOfIterations = randomIntBetween(5, 15); indexData(numberOfIterations, true, INDEX_NAME); @@ -164,7 +158,7 @@ public void testStaleCommitDeletionWithInvokeFlush() throws Exception { .prepareGetSettings(INDEX_NAME) .get() .getSetting(INDEX_NAME, IndexMetadata.SETTING_INDEX_UUID); - Path indexPath = Path.of(String.valueOf(absolutePath), indexUUID, "/0/segments/metadata"); + Path indexPath = Path.of(String.valueOf(segmentRepoPath), indexUUID, "/0/segments/metadata"); // Delete is async. assertBusy(() -> { int actualFileCount = getFileCount(indexPath); @@ -182,7 +176,7 @@ public void testStaleCommitDeletionWithInvokeFlush() throws Exception { } public void testStaleCommitDeletionWithoutInvokeFlush() throws Exception { - internalCluster().startDataOnlyNodes(1); + internalCluster().startNode(); createIndex(INDEX_NAME, remoteStoreIndexSettings(1, 10000l, -1)); int numberOfIterations = randomIntBetween(5, 15); indexData(numberOfIterations, false, INDEX_NAME); @@ -191,7 +185,7 @@ public void testStaleCommitDeletionWithoutInvokeFlush() throws Exception { .prepareGetSettings(INDEX_NAME) .get() .getSetting(INDEX_NAME, IndexMetadata.SETTING_INDEX_UUID); - Path indexPath = Path.of(String.valueOf(absolutePath), indexUUID, "/0/segments/metadata"); + Path indexPath = Path.of(String.valueOf(segmentRepoPath), indexUUID, "/0/segments/metadata"); int actualFileCount = getFileCount(indexPath); // We also allow (numberOfIterations + 1) as index creation also triggers refresh. MatcherAssert.assertThat(actualFileCount, is(oneOf(numberOfIterations - 1, numberOfIterations, numberOfIterations + 1))); @@ -202,7 +196,7 @@ public void testStaleCommitDeletionWithoutInvokeFlush() throws Exception { * default. */ public void testDefaultBufferInterval() throws ExecutionException, InterruptedException { - setupRepo(); + internalCluster().startClusterManagerOnlyNode(); String clusterManagerName = internalCluster().getClusterManagerName(); String dataNode = internalCluster().startDataOnlyNodes(1).get(0); createIndex(INDEX_NAME); @@ -230,7 +224,7 @@ public void testDefaultBufferInterval() throws ExecutionException, InterruptedEx * with and without cluster default. */ public void testOverriddenBufferInterval() throws ExecutionException, InterruptedException { - setupRepo(); + internalCluster().startClusterManagerOnlyNode(); String clusterManagerName = internalCluster().getClusterManagerName(); String dataNode = internalCluster().startDataOnlyNodes(1).get(0); @@ -287,7 +281,7 @@ public void testOverriddenBufferInterval() throws ExecutionException, Interrupte * This tests validation which kicks in during index creation failing creation if the value is less than minimum allowed value. 
*/ public void testOverriddenBufferIntervalValidation() { - setupRepo(); + internalCluster().startClusterManagerOnlyNode(); TimeValue bufferInterval = TimeValue.timeValueSeconds(-1); Settings indexSettings = Settings.builder() .put(indexSettings()) @@ -308,7 +302,6 @@ public void testOverriddenBufferIntervalValidation() { */ public void testClusterBufferIntervalValidation() { String clusterManagerName = internalCluster().startClusterManagerOnlyNode(); - setupRepo(false); IllegalArgumentException exception = assertThrows( IllegalArgumentException.class, () -> client(clusterManagerName).admin() diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRepositoryRegistrationIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRepositoryRegistrationIT.java new file mode 100644 index 0000000000000..4d56a1e94e3fc --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRepositoryRegistrationIT.java @@ -0,0 +1,97 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.remotestore; + +import org.opensearch.plugins.Plugin; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.disruption.NetworkDisruption; +import org.opensearch.test.transport.MockTransportService; + +import java.util.Arrays; +import java.util.Collection; +import java.util.HashSet; +import java.util.Set; +import java.util.stream.Collectors; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +public class RemoteStoreRepositoryRegistrationIT extends RemoteStoreBaseIntegTestCase { + + @Override + protected Collection> nodePlugins() { + return Arrays.asList(MockTransportService.TestPlugin.class); + } + + public void testSingleNodeClusterRepositoryRegistration() throws Exception { + internalCluster().startNode(); + } + + public void testMultiNodeClusterRepositoryRegistration() throws Exception { + internalCluster().startNodes(3); + } + + public void testMultiNodeClusterRepositoryRegistrationWithMultipleClusterManager() throws Exception { + internalCluster().startClusterManagerOnlyNodes(3); + internalCluster().startNodes(3); + } + + public void testMultiNodeClusterActiveClusterManagerShutDown() throws Exception { + internalCluster().startNodes(3); + internalCluster().stopCurrentClusterManagerNode(); + ensureStableCluster(2); + } + + public void testMultiNodeClusterActiveMClusterManagerRestart() throws Exception { + internalCluster().startNodes(3); + String clusterManagerNodeName = internalCluster().getClusterManagerName(); + internalCluster().restartNode(clusterManagerNodeName); + ensureStableCluster(3); + } + + public void testMultiNodeClusterRandomNodeRestart() throws Exception { + internalCluster().startNodes(3); + internalCluster().restartRandomDataNode(); + ensureStableCluster(3); + } + + public void testMultiNodeClusterActiveClusterManagerRecoverNetworkIsolation() { + internalCluster().startClusterManagerOnlyNodes(3); + String dataNode = internalCluster().startNode(); + + NetworkDisruption partition = isolateClusterManagerDisruption(NetworkDisruption.DISCONNECT); + internalCluster().setDisruptionScheme(partition); + + partition.startDisrupting(); + ensureStableCluster(3, dataNode); + partition.stopDisrupting(); + + ensureStableCluster(4); + + 
internalCluster().clearDisruptionScheme(); + } + + public void testMultiNodeClusterRandomNodeRecoverNetworkIsolation() { + Set nodesInOneSide = internalCluster().startNodes(3).stream().collect(Collectors.toCollection(HashSet::new)); + Set nodesInAnotherSide = internalCluster().startNodes(3).stream().collect(Collectors.toCollection(HashSet::new)); + ensureStableCluster(6); + + NetworkDisruption networkDisruption = new NetworkDisruption( + new NetworkDisruption.TwoPartitions(nodesInOneSide, nodesInAnotherSide), + NetworkDisruption.DISCONNECT + ); + internalCluster().setDisruptionScheme(networkDisruption); + + networkDisruption.startDisrupting(); + ensureStableCluster(3, nodesInOneSide.stream().findAny().get()); + networkDisruption.stopDisrupting(); + + ensureStableCluster(6); + + internalCluster().clearDisruptionScheme(); + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRestoreIT.java index 85c2514ebf00f..b84b9e38c63a3 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRestoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreRestoreIT.java @@ -15,13 +15,11 @@ import org.opensearch.cluster.health.ClusterHealthStatus; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.plugins.Plugin; import org.opensearch.repositories.RepositoriesService; import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.transport.MockTransportService; -import org.junit.Before; import java.io.IOException; import java.util.Arrays; @@ -35,7 +33,7 @@ import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static org.hamcrest.Matchers.greaterThan; -@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE, numDataNodes = 0) +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class RemoteStoreRestoreIT extends RemoteStoreBaseIntegTestCase { private static final String INDEX_NAME = "remote-store-test-idx-1"; private static final String INDEX_NAMES = "test-remote-store-1,test-remote-store-2,remote-store-test-index-1,remote-store-test-index-2"; @@ -53,11 +51,6 @@ protected Collection> nodePlugins() { return Arrays.asList(MockTransportService.TestPlugin.class); } - @Before - public void setup() { - setupRepo(); - } - private void restore(String... 
indices) { boolean restoreAllShards = randomBoolean(); if (restoreAllShards) { @@ -94,8 +87,19 @@ private void verifyRestoredData(Map indexStats, String indexName) } private void prepareCluster(int numClusterManagerNodes, int numDataOnlyNodes, String indices, int replicaCount, int shardCount) { - internalCluster().startClusterManagerOnlyNodes(numClusterManagerNodes); - internalCluster().startDataOnlyNodes(numDataOnlyNodes); + prepareCluster(numClusterManagerNodes, numDataOnlyNodes, indices, replicaCount, shardCount, Settings.EMPTY); + } + + private void prepareCluster( + int numClusterManagerNodes, + int numDataOnlyNodes, + String indices, + int replicaCount, + int shardCount, + Settings clusterSettings + ) { + internalCluster().startClusterManagerOnlyNodes(numClusterManagerNodes, clusterSettings); + internalCluster().startDataOnlyNodes(numDataOnlyNodes, clusterSettings); for (String index : indices.split(",")) { createIndex(index, remoteStoreIndexSettings(replicaCount, shardCount)); ensureYellowAndNoInitializingShards(index); @@ -187,7 +191,7 @@ private void restoreAndVerify(int shardCount, int replicaCount, Map indexStats = indexData(numberOfIterations, invokeFlush, INDEX_NAME); assertEquals(shardCount, getNumShards(INDEX_NAME).totalNumShards); @@ -269,7 +273,7 @@ private void testRestoreFlowMultipleIndices(int numberOfIterations, boolean invo public void testRestoreFlowAllShardsNoRedIndex() throws InterruptedException { int shardCount = randomIntBetween(1, 5); - prepareCluster(0, 3, INDEX_NAME, 0, shardCount); + prepareCluster(1, 3, INDEX_NAME, 0, shardCount); indexData(randomIntBetween(2, 5), true, INDEX_NAME); assertEquals(shardCount, getNumShards(INDEX_NAME).totalNumShards); @@ -285,7 +289,7 @@ public void testRestoreFlowAllShardsNoRedIndex() throws InterruptedException { public void testRestoreFlowNoRedIndex() throws Exception { int shardCount = randomIntBetween(1, 5); - prepareCluster(0, 3, INDEX_NAME, 0, shardCount); + prepareCluster(1, 3, INDEX_NAME, 0, shardCount); Map indexStats = indexData(randomIntBetween(2, 5), true, INDEX_NAME); assertEquals(shardCount, getNumShards(INDEX_NAME).totalNumShards); @@ -459,22 +463,16 @@ public void testRTSRestoreDataOnlyInTranslog() throws Exception { } public void testRateLimitedRemoteDownloads() throws Exception { - assertAcked( - client().admin() - .cluster() - .preparePutRepository(REPOSITORY_NAME) - .setType("fs") - .setSettings( - Settings.builder() - .put("location", randomRepoPath()) - .put("compress", randomBoolean()) - .put("max_remote_download_bytes_per_sec", "2kb") - .put("chunk_size", 200, ByteSizeUnit.BYTES) - - ) - ); + clusterSettingsSuppliedByTest = true; int shardCount = randomIntBetween(1, 3); - prepareCluster(0, 3, INDEX_NAME, 0, shardCount); + prepareCluster( + 1, + 3, + INDEX_NAME, + 0, + shardCount, + buildRemoteStoreNodeAttributes(REPOSITORY_NAME, randomRepoPath(), REPOSITORY_2_NAME, randomRepoPath(), true) + ); Map indexStats = indexData(5, false, INDEX_NAME); assertEquals(shardCount, getNumShards(INDEX_NAME).totalNumShards); internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNodeName(INDEX_NAME))); diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java index 2b9fb9c497cb6..8ae25c6758195 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java +++ 
b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java @@ -39,14 +39,14 @@ import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; -@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 3) +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class RemoteStoreStatsIT extends RemoteStoreBaseIntegTestCase { private static final String INDEX_NAME = "remote-store-test-idx-1"; @Before public void setup() { - setupRepo(); + internalCluster().startNodes(3); } public void testStatsResponseFromAllNodes() { diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/ReplicaToPrimaryPromotionIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/ReplicaToPrimaryPromotionIT.java index abad56d892d88..4e3f01b8f257f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/ReplicaToPrimaryPromotionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/ReplicaToPrimaryPromotionIT.java @@ -34,13 +34,13 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; -@OpenSearchIntegTestCase.ClusterScope(numDataNodes = 0) +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class ReplicaToPrimaryPromotionIT extends RemoteStoreBaseIntegTestCase { private int shard_count = 5; @Before public void setup() { - setupRepo(); + internalCluster().startClusterManagerOnlyNode(); } @Override diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreIT.java index 22250c3b793cf..1b817408596ab 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreIT.java @@ -30,10 +30,17 @@ public class SegmentReplicationUsingRemoteStoreIT extends SegmentReplicationIT { private static final String REPOSITORY_NAME = "test-remote-store-repo"; + protected Path absolutePath; @Override protected Settings nodeSettings(int nodeOrdinal) { - return Settings.builder().put(super.nodeSettings(nodeOrdinal)).put(remoteStoreClusterSettings(REPOSITORY_NAME)).build(); + if (absolutePath == null) { + absolutePath = randomRepoPath().toAbsolutePath(); + } + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(remoteStoreClusterSettings(REPOSITORY_NAME, absolutePath)) + .build(); } protected boolean segmentReplicationWithRemoteEnabled() { @@ -52,10 +59,6 @@ protected Settings featureFlagSettings() { @Before public void setup() { internalCluster().startClusterManagerOnlyNode(); - Path absolutePath = randomRepoPath().toAbsolutePath(); - assertAcked( - clusterAdmin().preparePutRepository(REPOSITORY_NAME).setType("fs").setSettings(Settings.builder().put("location", absolutePath)) - ); } @After diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationWithRemoteStorePressureIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationWithRemoteStorePressureIT.java index 99927797d5fbc..fa0944e5bfee0 100644 --- 
a/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationWithRemoteStorePressureIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationWithRemoteStorePressureIT.java @@ -29,6 +29,7 @@ public class SegmentReplicationWithRemoteStorePressureIT extends SegmentReplicationPressureIT { private static final String REPOSITORY_NAME = "test-remote-store-repo"; + protected Path absolutePath; @Override protected boolean segmentReplicationWithRemoteEnabled() { @@ -46,16 +47,16 @@ protected Settings featureFlagSettings() { @Override protected Settings nodeSettings(int nodeOrdinal) { - return Settings.builder().put(super.nodeSettings(nodeOrdinal)).put(remoteStoreClusterSettings(REPOSITORY_NAME)).build(); + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(remoteStoreClusterSettings(REPOSITORY_NAME, absolutePath)) + .build(); } @Before public void setup() { + absolutePath = randomRepoPath().toAbsolutePath(); internalCluster().startClusterManagerOnlyNode(); - Path absolutePath = randomRepoPath().toAbsolutePath(); - assertAcked( - clusterAdmin().preparePutRepository(REPOSITORY_NAME).setType("fs").setSettings(Settings.builder().put("location", absolutePath)) - ); } @After diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/RemoteStoreMultipartFileCorruptionIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/RemoteStoreMultipartFileCorruptionIT.java index f2d2e6c04d114..7112b266840ac 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/RemoteStoreMultipartFileCorruptionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/RemoteStoreMultipartFileCorruptionIT.java @@ -16,7 +16,6 @@ import org.opensearch.plugins.Plugin; import org.opensearch.remotestore.RemoteStoreBaseIntegTestCase; import org.opensearch.remotestore.multipart.mocks.MockFsRepositoryPlugin; -import org.junit.Before; import java.util.Collection; import java.util.stream.Collectors; @@ -31,11 +30,6 @@ protected Collection> nodePlugins() { return Stream.concat(super.nodePlugins().stream(), Stream.of(MockFsRepositoryPlugin.class)).collect(Collectors.toList()); } - @Before - public void setup() { - setupRepo(); - } - protected Settings remoteStoreIndexSettings() { return Settings.builder() .put(super.indexSettings()) diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/RemoteStoreMultipartIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/RemoteStoreMultipartIT.java index 842a576a92a38..21f48ba99e651 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/RemoteStoreMultipartIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/RemoteStoreMultipartIT.java @@ -9,6 +9,8 @@ package org.opensearch.remotestore.multipart; import org.opensearch.client.Client; +import org.opensearch.cluster.metadata.RepositoryMetadata; +import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.unit.ByteSizeUnit; @@ -16,38 +18,81 @@ import org.opensearch.remotestore.RemoteStoreIT; import org.opensearch.remotestore.multipart.mocks.MockFsRepositoryPlugin; import org.opensearch.repositories.RepositoriesService; +import org.opensearch.repositories.blobstore.BlobStoreRepository; +import org.junit.Before; import 
java.nio.file.Path; import java.util.Collection; +import java.util.Locale; +import java.util.Map; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; import java.util.stream.Stream; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; public class RemoteStoreMultipartIT extends RemoteStoreIT { + Path repositoryLocation; + boolean compress; + boolean overrideBuildRepositoryMetadata; + @Override protected Collection> nodePlugins() { return Stream.concat(super.nodePlugins().stream(), Stream.of(MockFsRepositoryPlugin.class)).collect(Collectors.toList()); } + @Before + public void setup() { + overrideBuildRepositoryMetadata = false; + repositoryLocation = randomRepoPath(); + compress = randomBoolean(); + } + @Override - protected void putRepository(Path path) { - assertAcked( - clusterAdmin().preparePutRepository(REPOSITORY_NAME) - .setType(MockFsRepositoryPlugin.TYPE) - .setSettings(Settings.builder().put("location", path)) - ); + public RepositoryMetadata buildRepositoryMetadata(DiscoveryNode node, String name) { + if (overrideBuildRepositoryMetadata) { + Map nodeAttributes = node.getAttributes(); + String type = nodeAttributes.get(String.format(Locale.getDefault(), REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, name)); + + String settingsAttributeKeyPrefix = String.format( + Locale.getDefault(), + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, + name + ); + Map settingsMap = node.getAttributes() + .keySet() + .stream() + .filter(key -> key.startsWith(settingsAttributeKeyPrefix)) + .collect(Collectors.toMap(key -> key.replace(settingsAttributeKeyPrefix, ""), key -> node.getAttributes().get(key))); + + Settings.Builder settings = Settings.builder(); + settingsMap.entrySet().forEach(entry -> settings.put(entry.getKey(), entry.getValue())); + settings.put(BlobStoreRepository.SYSTEM_REPOSITORY_SETTING.getKey(), true); + + if (name.equals(REPOSITORY_NAME)) { + settings.put("location", repositoryLocation) + .put("compress", compress) + .put("max_remote_upload_bytes_per_sec", "1kb") + .put("chunk_size", 100, ByteSizeUnit.BYTES); + return new RepositoryMetadata(name, MockFsRepositoryPlugin.TYPE, settings.build()); + } + + return new RepositoryMetadata(name, type, settings.build()); + } else { + return super.buildRepositoryMetadata(node, name); + } } public void testRateLimitedRemoteUploads() throws Exception { - internalCluster().startDataOnlyNodes(1); + overrideBuildRepositoryMetadata = true; + internalCluster().startNode(); Client client = client(); logger.info("--> updating repository"); - Path repositoryLocation = randomRepoPath(); assertAcked( client.admin() .cluster() @@ -55,8 +100,9 @@ public void testRateLimitedRemoteUploads() throws Exception { .setType(MockFsRepositoryPlugin.TYPE) .setSettings( Settings.builder() + .put(BlobStoreRepository.SYSTEM_REPOSITORY_SETTING.getKey(), true) .put("location", repositoryLocation) - .put("compress", randomBoolean()) + .put("compress", compress) .put("max_remote_upload_bytes_per_sec", "1kb") .put("chunk_size", 100, ByteSizeUnit.BYTES) ) diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotIT.java 
b/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotIT.java index 072e03e8a2f79..9ec8cee2685fe 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotIT.java @@ -161,8 +161,9 @@ public void testCloneShallowSnapshotIndex() throws Exception { disableRepoConsistencyCheck("This test uses remote store repository"); FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); final String remoteStoreRepoName = "remote-store-repo-name"; - internalCluster().startClusterManagerOnlyNode(remoteStoreClusterSettings(remoteStoreRepoName)); - internalCluster().startDataOnlyNode(); + final Path remoteStoreRepoPath = randomRepoPath(); + internalCluster().startClusterManagerOnlyNode(remoteStoreClusterSettings(remoteStoreRepoName, remoteStoreRepoPath)); + internalCluster().startDataOnlyNode(remoteStoreClusterSettings(remoteStoreRepoName, remoteStoreRepoPath)); final String snapshotRepoName = "snapshot-repo-name"; final Path snapshotRepoPath = randomRepoPath(); @@ -172,9 +173,6 @@ public void testCloneShallowSnapshotIndex() throws Exception { final Path shallowSnapshotRepoPath = randomRepoPath(); createRepository(shallowSnapshotRepoName, "fs", snapshotRepoSettingsForShallowCopy(shallowSnapshotRepoPath)); - final Path remoteStoreRepoPath = randomRepoPath(); - createRepository(remoteStoreRepoName, "fs", remoteStoreRepoPath); - final String indexName = "index-1"; createIndexWithRandomDocs(indexName, randomIntBetween(5, 10)); @@ -208,10 +206,14 @@ public void testShallowCloneNameAvailability() throws Exception { disableRepoConsistencyCheck("This test uses remote store repository"); FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); final String remoteStoreRepoName = "remote-store-repo-name"; + final Path remoteStorePath = randomRepoPath().toAbsolutePath(); internalCluster().startClusterManagerOnlyNode( - Settings.builder().put(LARGE_SNAPSHOT_POOL_SETTINGS).put(remoteStoreClusterSettings(remoteStoreRepoName)).build() + Settings.builder() + .put(LARGE_SNAPSHOT_POOL_SETTINGS) + .put(remoteStoreClusterSettings(remoteStoreRepoName, remoteStorePath)) + .build() ); - internalCluster().startDataOnlyNode(); + internalCluster().startDataOnlyNode(remoteStoreClusterSettings(remoteStoreRepoName, remoteStorePath)); final String shallowSnapshotRepoName = "shallow-snapshot-repo-name"; final Path shallowSnapshotRepoPath = randomRepoPath(); @@ -245,16 +247,14 @@ public void testCloneAfterRepoShallowSettingEnabled() throws Exception { disableRepoConsistencyCheck("This test uses remote store repository"); FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); final String remoteStoreRepoName = "remote-store-repo-name"; - internalCluster().startClusterManagerOnlyNode(remoteStoreClusterSettings(remoteStoreRepoName)); - internalCluster().startDataOnlyNode(); + final Path remoteStoreRepoPath = randomRepoPath(); + internalCluster().startClusterManagerOnlyNode(remoteStoreClusterSettings(remoteStoreRepoName, remoteStoreRepoPath)); + internalCluster().startDataOnlyNode(remoteStoreClusterSettings(remoteStoreRepoName, remoteStoreRepoPath)); final String snapshotRepoName = "snapshot-repo-name"; final Path snapshotRepoPath = randomRepoPath(); createRepository(snapshotRepoName, "fs", snapshotRepoPath); - final Path remoteStoreRepoPath = randomRepoPath(); - createRepository(remoteStoreRepoName, "fs", remoteStoreRepoPath); - final String indexName = "index-1"; createIndexWithRandomDocs(indexName, randomIntBetween(5, 10)); 
@@ -282,16 +282,14 @@ public void testCloneAfterRepoShallowSettingDisabled() throws Exception { disableRepoConsistencyCheck("This test uses remote store repository"); FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); final String remoteStoreRepoName = "remote-store-repo-name"; - internalCluster().startClusterManagerOnlyNode(remoteStoreClusterSettings(remoteStoreRepoName)); - internalCluster().startDataOnlyNode(); + final Path remoteStoreRepoPath = randomRepoPath(); + internalCluster().startClusterManagerOnlyNode(remoteStoreClusterSettings(remoteStoreRepoName, remoteStoreRepoPath)); + internalCluster().startDataOnlyNode(remoteStoreClusterSettings(remoteStoreRepoName, remoteStoreRepoPath)); final String snapshotRepoName = "snapshot-repo-name"; final Path snapshotRepoPath = randomRepoPath(); createRepository(snapshotRepoName, "fs", snapshotRepoSettingsForShallowCopy(snapshotRepoPath)); - final Path remoteStoreRepoPath = randomRepoPath(); - createRepository(remoteStoreRepoName, "fs", remoteStoreRepoPath); - final String indexName = "index-1"; createIndexWithRandomDocs(indexName, randomIntBetween(5, 10)); diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotIT.java index 3a9eacd6ac183..448b860683668 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotIT.java @@ -42,16 +42,14 @@ public class DeleteSnapshotIT extends AbstractSnapshotIntegTestCase { public void testDeleteSnapshot() throws Exception { disableRepoConsistencyCheck("Remote store repository is being used in the test"); FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); - internalCluster().startClusterManagerOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME)); - internalCluster().startDataOnlyNode(); + final Path remoteStoreRepoPath = randomRepoPath(); + internalCluster().startClusterManagerOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME, remoteStoreRepoPath)); + internalCluster().startDataOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME, remoteStoreRepoPath)); final String snapshotRepoName = "snapshot-repo-name"; final Path snapshotRepoPath = randomRepoPath(); createRepository(snapshotRepoName, "fs", snapshotRepoPath); - final Path remoteStoreRepoPath = randomRepoPath(); - createRepository(REMOTE_REPO_NAME, "fs", remoteStoreRepoPath); - final String indexName = "index-1"; createIndexWithRandomDocs(indexName, randomIntBetween(5, 10)); @@ -72,15 +70,13 @@ public void testDeleteSnapshot() throws Exception { public void testDeleteShallowCopySnapshot() throws Exception { disableRepoConsistencyCheck("Remote store repository is being used in the test"); FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); - internalCluster().startClusterManagerOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME)); - internalCluster().startDataOnlyNode(); + final Path remoteStoreRepoPath = randomRepoPath(); + internalCluster().startClusterManagerOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME, remoteStoreRepoPath)); + internalCluster().startDataOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME, remoteStoreRepoPath)); final String snapshotRepoName = "snapshot-repo-name"; createRepository(snapshotRepoName, "fs", snapshotRepoSettingsForShallowCopy()); - final Path remoteStoreRepoPath = randomRepoPath(); - createRepository(REMOTE_REPO_NAME, "fs", remoteStoreRepoPath); - final String indexName = "index-1"; 
createIndexWithRandomDocs(indexName, randomIntBetween(5, 10)); @@ -104,14 +100,12 @@ public void testDeleteMultipleShallowCopySnapshotsCase1() throws Exception { disableRepoConsistencyCheck("Remote store repository is being used in the test"); FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); - internalCluster().startClusterManagerOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME)); - internalCluster().startDataOnlyNode(); + final Path remoteStoreRepoPath = randomRepoPath(); + internalCluster().startClusterManagerOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME, remoteStoreRepoPath)); + internalCluster().startDataOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME, remoteStoreRepoPath)); final Client clusterManagerClient = internalCluster().clusterManagerClient(); ensureStableCluster(2); - final Path remoteStoreRepoPath = randomRepoPath(); - createRepository(REMOTE_REPO_NAME, "fs", remoteStoreRepoPath); - final String snapshotRepoName = "snapshot-repo-name"; final Path snapshotRepoPath = randomRepoPath(); createRepository(snapshotRepoName, "mock", snapshotRepoSettingsForShallowCopy(snapshotRepoPath)); @@ -150,8 +144,9 @@ public void testDeleteMultipleShallowCopySnapshotsCase2() throws Exception { disableRepoConsistencyCheck("Remote store repository is being used in the test"); FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); - internalCluster().startClusterManagerOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME)); - final String dataNode = internalCluster().startDataOnlyNode(); + final Path remoteStoreRepoPath = randomRepoPath(); + internalCluster().startClusterManagerOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME, remoteStoreRepoPath)); + final String dataNode = internalCluster().startDataOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME, remoteStoreRepoPath)); ensureStableCluster(2); final String clusterManagerNode = internalCluster().getClusterManagerName(); @@ -161,9 +156,6 @@ public void testDeleteMultipleShallowCopySnapshotsCase2() throws Exception { final String testIndex = "index-test"; createIndexWithContent(testIndex); - final Path remoteStoreRepoPath = randomRepoPath(); - createRepository(REMOTE_REPO_NAME, "fs", remoteStoreRepoPath); - final String remoteStoreEnabledIndexName = "remote-index-1"; final Settings remoteStoreEnabledIndexSettings = getRemoteStoreBackedIndexSettings(); createIndex(remoteStoreEnabledIndexName, remoteStoreEnabledIndexSettings); @@ -238,8 +230,9 @@ public void testDeleteMultipleShallowCopySnapshotsCase3() throws Exception { disableRepoConsistencyCheck("Remote store repository is being used in the test"); FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); - internalCluster().startClusterManagerOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME)); - internalCluster().startDataOnlyNode(); + final Path remoteStoreRepoPath = randomRepoPath(); + internalCluster().startClusterManagerOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME, remoteStoreRepoPath)); + internalCluster().startDataOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME, remoteStoreRepoPath)); final Client clusterManagerClient = internalCluster().clusterManagerClient(); ensureStableCluster(2); @@ -247,9 +240,6 @@ public void testDeleteMultipleShallowCopySnapshotsCase3() throws Exception { final Path snapshotRepoPath = randomRepoPath(); createRepository(snapshotRepoName, "mock", snapshotRepoSettingsForShallowCopy(snapshotRepoPath)); - final Path remoteStoreRepoPath = randomRepoPath(); - createRepository(REMOTE_REPO_NAME, "fs", remoteStoreRepoPath); - final String 
testIndex = "index-test"; createIndexWithContent(testIndex); @@ -300,8 +290,9 @@ public void testRemoteStoreCleanupForDeletedIndex() throws Exception { disableRepoConsistencyCheck("Remote store repository is being used in the test"); FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); - internalCluster().startClusterManagerOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME)); - internalCluster().startDataOnlyNode(); + final Path remoteStoreRepoPath = randomRepoPath(); + internalCluster().startClusterManagerOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME, remoteStoreRepoPath)); + internalCluster().startDataOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME, remoteStoreRepoPath)); final Client clusterManagerClient = internalCluster().clusterManagerClient(); ensureStableCluster(2); @@ -309,9 +300,6 @@ public void testRemoteStoreCleanupForDeletedIndex() throws Exception { final Path snapshotRepoPath = randomRepoPath(); createRepository(snapshotRepoName, "mock", snapshotRepoSettingsForShallowCopy(snapshotRepoPath)); - final Path remoteStoreRepoPath = randomRepoPath(); - createRepository(REMOTE_REPO_NAME, "fs", remoteStoreRepoPath); - final String testIndex = "index-test"; createIndexWithContent(testIndex); diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/RemoteIndexSnapshotStatusApiIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/RemoteIndexSnapshotStatusApiIT.java index fb91b1d7a006c..979df95547d06 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/RemoteIndexSnapshotStatusApiIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/RemoteIndexSnapshotStatusApiIT.java @@ -40,7 +40,9 @@ import org.opensearch.common.action.ActionFuture; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.FeatureFlags; +import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.threadpool.ThreadPool; +import org.junit.Before; import java.nio.file.Path; @@ -49,8 +51,17 @@ import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.is; +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class RemoteIndexSnapshotStatusApiIT extends AbstractSnapshotIntegTestCase { + protected Path absolutePath; + final String remoteStoreRepoName = "remote-store-repo-name"; + + @Before + public void setup() { + absolutePath = randomRepoPath().toAbsolutePath(); + } + @Override protected Settings nodeSettings(int nodeOrdinal) { return Settings.builder() @@ -58,22 +69,18 @@ protected Settings nodeSettings(int nodeOrdinal) { .put(ThreadPool.ESTIMATED_TIME_INTERVAL_SETTING.getKey(), 0) // We have tests that check by-timestamp order .put(FeatureFlags.REMOTE_STORE, "true") .put(FeatureFlags.SEGMENT_REPLICATION_EXPERIMENTAL, "true") - .put(remoteStoreClusterSettings("remote-store-repo-name")) + .put(remoteStoreClusterSettings(remoteStoreRepoName, absolutePath)) .build(); } public void testStatusAPICallForShallowCopySnapshot() throws Exception { disableRepoConsistencyCheck("Remote store repository is being used for the test"); internalCluster().startClusterManagerOnlyNode(); - internalCluster().startDataOnlyNode(); + internalCluster().startDataOnlyNodes(2); final String snapshotRepoName = "snapshot-repo-name"; createRepository(snapshotRepoName, "fs", snapshotRepoSettingsForShallowCopy()); - final Path remoteStoreRepoPath = randomRepoPath(); - final String remoteStoreRepoName = "remote-store-repo-name"; - 
createRepository(remoteStoreRepoName, "fs", remoteStoreRepoPath); - final String remoteStoreEnabledIndexName = "remote-index-1"; final Settings remoteStoreEnabledIndexSettings = getRemoteStoreBackedIndexSettings(); createIndex(remoteStoreEnabledIndexName, remoteStoreEnabledIndexSettings); @@ -104,15 +111,11 @@ public void testStatusAPICallForShallowCopySnapshot() throws Exception { public void testStatusAPIStatsForBackToBackShallowSnapshot() throws Exception { disableRepoConsistencyCheck("Remote store repository is being used for the test"); internalCluster().startClusterManagerOnlyNode(); - internalCluster().startDataOnlyNode(); + internalCluster().startDataOnlyNodes(2); final String snapshotRepoName = "snapshot-repo-name"; createRepository(snapshotRepoName, "fs", snapshotRepoSettingsForShallowCopy()); - final Path remoteStoreRepoPath = randomRepoPath(); - final String remoteStoreRepoName = "remote-store-repo-name"; - createRepository(remoteStoreRepoName, "fs", remoteStoreRepoPath); - final String remoteStoreEnabledIndexName = "remote-index-1"; final Settings remoteStoreEnabledIndexSettings = getRemoteStoreBackedIndexSettings(); createIndex(remoteStoreEnabledIndexName, remoteStoreEnabledIndexSettings); @@ -152,15 +155,11 @@ public void testStatusAPIStatsForBackToBackShallowSnapshot() throws Exception { public void testStatusAPICallInProgressShallowSnapshot() throws Exception { disableRepoConsistencyCheck("Remote store repository is being used for the test"); internalCluster().startClusterManagerOnlyNode(); - internalCluster().startDataOnlyNode(); + internalCluster().startDataOnlyNodes(2); final String snapshotRepoName = "snapshot-repo-name"; createRepository(snapshotRepoName, "mock", snapshotRepoSettingsForShallowCopy().put("block_on_data", true)); - final Path remoteStoreRepoPath = randomRepoPath(); - final String remoteStoreRepoName = "remote-store-repo-name"; - createRepository(remoteStoreRepoName, "mock", remoteStoreRepoPath); - final String remoteStoreEnabledIndexName = "remote-index-1"; final Settings remoteStoreEnabledIndexSettings = getRemoteStoreBackedIndexSettings(); createIndex(remoteStoreEnabledIndexName, remoteStoreEnabledIndexSettings); diff --git a/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java index 1559e77e1cf2d..eb30460ca1b7f 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java @@ -85,6 +85,7 @@ import org.opensearch.discovery.SeedHostsResolver; import org.opensearch.monitor.NodeHealthService; import org.opensearch.monitor.StatusInfo; +import org.opensearch.node.remotestore.RemoteStoreNodeService; import org.opensearch.threadpool.Scheduler; import org.opensearch.threadpool.ThreadPool.Names; import org.opensearch.transport.TransportService; @@ -182,6 +183,7 @@ public class Coordinator extends AbstractLifecycleComponent implements Discovery private Optional currentPublication = Optional.empty(); private final NodeHealthService nodeHealthService; private final PersistedStateRegistry persistedStateRegistry; + private final RemoteStoreNodeService remoteStoreNodeService; /** * @param nodeName The name of the node, used to name the {@link java.util.concurrent.ExecutorService} of the {@link SeedHostsResolver}. 
@@ -203,7 +205,8 @@ public Coordinator( RerouteService rerouteService, ElectionStrategy electionStrategy, NodeHealthService nodeHealthService, - PersistedStateRegistry persistedStateRegistry + PersistedStateRegistry persistedStateRegistry, + RemoteStoreNodeService remoteStoreNodeService ) { this.settings = settings; this.transportService = transportService; @@ -217,6 +220,7 @@ public Coordinator( allocationService, clusterManagerService, transportService, + remoteStoreNodeService, this::getCurrentTerm, this::getStateForClusterManagerService, this::handleJoinRequest, @@ -290,6 +294,7 @@ public Coordinator( this.nodeHealthService = nodeHealthService; this.persistedStateRegistry = persistedStateRegistry; this.localNodeCommissioned = true; + this.remoteStoreNodeService = remoteStoreNodeService; } private ClusterFormationState getClusterFormationState() { diff --git a/server/src/main/java/org/opensearch/cluster/coordination/JoinHelper.java b/server/src/main/java/org/opensearch/cluster/coordination/JoinHelper.java index 9a1f14295fad8..9bf6bac07da53 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/JoinHelper.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/JoinHelper.java @@ -61,6 +61,7 @@ import org.opensearch.core.transport.TransportResponse.Empty; import org.opensearch.monitor.NodeHealthService; import org.opensearch.monitor.StatusInfo; +import org.opensearch.node.remotestore.RemoteStoreNodeService; import org.opensearch.threadpool.ThreadPool; import org.opensearch.threadpool.ThreadPool.Names; import org.opensearch.transport.BytesTransportRequest; @@ -135,6 +136,7 @@ public class JoinHelper { AllocationService allocationService, ClusterManagerService clusterManagerService, TransportService transportService, + RemoteStoreNodeService remoteStoreNodeService, LongSupplier currentTermSupplier, Supplier currentStateSupplier, BiConsumer joinHandler, @@ -152,7 +154,13 @@ public class JoinHelper { this.nodeCommissioned = nodeCommissioned; this.namedWriteableRegistry = namedWriteableRegistry; - this.joinTaskExecutorGenerator = () -> new JoinTaskExecutor(settings, allocationService, logger, rerouteService) { + this.joinTaskExecutorGenerator = () -> new JoinTaskExecutor( + settings, + allocationService, + logger, + rerouteService, + remoteStoreNodeService + ) { private final long term = currentTermSupplier.getAsLong(); diff --git a/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java b/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java index 564819a70111d..15eaf9c8bcc1e 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java @@ -40,6 +40,7 @@ import org.opensearch.cluster.decommission.NodeDecommissionedException; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.metadata.RepositoriesMetadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.routing.RerouteService; @@ -47,6 +48,8 @@ import org.opensearch.common.Priority; import org.opensearch.common.settings.Settings; import org.opensearch.core.action.ActionListener; +import org.opensearch.node.remotestore.RemoteStoreNodeAttribute; +import org.opensearch.node.remotestore.RemoteStoreNodeService; import org.opensearch.persistent.PersistentTasksCustomMetadata; import 
java.util.ArrayList; @@ -61,6 +64,9 @@ import static org.opensearch.cluster.decommission.DecommissionHelper.nodeCommissioned; import static org.opensearch.gateway.GatewayService.STATE_NOT_RECOVERED_BLOCK; +import static org.opensearch.node.remotestore.RemoteStoreNodeService.CompatibilityMode; +import static org.opensearch.node.remotestore.RemoteStoreNodeService.CompatibilityMode.STRICT; +import static org.opensearch.node.remotestore.RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING; /** * Main executor for Nodes joining the OpenSearch cluster @@ -74,6 +80,8 @@ public class JoinTaskExecutor implements ClusterStateTaskExecutor execute(ClusterState currentState, List jo DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(newState.nodes()); + // TODO: We are using one of the existing node to build the repository metadata, this will need to be updated + // once we start supporting mixed compatibility mode. An optimization can be done as this will get invoked + // for every set of node join task which we can optimize to not compute if cluster state already has + // repository information. + RepositoriesMetadata repositoriesMetadata = remoteStoreNodeService.updateRepositoriesMetadata( + (currentNodes.getNodes().values()).stream().findFirst().get(), + currentState.getMetadata().custom(RepositoriesMetadata.TYPE) + ); + assert nodesBuilder.isLocalNodeElectedClusterManager(); Version minClusterNodeVersion = newState.nodes().getMinNodeVersion(); @@ -170,17 +194,17 @@ public ClusterTasksResult execute(ClusterState currentState, List jo // processing any joins Map joiniedNodeNameIds = new HashMap<>(); for (final Task joinTask : joiningNodes) { + final DiscoveryNode node = joinTask.node(); if (joinTask.isBecomeClusterManagerTask() || joinTask.isFinishElectionTask()) { // noop - } else if (currentNodes.nodeExistsWithSameRoles(joinTask.node())) { - logger.debug("received a join request for an existing node [{}]", joinTask.node()); + } else if (currentNodes.nodeExistsWithSameRoles(node)) { + logger.debug("received a join request for an existing node [{}]", node); } else { - final DiscoveryNode node = joinTask.node(); try { if (enforceMajorVersion) { ensureMajorVersionBarrier(node.getVersion(), minClusterNodeVersion); } - ensureNodesCompatibility(node.getVersion(), minClusterNodeVersion, maxClusterNodeVersion); + ensureNodesCompatibility(node, currentNodes, currentState.metadata(), minClusterNodeVersion, maxClusterNodeVersion); // we do this validation quite late to prevent race conditions between nodes joining and importing dangling indices // we have to reject nodes that don't support all indices we have in this cluster ensureIndexCompatibility(node.getVersion(), currentState.getMetadata()); @@ -232,16 +256,36 @@ public ClusterTasksResult execute(ClusterState currentState, List jo .coordinationMetadata(coordMetadataBuilder.build()) .build(); return results.build( - allocationService.adaptAutoExpandReplicas(newState.nodes(nodesBuilder).metadata(newMetadata).build()) + allocationService.adaptAutoExpandReplicas( + newState.nodes(nodesBuilder) + .metadata(updateMetadataWithRepositoriesMetadata(newMetadata, repositoriesMetadata)) + .build() + ) ); } } - return results.build(allocationService.adaptAutoExpandReplicas(newState.nodes(nodesBuilder).build())); + return results.build( + allocationService.adaptAutoExpandReplicas( + newState.nodes(nodesBuilder) + .metadata(updateMetadataWithRepositoriesMetadata(currentState.metadata(), repositoriesMetadata)) + .build() + ) + ); } else { // we must 
return a new cluster state instance to force publishing. This is important // for the joining node to finalize its join and set us as a cluster-manager - return results.build(newState.build()); + return results.build( + newState.metadata(updateMetadataWithRepositoriesMetadata(currentState.metadata(), repositoriesMetadata)).build() + ); + } + } + + private Metadata updateMetadataWithRepositoriesMetadata(Metadata currentMetadata, RepositoriesMetadata repositoriesMetadata) { + if (repositoriesMetadata == null || repositoriesMetadata.repositories() == null || repositoriesMetadata.repositories().isEmpty()) { + return currentMetadata; + } else { + return Metadata.builder(currentMetadata).putCustom(RepositoriesMetadata.TYPE, repositoriesMetadata.get()).build(); } } @@ -359,16 +403,24 @@ public static void ensureIndexCompatibility(final Version nodeVersion, Metadata /** * ensures that the joining node has a version that's compatible with all current nodes */ - public static void ensureNodesCompatibility(final Version joiningNodeVersion, DiscoveryNodes currentNodes) { + public static void ensureNodesCompatibility(final DiscoveryNode joiningNode, DiscoveryNodes currentNodes, Metadata metadata) { final Version minNodeVersion = currentNodes.getMinNodeVersion(); final Version maxNodeVersion = currentNodes.getMaxNodeVersion(); - ensureNodesCompatibility(joiningNodeVersion, minNodeVersion, maxNodeVersion); + ensureNodesCompatibility(joiningNode, currentNodes, metadata, minNodeVersion, maxNodeVersion); } /** - * ensures that the joining node has a version that's compatible with a given version range + * ensures that the joining node has a version that's compatible with a given version range and ensures that the + * joining node has required attributes to join a remotestore cluster. */ - public static void ensureNodesCompatibility(Version joiningNodeVersion, Version minClusterNodeVersion, Version maxClusterNodeVersion) { + public static void ensureNodesCompatibility( + DiscoveryNode joiningNode, + DiscoveryNodes currentNodes, + Metadata metadata, + Version minClusterNodeVersion, + Version maxClusterNodeVersion + ) { + Version joiningNodeVersion = joiningNode.getVersion(); assert minClusterNodeVersion.onOrBefore(maxClusterNodeVersion) : minClusterNodeVersion + " > " + maxClusterNodeVersion; if (joiningNodeVersion.isCompatible(maxClusterNodeVersion) == false) { throw new IllegalStateException( @@ -390,6 +442,8 @@ public static void ensureNodesCompatibility(Version joiningNodeVersion, Version + "], which is incompatible." ); } + + ensureRemoteStoreNodesCompatibility(joiningNode, currentNodes, metadata); } /** @@ -422,12 +476,65 @@ public static void ensureNodeCommissioned(DiscoveryNode node, Metadata metadata) } } + /** + * The method ensures homogeneity - + * 1. The joining node has to be a remote store backed if it's joining a remote store backed cluster. Validates + * remote store attributes of joining node against the existing nodes of cluster. + * 2. The joining node has to be a non-remote store backed if it is joining a non-remote store backed cluster. + * Validates no remote store attributes are present in joining node as existing nodes in the cluster doesn't have + * remote store attributes. 
+ * + * A remote store backed node is the one which holds all the remote store attributes and a remote store backed + * cluster is the one which has only homogeneous remote store backed nodes with same node attributes + * + * TODO: When we support moving from remote store cluster to non remote store and vice versa the this logic will + * needs to be modified. + */ + private static void ensureRemoteStoreNodesCompatibility(DiscoveryNode joiningNode, DiscoveryNodes currentNodes, Metadata metadata) { + List existingNodes = new ArrayList<>(currentNodes.getNodes().values()); + + assert existingNodes.isEmpty() == false; + + // TODO: The below check is valid till we don't support migration, once we start supporting migration a remote + // store node will be able to join a non remote store cluster and vice versa. #7986 + CompatibilityMode remoteStoreCompatibilityMode = REMOTE_STORE_COMPATIBILITY_MODE_SETTING.get(metadata.settings()); + if (STRICT.equals(remoteStoreCompatibilityMode)) { + DiscoveryNode existingNode = existingNodes.get(0); + if (joiningNode.isRemoteStoreNode()) { + if (existingNode.isRemoteStoreNode()) { + RemoteStoreNodeAttribute joiningRemoteStoreNodeAttribute = new RemoteStoreNodeAttribute(joiningNode); + RemoteStoreNodeAttribute existingRemoteStoreNodeAttribute = new RemoteStoreNodeAttribute(existingNode); + if (existingRemoteStoreNodeAttribute.equals(joiningRemoteStoreNodeAttribute) == false) { + throw new IllegalStateException( + "a remote store node [" + + joiningNode + + "] is trying to join a remote store cluster with incompatible node attributes in " + + "comparison with existing node [" + + existingNode + + "]" + ); + } + } else { + throw new IllegalStateException( + "a remote store node [" + joiningNode + "] is trying to join a non remote store cluster" + ); + } + } else { + if (existingNode.isRemoteStoreNode()) { + throw new IllegalStateException( + "a non remote store node [" + joiningNode + "] is trying to join a remote store cluster" + ); + } + } + } + } + public static Collection> addBuiltInJoinValidators( Collection> onJoinValidators ) { final Collection> validators = new ArrayList<>(); validators.add((node, state) -> { - ensureNodesCompatibility(node.getVersion(), state.getNodes()); + ensureNodesCompatibility(node, state.getNodes(), state.metadata()); ensureIndexCompatibility(node.getVersion(), state.getMetadata()); ensureNodeCommissioned(node, state.getMetadata()); }); diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java index fd7fe29442eb2..c43353e9e64e0 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java @@ -94,6 +94,8 @@ import org.opensearch.indices.ShardLimitValidator; import org.opensearch.indices.SystemIndices; import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.node.Node; +import org.opensearch.node.remotestore.RemoteStoreNodeAttribute; import org.opensearch.threadpool.ThreadPool; import java.io.IOException; @@ -132,10 +134,8 @@ import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REPLICATION_TYPE; import static org.opensearch.cluster.metadata.Metadata.DEFAULT_REPLICA_COUNT_SETTING; -import static 
org.opensearch.indices.IndicesService.CLUSTER_REMOTE_SEGMENT_STORE_REPOSITORY_SETTING; -import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_ENABLED_SETTING; -import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_TRANSLOG_REPOSITORY_SETTING; import static org.opensearch.indices.IndicesService.CLUSTER_REPLICATION_TYPE_SETTING; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.isRemoteStoreAttributePresent; /** * Service responsible for submitting create index requests @@ -927,7 +927,8 @@ static Settings aggregateIndexSettings( } /** - * Updates index settings to set replication strategy by default based on cluster level settings + * Updates index settings to set replication strategy by default based on cluster level settings or remote store + * node attributes * @param settingsBuilder index settings builder to be updated with relevant settings * @param requestSettings settings passed in during index create request * @param clusterSettings cluster level settings @@ -937,7 +938,7 @@ private static void updateReplicationStrategy(Settings.Builder settingsBuilder, settingsBuilder.put(SETTING_REPLICATION_TYPE, INDEX_REPLICATION_TYPE_SETTING.get(requestSettings)); } else if (CLUSTER_REPLICATION_TYPE_SETTING.exists(clusterSettings)) { settingsBuilder.put(SETTING_REPLICATION_TYPE, CLUSTER_REPLICATION_TYPE_SETTING.get(clusterSettings)); - } else if (CLUSTER_REMOTE_STORE_ENABLED_SETTING.get(clusterSettings)) { + } else if (isRemoteStoreAttributePresent(clusterSettings)) { settingsBuilder.put(SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT); } else { settingsBuilder.put(SETTING_REPLICATION_TYPE, CLUSTER_REPLICATION_TYPE_SETTING.getDefault(clusterSettings)); @@ -945,15 +946,25 @@ private static void updateReplicationStrategy(Settings.Builder settingsBuilder, } /** - * Updates index settings to enable remote store by default based on cluster level settings + * Updates index settings to enable remote store by default based on node attributes * @param settingsBuilder index settings builder to be updated with relevant settings * @param clusterSettings cluster level settings */ private static void updateRemoteStoreSettings(Settings.Builder settingsBuilder, Settings clusterSettings) { - if (CLUSTER_REMOTE_STORE_ENABLED_SETTING.get(clusterSettings) == true) { + if (isRemoteStoreAttributePresent(clusterSettings)) { settingsBuilder.put(SETTING_REMOTE_STORE_ENABLED, true) - .put(SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, CLUSTER_REMOTE_SEGMENT_STORE_REPOSITORY_SETTING.get(clusterSettings)) - .put(SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY, CLUSTER_REMOTE_TRANSLOG_REPOSITORY_SETTING.get(clusterSettings)); + .put( + SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, + clusterSettings.get( + Node.NODE_ATTRIBUTES.getKey() + RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY + ) + ) + .put( + SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY, + clusterSettings.get( + Node.NODE_ATTRIBUTES.getKey() + RemoteStoreNodeAttribute.REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY + ) + ); } } diff --git a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java index a04b0d9de912d..4e49b25eb5789 100644 --- a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java +++ b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java @@ -44,6 +44,7 @@ import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; import 
org.opensearch.node.Node; +import org.opensearch.node.remotestore.RemoteStoreNodeService; import java.io.IOException; import java.util.Collections; @@ -61,6 +62,7 @@ import java.util.stream.Stream; import static org.opensearch.node.NodeRoleSettings.NODE_ROLES_SETTING; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_NODE_ATTRIBUTE_KEY_PREFIX; /** * A discovery node represents a node that is part of the cluster. @@ -281,6 +283,27 @@ public static DiscoveryNode createLocal(Settings settings, TransportAddress publ return new DiscoveryNode(Node.NODE_NAME_SETTING.get(settings), nodeId, publishAddress, attributes, roles, Version.CURRENT); } + /** Creates a DiscoveryNode representing the local node and verifies the repository. */ + public static DiscoveryNode createRemoteNodeLocal( + Settings settings, + TransportAddress publishAddress, + String nodeId, + RemoteStoreNodeService remoteStoreNodeService + ) { + Map attributes = Node.NODE_ATTRIBUTES.getAsMap(settings); + Set roles = getRolesFromSettings(settings); + DiscoveryNode discoveryNode = new DiscoveryNode( + Node.NODE_NAME_SETTING.get(settings), + nodeId, + publishAddress, + attributes, + roles, + Version.CURRENT + ); + remoteStoreNodeService.createAndVerifyRepositories(discoveryNode); + return discoveryNode; + } + /** extract node roles from the given settings */ public static Set getRolesFromSettings(final Settings settings) { if (NODE_ROLES_SETTING.exists(settings)) { @@ -460,6 +483,15 @@ public boolean isSearchNode() { return roles.contains(DiscoveryNodeRole.SEARCH_ROLE); } + /** + * Returns whether the node is a remote store node. + * + * @return true if the node contains remote store node attributes, false otherwise + */ + public boolean isRemoteStoreNode() { + return this.getAttributes().keySet().stream().anyMatch(key -> key.startsWith(REMOTE_STORE_NODE_ATTRIBUTE_KEY_PREFIX)); + } + /** * Returns a set of all the roles that the node has. The roles are returned in sorted order by the role name. *

diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index 05938914b019f..b8fe322234140 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -129,6 +129,7 @@ import org.opensearch.node.Node; import org.opensearch.node.Node.DiscoverySettings; import org.opensearch.node.NodeRoleSettings; +import org.opensearch.node.remotestore.RemoteStoreNodeService; import org.opensearch.persistent.PersistentTasksClusterService; import org.opensearch.persistent.decider.EnableAssignmentDecider; import org.opensearch.plugins.PluginsService; @@ -669,7 +670,8 @@ public void apply(Settings value, Settings current, Settings previous) { // Remote cluster state settings RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING, - RemoteClusterStateService.REMOTE_CLUSTER_STATE_REPOSITORY_SETTING + RemoteClusterStateService.REMOTE_CLUSTER_STATE_REPOSITORY_SETTING, + RemoteStoreNodeService.REMOTE_STORE_COMPATIBILITY_MODE_SETTING ) ) ); @@ -683,12 +685,7 @@ public void apply(Settings value, Settings current, Settings previous) { */ public static final Map, List> FEATURE_FLAGGED_CLUSTER_SETTINGS = Map.of( List.of(FeatureFlags.REMOTE_STORE), - List.of( - IndicesService.CLUSTER_REMOTE_STORE_ENABLED_SETTING, - IndicesService.CLUSTER_REMOTE_SEGMENT_STORE_REPOSITORY_SETTING, - IndicesService.CLUSTER_REMOTE_TRANSLOG_REPOSITORY_SETTING, - IndicesService.CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING - ), + List.of(IndicesService.CLUSTER_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING), List.of(FeatureFlags.CONCURRENT_SEGMENT_SEARCH), List.of( SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING, diff --git a/server/src/main/java/org/opensearch/discovery/DiscoveryModule.java b/server/src/main/java/org/opensearch/discovery/DiscoveryModule.java index 58d8fe2e17fcf..288371aa240a0 100644 --- a/server/src/main/java/org/opensearch/discovery/DiscoveryModule.java +++ b/server/src/main/java/org/opensearch/discovery/DiscoveryModule.java @@ -53,6 +53,7 @@ import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.gateway.GatewayMetaState; import org.opensearch.monitor.NodeHealthService; +import org.opensearch.node.remotestore.RemoteStoreNodeService; import org.opensearch.plugins.DiscoveryPlugin; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; @@ -131,7 +132,8 @@ public DiscoveryModule( GatewayMetaState gatewayMetaState, RerouteService rerouteService, NodeHealthService nodeHealthService, - PersistedStateRegistry persistedStateRegistry + PersistedStateRegistry persistedStateRegistry, + RemoteStoreNodeService remoteStoreNodeService ) { final Collection> joinValidators = new ArrayList<>(); final Map> hostProviders = new HashMap<>(); @@ -208,7 +210,8 @@ public DiscoveryModule( rerouteService, electionStrategy, nodeHealthService, - persistedStateRegistry + persistedStateRegistry, + remoteStoreNodeService ); } else { throw new IllegalArgumentException("Unknown discovery type [" + discoveryType + "]"); diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java index 8defaef1c844b..2e2a0762ea489 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesService.java +++ b/server/src/main/java/org/opensearch/indices/IndicesService.java @@ -243,36 +243,6 @@ public class 
IndicesService extends AbstractLifecycleComponent Property.Final ); - /** - * Used to specify if all indexes are to create with remote store enabled by default - */ - public static final Setting CLUSTER_REMOTE_STORE_ENABLED_SETTING = Setting.boolSetting( - "cluster.remote_store.enabled", - false, - Property.NodeScope, - Property.Final - ); - - /** - * Used to specify default repo to use for segment upload for remote store backed indices - */ - public static final Setting CLUSTER_REMOTE_SEGMENT_STORE_REPOSITORY_SETTING = Setting.simpleString( - "cluster.remote_store.segment.repository", - "", - Property.NodeScope, - Property.Final - ); - - /** - * Used to specify default repo to use for translog upload for remote store backed indices - */ - public static final Setting CLUSTER_REMOTE_TRANSLOG_REPOSITORY_SETTING = Setting.simpleString( - "cluster.remote_store.translog.repository", - "", - Property.NodeScope, - Property.Final - ); - /** * Used to specify the default translog buffer interval for remote store backed indexes. */ diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index fdf8b616ccb6c..b9c5c3352adc9 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -165,6 +165,7 @@ import org.opensearch.monitor.fs.FsInfo; import org.opensearch.monitor.fs.FsProbe; import org.opensearch.monitor.jvm.JvmInfo; +import org.opensearch.node.remotestore.RemoteStoreNodeService; import org.opensearch.persistent.PersistentTasksClusterService; import org.opensearch.persistent.PersistentTasksExecutor; import org.opensearch.persistent.PersistentTasksExecutorRegistry; @@ -269,6 +270,7 @@ import static org.opensearch.common.util.FeatureFlags.TELEMETRY; import static org.opensearch.env.NodeEnvironment.collectFileCacheDataPath; import static org.opensearch.index.ShardIndexingPressureSettings.SHARD_INDEXING_PRESSURE_ENABLED_ATTRIBUTE_KEY; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.isRemoteStoreAttributePresent; /** * A node represent a node within a cluster ({@code cluster.name}). 
The {@link #client()} can be used @@ -522,12 +524,15 @@ protected Node( .collect(Collectors.toCollection(LinkedHashSet::new)) ); resourcesToClose.add(nodeEnvironment); - localNodeFactory = new LocalNodeFactory(settings, nodeEnvironment.nodeId()); final List> executorBuilders = pluginsService.getExecutorBuilders(settings); runnableTaskListener = new AtomicReference<>(); final ThreadPool threadPool = new ThreadPool(settings, runnableTaskListener, executorBuilders.toArray(new ExecutorBuilder[0])); + + final SetOnce repositoriesServiceReference = new SetOnce<>(); + final RemoteStoreNodeService remoteStoreNodeService = new RemoteStoreNodeService(repositoriesServiceReference::get, threadPool); + localNodeFactory = new LocalNodeFactory(settings, nodeEnvironment.nodeId(), remoteStoreNodeService); resourcesToClose.add(() -> ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS)); final ResourceWatcherService resourceWatcherService = new ResourceWatcherService(settings, threadPool); resourcesToClose.add(resourceWatcherService); @@ -591,7 +596,6 @@ protected Node( client ); - final SetOnce repositoriesServiceReference = new SetOnce<>(); final ClusterInfoService clusterInfoService = newClusterInfoService(settings, clusterService, threadPool, client); final UsageService usageService = new UsageService(); @@ -1000,7 +1004,8 @@ protected Node( gatewayMetaState, rerouteService, fsHealthService, - persistedStateRegistry + persistedStateRegistry, + remoteStoreNodeService ); final SearchPipelineService searchPipelineService = new SearchPipelineService( clusterService, @@ -1739,15 +1744,28 @@ private static class LocalNodeFactory implements Function localNode = new SetOnce<>(); private final String persistentNodeId; private final Settings settings; + private final RemoteStoreNodeService remoteStoreNodeService; - private LocalNodeFactory(Settings settings, String persistentNodeId) { + private LocalNodeFactory(Settings settings, String persistentNodeId, RemoteStoreNodeService remoteStoreNodeService) { this.persistentNodeId = persistentNodeId; this.settings = settings; + this.remoteStoreNodeService = remoteStoreNodeService; } @Override public DiscoveryNode apply(BoundTransportAddress boundTransportAddress) { - localNode.set(DiscoveryNode.createLocal(settings, boundTransportAddress.publishAddress(), persistentNodeId)); + if (isRemoteStoreAttributePresent(settings)) { + localNode.set( + DiscoveryNode.createRemoteNodeLocal( + settings, + boundTransportAddress.publishAddress(), + persistentNodeId, + remoteStoreNodeService + ) + ); + } else { + localNode.set(DiscoveryNode.createLocal(settings, boundTransportAddress.publishAddress(), persistentNodeId)); + } return localNode.get(); } diff --git a/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeAttribute.java b/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeAttribute.java new file mode 100644 index 0000000000000..b514089966484 --- /dev/null +++ b/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeAttribute.java @@ -0,0 +1,149 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.node.remotestore; + +import org.opensearch.cluster.metadata.RepositoriesMetadata; +import org.opensearch.cluster.metadata.RepositoryMetadata; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.settings.Settings; +import org.opensearch.node.Node; +import org.opensearch.repositories.blobstore.BlobStoreRepository; + +import java.util.ArrayList; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.stream.Collectors; + +/** + * This is an abstraction for validating and storing information specific to remote backed storage nodes. + * + * @opensearch.internal + */ +public class RemoteStoreNodeAttribute { + + public static final String REMOTE_STORE_NODE_ATTRIBUTE_KEY_PREFIX = "remote_store"; + public static final String REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY = "remote_store.segment.repository"; + public static final String REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY = "remote_store.translog.repository"; + public static final String REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT = "remote_store.repository.%s.type"; + public static final String REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX = "remote_store.repository.%s.settings."; + private final RepositoriesMetadata repositoriesMetadata; + + /** + * Creates a new {@link RemoteStoreNodeAttribute} + */ + public RemoteStoreNodeAttribute(DiscoveryNode node) { + this.repositoriesMetadata = buildRepositoriesMetadata(node); + } + + private String validateAttributeNonNull(DiscoveryNode node, String attributeKey) { + String attributeValue = node.getAttributes().get(attributeKey); + if (attributeValue == null || attributeValue.isEmpty()) { + throw new IllegalStateException("joining node [" + node + "] doesn't have the node attribute [" + attributeKey + "]"); + } + + return attributeValue; + } + + private Map validateSettingsAttributesNonNull(DiscoveryNode node, String repositoryName) { + String settingsAttributeKeyPrefix = String.format( + Locale.getDefault(), + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, + repositoryName + ); + Map settingsMap = node.getAttributes() + .keySet() + .stream() + .filter(key -> key.startsWith(settingsAttributeKeyPrefix)) + .collect(Collectors.toMap(key -> key.replace(settingsAttributeKeyPrefix, ""), key -> validateAttributeNonNull(node, key))); + + if (settingsMap.isEmpty()) { + throw new IllegalStateException( + "joining node [" + node + "] doesn't have settings attribute for [" + repositoryName + "] repository" + ); + } + + return settingsMap; + } + + private RepositoryMetadata buildRepositoryMetadata(DiscoveryNode node, String name) { + String type = validateAttributeNonNull( + node, + String.format(Locale.getDefault(), REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, name) + ); + Map settingsMap = validateSettingsAttributesNonNull(node, name); + + Settings.Builder settings = Settings.builder(); + settingsMap.forEach(settings::put); + + // Repository metadata built here will always be for a system repository. 
+ settings.put(BlobStoreRepository.SYSTEM_REPOSITORY_SETTING.getKey(), true); + + return new RepositoryMetadata(name, type, settings.build()); + } + + private RepositoriesMetadata buildRepositoriesMetadata(DiscoveryNode node) { + List repositoryMetadataList = new ArrayList<>(); + Set repositoryNames = new HashSet<>(); + + repositoryNames.add(validateAttributeNonNull(node, REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY)); + repositoryNames.add(validateAttributeNonNull(node, REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY)); + + for (String repositoryName : repositoryNames) { + repositoryMetadataList.add(buildRepositoryMetadata(node, repositoryName)); + } + + return new RepositoriesMetadata(repositoryMetadataList); + } + + public static boolean isRemoteStoreAttributePresent(Settings settings) { + return settings.getByPrefix(Node.NODE_ATTRIBUTES.getKey() + REMOTE_STORE_NODE_ATTRIBUTE_KEY_PREFIX).isEmpty() == false; + } + + public RepositoriesMetadata getRepositoriesMetadata() { + return this.repositoriesMetadata; + } + + @Override + public int hashCode() { + // The hashCode is generated by computing the hash of all the repositoryMetadata present in + // repositoriesMetadata without generation. Below is the modified list hashCode generation logic. + + int hashCode = 1; + Iterator iterator = this.repositoriesMetadata.repositories().iterator(); + while (iterator.hasNext()) { + RepositoryMetadata repositoryMetadata = (RepositoryMetadata) iterator.next(); + hashCode = 31 * hashCode + (repositoryMetadata == null + ? 0 + : Objects.hash(repositoryMetadata.name(), repositoryMetadata.type(), repositoryMetadata.settings())); + } + return hashCode; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + RemoteStoreNodeAttribute that = (RemoteStoreNodeAttribute) o; + + return this.getRepositoriesMetadata().equalsIgnoreGenerations(that.getRepositoriesMetadata()); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append('{').append(this.repositoriesMetadata).append('}'); + return super.toString(); + } +} diff --git a/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeService.java b/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeService.java new file mode 100644 index 0000000000000..26c078353d12a --- /dev/null +++ b/server/src/main/java/org/opensearch/node/remotestore/RemoteStoreNodeService.java @@ -0,0 +1,159 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.node.remotestore; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.opensearch.cluster.metadata.RepositoriesMetadata; +import org.opensearch.cluster.metadata.RepositoryMetadata; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.settings.Setting; +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.repositories.Repository; +import org.opensearch.threadpool.ThreadPool; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.function.Supplier; + +/** + * Contains all the method needed for a remote store backed node lifecycle. 
+ */ +public class RemoteStoreNodeService { + + private static final Logger logger = LogManager.getLogger(RemoteStoreNodeService.class); + private final Supplier repositoriesService; + private final ThreadPool threadPool; + public static final Setting REMOTE_STORE_COMPATIBILITY_MODE_SETTING = new Setting<>( + "remote_store.compatibility_mode", + CompatibilityMode.STRICT.name(), + CompatibilityMode::parseString, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + + /** + * Node join compatibility mode introduced with remote backed storage. + * + * @opensearch.internal + */ + public enum CompatibilityMode { + STRICT("strict"); + + public final String mode; + + CompatibilityMode(String mode) { + this.mode = mode; + } + + public static CompatibilityMode parseString(String compatibilityMode) { + try { + return CompatibilityMode.valueOf(compatibilityMode.toUpperCase(Locale.ROOT)); + } catch (IllegalArgumentException e) { + throw new IllegalArgumentException( + "[" + + compatibilityMode + + "] compatibility mode is not supported. " + + "supported modes are [" + + CompatibilityMode.values().toString() + + "]" + ); + } + } + } + + public RemoteStoreNodeService(Supplier repositoriesService, ThreadPool threadPool) { + this.repositoriesService = repositoriesService; + this.threadPool = threadPool; + } + + /** + * Creates a repository during a node startup and performs verification by invoking verify method against + * mentioned repository. This verification will happen on a local node to validate if the node is able to connect + * to the repository with appropriate permissions. + * If the creation or verification fails this will close all the repositories this method created and throw + * exception. + */ + public void createAndVerifyRepositories(DiscoveryNode localNode) { + RemoteStoreNodeAttribute nodeAttribute = new RemoteStoreNodeAttribute(localNode); + RepositoriesService reposService = repositoriesService.get(); + Map repositories = new HashMap<>(); + for (RepositoryMetadata repositoryMetadata : nodeAttribute.getRepositoriesMetadata().repositories()) { + String repositoryName = repositoryMetadata.name(); + Repository repository; + RepositoriesService.validate(repositoryName); + + // Create Repository + repository = reposService.createRepository(repositoryMetadata); + logger.info( + "remote backed storage repository with name [{}] and type [{}] created", + repository.getMetadata().name(), + repository.getMetadata().type() + ); + + // Verify Repository + String verificationToken = repository.startVerification(); + repository.verify(verificationToken, localNode); + repository.endVerification(verificationToken); + logger.info(() -> new ParameterizedMessage("successfully verified [{}] repository", repositoryName)); + repositories.put(repositoryName, repository); + } + // Updating the repositories map in RepositoriesService + reposService.updateRepositoriesMap(repositories); + } + + /** + * Updates repositories metadata in the cluster state if not already present. If a repository metadata for a + * repository is already present in the cluster state and if it's different then the joining remote store backed + * node repository metadata an exception will be thrown and the node will not be allowed to join the cluster. 
+ /** + * Updates the repositories metadata in the cluster state if it is not already present. If repository metadata + * for a repository is already present in the cluster state and differs from the metadata supplied by the + * joining remote store backed node, an exception is thrown and the node is not allowed to join the cluster. + */ + public RepositoriesMetadata updateRepositoriesMetadata(DiscoveryNode joiningNode, RepositoriesMetadata existingRepositories) { + if (joiningNode.isRemoteStoreNode()) { + List<RepositoryMetadata> updatedRepositoryMetadataList = new ArrayList<>(); + List<RepositoryMetadata> newRepositoryMetadataList = new RemoteStoreNodeAttribute(joiningNode).getRepositoriesMetadata() + .repositories(); + + if (existingRepositories == null) { + return new RepositoriesMetadata(newRepositoryMetadataList); + } else { + updatedRepositoryMetadataList.addAll(existingRepositories.repositories()); + } + + for (RepositoryMetadata newRepositoryMetadata : newRepositoryMetadataList) { + boolean repositoryAlreadyPresent = false; + for (RepositoryMetadata existingRepositoryMetadata : existingRepositories.repositories()) { + if (newRepositoryMetadata.name().equals(existingRepositoryMetadata.name())) { + if (newRepositoryMetadata.equalsIgnoreGenerations(existingRepositoryMetadata)) { + repositoryAlreadyPresent = true; + break; + } else { + throw new IllegalStateException( + "new repository metadata [" + + newRepositoryMetadata + + "] supplied by joining node is different from existing repository metadata [" + + existingRepositoryMetadata + + "]." + ); + } + } + } + if (repositoryAlreadyPresent == false) { + updatedRepositoryMetadataList.add(newRepositoryMetadata); + } + } + return new RepositoriesMetadata(updatedRepositoryMetadataList); + } else { + return existingRepositories; + } + } +} diff --git a/server/src/main/java/org/opensearch/node/remotestore/package-info.java b/server/src/main/java/org/opensearch/node/remotestore/package-info.java new file mode 100644 index 0000000000000..e2592aa5fcc29 --- /dev/null +++ b/server/src/main/java/org/opensearch/node/remotestore/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Remote store backed node bootstrap and lifecycle handling. */ +package org.opensearch.node.remotestore; diff --git a/server/src/main/java/org/opensearch/repositories/FilterRepository.java b/server/src/main/java/org/opensearch/repositories/FilterRepository.java index 08f8bcb467d03..697ac37c4a175 100644 --- a/server/src/main/java/org/opensearch/repositories/FilterRepository.java +++ b/server/src/main/java/org/opensearch/repositories/FilterRepository.java @@ -167,6 +167,11 @@ public boolean isReadOnly() { return in.isReadOnly(); } + @Override + public boolean isSystemRepository() { + return in.isSystemRepository(); + } + @Override public void snapshotShard( Store store, diff --git a/server/src/main/java/org/opensearch/repositories/RepositoriesService.java b/server/src/main/java/org/opensearch/repositories/RepositoriesService.java index 5c2a19965c532..ab343aabd3a82 100644 --- a/server/src/main/java/org/opensearch/repositories/RepositoriesService.java +++ b/server/src/main/java/org/opensearch/repositories/RepositoriesService.java @@ -420,7 +420,13 @@ public void applyClusterState(ClusterChangedEvent event) { // Check if repositories got changed if ((oldMetadata == null && newMetadata == null) || (oldMetadata != null && oldMetadata.equalsIgnoreGenerations(newMetadata))) { for (Repository repo : repositories.values()) { - repo.updateState(state); + // updateState should only be invoked for repositories that are already in the cluster state. This + // check is needed because system repositories can be populated before the cluster state has the + // repository metadata.
+ RepositoriesMetadata stateRepositoriesMetadata = state.metadata().custom(RepositoriesMetadata.TYPE); + if (stateRepositoriesMetadata != null && stateRepositoriesMetadata.repository(repo.getMetadata().name()) != null) { + repo.updateState(state); + } } return; } @@ -468,7 +474,22 @@ public void applyClusterState(ClusterChangedEvent event) { } } else { try { - repository = createRepository(repositoryMetadata, typesRegistry); + // System repositories are already created and verified, so during a cluster state update we + // should avoid creating them again. Once the cluster state is updated with the repository + // metadata, the repository metadata update will land in the if block above. + if (repositories.containsKey(repositoryMetadata.name()) == false) { + repository = createRepository(repositoryMetadata, typesRegistry); + } else { + // Validate that the repository metadata created during bootstrap is the same as the + // one present in the incoming cluster state. + repository = repositories.get(repositoryMetadata.name()); + if (repositoryMetadata.equalsIgnoreGenerations(repository.getMetadata()) == false) { + throw new RepositoryException( + repositoryMetadata.name(), + "repository was already registered during bootstrap with metadata different from the cluster state" + ); + } + } } catch (RepositoryException ex) { logger.warn(() -> new ParameterizedMessage("failed to create repository [{}]", repositoryMetadata.name()), ex); } @@ -587,7 +608,7 @@ public void unregisterInternalRepository(String name) { } /** Closes the given repository. */ - private void closeRepository(Repository repository) { + public void closeRepository(Repository repository) { logger.debug("closing repository [{}][{}]", repository.getMetadata().type(), repository.getMetadata().name()); repository.close(); } @@ -601,6 +622,13 @@ private void archiveRepositoryStats(Repository repository, long clusterStateVers } } + /** + * Creates a repository holder. This method starts the non-internal repository. + */ + public Repository createRepository(RepositoryMetadata repositoryMetadata) { + return this.createRepository(repositoryMetadata, typesRegistry); + } + /** * Creates repository holder. This method starts the repository */ @@ -625,7 +653,7 @@ private Repository createRepository(RepositoryMetadata repositoryMetadata, Map<String, Repository.Factory> factories) { + public void updateRepositoriesMap(Map<String, Repository> repos) { + if (repositories.isEmpty()) { + repositories = repos; + } else { + throw new IllegalArgumentException("can't overwrite as repositories are already present"); + } + } + @Override protected void doStart() { diff --git a/server/src/main/java/org/opensearch/repositories/Repository.java b/server/src/main/java/org/opensearch/repositories/Repository.java index 76a3b65c9ea55..10f3dc2b6b340 100644 --- a/server/src/main/java/org/opensearch/repositories/Repository.java +++ b/server/src/main/java/org/opensearch/repositories/Repository.java @@ -247,6 +247,13 @@ default RepositoryStats stats() { */ boolean isReadOnly(); + /** + * Returns true if the repository is managed directly by the system and does not allow managing the lifetime of + * the repository through external APIs. + * @return true if the repository is system managed + */ + boolean isSystemRepository(); + /** * Creates a snapshot of the shard based on the index commit point. *

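The join-path merge in updateRepositoriesMetadata above has exactly three outcomes per incoming repository: an unknown name is added, identical metadata is kept, and a same-name conflict rejects the join. A reduced sketch of those semantics over plain maps; RepoMeta is again a hypothetical stand-in with no generation field, so plain record equality plays the role of equalsIgnoreGenerations:

    import java.util.LinkedHashMap;
    import java.util.List;
    import java.util.Map;

    record RepoMeta(String name, String type, Map<String, String> settings) {}

    class JoinMerge {
        // Returns the merged repository set, or throws when the joining node
        // disagrees with the cluster about an already-registered repository.
        static Map<String, RepoMeta> merge(Map<String, RepoMeta> existing, List<RepoMeta> joining) {
            Map<String, RepoMeta> updated = new LinkedHashMap<>(existing);
            for (RepoMeta incoming : joining) {
                RepoMeta current = updated.putIfAbsent(incoming.name(), incoming);
                if (current != null && current.equals(incoming) == false) {
                    throw new IllegalStateException("conflicting metadata for repository [" + incoming.name() + "]");
                }
            }
            return updated;
        }
    }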
diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java index b850c7555c174..220d468a03090 100644 --- a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java @@ -286,6 +286,15 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp */ public static final Setting<Boolean> READONLY_SETTING = Setting.boolSetting("readonly", false, Setting.Property.NodeScope); + /** + * Setting that marks a repository as a system repository + */ + public static final Setting<Boolean> SYSTEM_REPOSITORY_SETTING = Setting.boolSetting( + "system_repository", + false, + Setting.Property.NodeScope + ); + protected final boolean supportURLRepo; private final int maxShardBlobDeleteBatch; @@ -347,6 +356,8 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp private final boolean readOnly; + private final boolean isSystemRepository; + private final Object lock = new Object(); private final SetOnce<BlobContainer> blobContainer = new SetOnce<>(); @@ -412,6 +423,7 @@ protected BlobStoreRepository( remoteUploadRateLimiter = getRateLimiter(metadata.settings(), "max_remote_upload_bytes_per_sec", ByteSizeValue.ZERO); remoteDownloadRateLimiter = getRateLimiter(metadata.settings(), "max_remote_download_bytes_per_sec", ByteSizeValue.ZERO); readOnly = READONLY_SETTING.get(metadata.settings()); + isSystemRepository = SYSTEM_REPOSITORY_SETTING.get(metadata.settings()); cacheRepositoryData = CACHE_REPOSITORY_DATA.get(metadata.settings()); bufferSize = Math.toIntExact(BUFFER_SIZE_SETTING.get(metadata.settings()).getBytes()); maxShardBlobDeleteBatch = MAX_SNAPSHOT_SHARD_BLOB_DELETE_BATCH_SIZE.get(metadata.settings()); @@ -1831,8 +1843,10 @@ public String startVerification() { byte[] testBytes = Strings.toUTF8Bytes(seed); BlobContainer testContainer = blobStore().blobContainer(basePath().add(testBlobPrefix(seed))); BytesArray bytes = new BytesArray(testBytes); - try (InputStream stream = bytes.streamInput()) { - testContainer.writeBlobAtomic("master.dat", stream, bytes.length(), true); + if (isSystemRepository == false) { + try (InputStream stream = bytes.streamInput()) { + testContainer.writeBlobAtomic("master.dat", stream, bytes.length(), true); + } } return seed; } @@ -2141,6 +2155,11 @@ public boolean isReadOnly() { return readOnly; } + @Override + public boolean isSystemRepository() { + return isSystemRepository; + } + /** * Writing a new index generation is a three step process.
* First, the {@link RepositoryMetadata} entry for this repository is set into a pending state by incrementing its @@ -3131,7 +3150,9 @@ public IndexShardSnapshotStatus getShardSnapshotStatus(SnapshotId snapshotId, In @Override public void verify(String seed, DiscoveryNode localNode) { - assertSnapshotOrGenericThread(); + if (isSystemRepository == false) { + assertSnapshotOrGenericThread(); + } if (isReadOnly()) { try { latestIndexBlobId(); @@ -3156,30 +3177,33 @@ public void verify(String seed, DiscoveryNode localNode) { exp ); } - try (InputStream masterDat = testBlobContainer.readBlob("master.dat")) { - final String seedRead = Streams.readFully(masterDat).utf8ToString(); - if (seedRead.equals(seed) == false) { + + if (isSystemRepository == false) { + try (InputStream masterDat = testBlobContainer.readBlob("master.dat")) { + final String seedRead = Streams.readFully(masterDat).utf8ToString(); + if (seedRead.equals(seed) == false) { + throw new RepositoryVerificationException( + metadata.name(), + "Seed read from master.dat was [" + seedRead + "] but expected seed [" + seed + "]" + ); + } + } catch (NoSuchFileException e) { throw new RepositoryVerificationException( metadata.name(), - "Seed read from master.dat was [" + seedRead + "] but expected seed [" + seed + "]" + "a file written by cluster-manager to the store [" + + blobStore() + + "] cannot be accessed on the node [" + + localNode + + "]. " + + "This might indicate that the store [" + + blobStore() + + "] is not shared between this node and the cluster-manager node or " + + "that permissions on the store don't allow reading files written by the cluster-manager node", + e ); + } catch (Exception e) { + throw new RepositoryVerificationException(metadata.name(), "Failed to verify repository", e); } - } catch (NoSuchFileException e) { - throw new RepositoryVerificationException( - metadata.name(), - "a file written by cluster-manager to the store [" - + blobStore() - + "] cannot be accessed on the node [" - + localNode - + "]. 
" - + "This might indicate that the store [" - + blobStore() - + "] is not shared between this node and the cluster-manager node or " - + "that permissions on the store don't allow reading files written by the cluster-manager node", - e - ); - } catch (Exception e) { - throw new RepositoryVerificationException(metadata.name(), "Failed to verify repository", e); } } } diff --git a/server/src/test/java/org/opensearch/cluster/coordination/JoinHelperTests.java b/server/src/test/java/org/opensearch/cluster/coordination/JoinHelperTests.java index be0161b84d6fa..3fa5768f4f614 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/JoinHelperTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/JoinHelperTests.java @@ -40,16 +40,21 @@ import org.opensearch.cluster.NotClusterManagerException; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.SetOnce; import org.opensearch.common.settings.Settings; import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.transport.TransportResponse; import org.opensearch.monitor.StatusInfo; +import org.opensearch.node.remotestore.RemoteStoreNodeService; +import org.opensearch.repositories.RepositoriesService; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.CapturingTransport; import org.opensearch.test.transport.CapturingTransport.CapturedRequest; import org.opensearch.test.transport.MockTransport; +import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.BytesTransportRequest; import org.opensearch.transport.RemoteTransportException; import org.opensearch.transport.TransportException; @@ -73,6 +78,7 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.core.Is.is; +import static org.mockito.Mockito.mock; public class JoinHelperTests extends OpenSearchTestCase { private final NamedWriteableRegistry namedWriteableRegistry = DEFAULT_NAMED_WRITABLE_REGISTRY; @@ -97,6 +103,7 @@ public void testJoinDeduplication() { null, null, transportService, + buildRemoteStoreNodeService(transportService, deterministicTaskQueue.getThreadPool()), () -> 0L, () -> null, (joinRequest, joinCallback) -> { @@ -282,6 +289,7 @@ public void testJoinFailureOnUnhealthyNodes() { null, null, transportService, + buildRemoteStoreNodeService(transportService, deterministicTaskQueue.getThreadPool()), () -> 0L, () -> null, (joinRequest, joinCallback) -> { @@ -481,6 +489,7 @@ private TestClusterSetup getTestClusterSetup(Version version, boolean isCapturin null, null, transportService, + buildRemoteStoreNodeService(transportService, deterministicTaskQueue.getThreadPool()), () -> 0L, () -> localClusterState, (joinRequest, joinCallback) -> { @@ -500,6 +509,18 @@ private TestClusterSetup getTestClusterSetup(Version version, boolean isCapturin return new TestClusterSetup(deterministicTaskQueue, localNode, transportService, localClusterState, joinHelper, capturingTransport); } + private RemoteStoreNodeService buildRemoteStoreNodeService(TransportService transportService, ThreadPool threadPool) { + RepositoriesService repositoriesService = new RepositoriesService( + Settings.EMPTY, + mock(ClusterService.class), + transportService, + Collections.emptyMap(), + 
Collections.emptyMap(), + threadPool + ); + return new RemoteStoreNodeService(new SetOnce<>(repositoriesService)::get, threadPool); + } + private static class TestClusterSetup { public final DeterministicTaskQueue deterministicTaskQueue; public final DiscoveryNode localNode; diff --git a/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java b/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java index bd45985379e7d..e0844fd521d3f 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java @@ -42,21 +42,35 @@ import org.opensearch.cluster.decommission.NodeDecommissionedException; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.metadata.RepositoriesMetadata; +import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodeRole; import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.routing.RerouteService; import org.opensearch.cluster.routing.allocation.AllocationService; +import org.opensearch.common.SetOnce; import org.opensearch.common.UUIDs; import org.opensearch.common.settings.Settings; +import org.opensearch.node.remotestore.RemoteStoreNodeService; +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.VersionUtils; +import java.util.ArrayList; import java.util.Collections; +import java.util.HashMap; import java.util.HashSet; import java.util.List; +import java.util.Locale; import java.util.Map; +import java.util.stream.Collectors; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.test.VersionUtils.allVersions; import static org.opensearch.test.VersionUtils.maxCompatibleVersion; import static org.opensearch.test.VersionUtils.randomCompatibleVersion; @@ -109,14 +123,20 @@ public void testPreventJoinClusterWithUnsupportedNodeVersions() { builder.add(new DiscoveryNode(UUIDs.base64UUID(), buildNewFakeTransportAddress(), randomCompatibleVersion(random(), version))); DiscoveryNodes nodes = builder.build(); + Metadata metadata = Metadata.EMPTY_METADATA; + final Version maxNodeVersion = nodes.getMaxNodeVersion(); final Version minNodeVersion = nodes.getMinNodeVersion(); - final Version tooLow = LegacyESVersion.fromString("6.7.0"); + final DiscoveryNode tooLowJoiningNode = new DiscoveryNode( + UUIDs.base64UUID(), + buildNewFakeTransportAddress(), + LegacyESVersion.fromString("6.7.0") + ); expectThrows(IllegalStateException.class, () -> { if (randomBoolean()) { - JoinTaskExecutor.ensureNodesCompatibility(tooLow, nodes); + JoinTaskExecutor.ensureNodesCompatibility(tooLowJoiningNode, nodes, metadata); } else { - JoinTaskExecutor.ensureNodesCompatibility(tooLow, minNodeVersion, maxNodeVersion); + 
JoinTaskExecutor.ensureNodesCompatibility(tooLowJoiningNode, nodes, metadata, minNodeVersion, maxNodeVersion); } }); @@ -134,11 +154,11 @@ public void testPreventJoinClusterWithUnsupportedNodeVersions() { minGoodVersion = minCompatVersion.before(allVersions().get(0)) ? allVersions().get(0) : minCompatVersion; } final Version justGood = randomVersionBetween(random(), minGoodVersion, maxCompatibleVersion(minNodeVersion)); - + final DiscoveryNode justGoodJoiningNode = new DiscoveryNode(UUIDs.base64UUID(), buildNewFakeTransportAddress(), justGood); if (randomBoolean()) { - JoinTaskExecutor.ensureNodesCompatibility(justGood, nodes); + JoinTaskExecutor.ensureNodesCompatibility(justGoodJoiningNode, nodes, metadata); } else { - JoinTaskExecutor.ensureNodesCompatibility(justGood, minNodeVersion, maxNodeVersion); + JoinTaskExecutor.ensureNodesCompatibility(justGoodJoiningNode, nodes, metadata, minNodeVersion, maxNodeVersion); } } @@ -174,8 +194,16 @@ public void testUpdatesNodeWithNewRoles() throws Exception { final AllocationService allocationService = mock(AllocationService.class); when(allocationService.adaptAutoExpandReplicas(any())).then(invocationOnMock -> invocationOnMock.getArguments()[0]); final RerouteService rerouteService = (reason, priority, listener) -> listener.onResponse(null); - - final JoinTaskExecutor joinTaskExecutor = new JoinTaskExecutor(Settings.EMPTY, allocationService, logger, rerouteService); + final RemoteStoreNodeService remoteStoreNodeService = mock(RemoteStoreNodeService.class); + when(remoteStoreNodeService.updateRepositoriesMetadata(any(), any())).thenReturn(new RepositoriesMetadata(Collections.emptyList())); + + final JoinTaskExecutor joinTaskExecutor = new JoinTaskExecutor( + Settings.EMPTY, + allocationService, + logger, + rerouteService, + remoteStoreNodeService + ); final DiscoveryNode clusterManagerNode = new DiscoveryNode(UUIDs.base64UUID(), buildNewFakeTransportAddress(), Version.CURRENT); @@ -271,8 +299,15 @@ public void testJoinFailedForDecommissionedNode() throws Exception { final AllocationService allocationService = mock(AllocationService.class); when(allocationService.adaptAutoExpandReplicas(any())).then(invocationOnMock -> invocationOnMock.getArguments()[0]); final RerouteService rerouteService = (reason, priority, listener) -> listener.onResponse(null); - - final JoinTaskExecutor joinTaskExecutor = new JoinTaskExecutor(Settings.EMPTY, allocationService, logger, rerouteService); + final RemoteStoreNodeService remoteStoreNodeService = mock(RemoteStoreNodeService.class); + + final JoinTaskExecutor joinTaskExecutor = new JoinTaskExecutor( + Settings.EMPTY, + allocationService, + logger, + rerouteService, + remoteStoreNodeService + ); final DiscoveryNode clusterManagerNode = new DiscoveryNode(UUIDs.base64UUID(), buildNewFakeTransportAddress(), Version.CURRENT); @@ -327,6 +362,399 @@ public void testJoinClusterWithDecommissionFailed() { JoinTaskExecutor.ensureNodeCommissioned(discoveryNode, metadata); } + public void testJoinClusterWithNonRemoteStoreNodeJoining() { + DiscoveryNode joiningNode = newDiscoveryNode(Collections.emptyMap()); + ClusterState currentState = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(joiningNode).build()) + .build(); + + JoinTaskExecutor.ensureNodesCompatibility(joiningNode, currentState.getNodes(), currentState.metadata()); + } + + public void testJoinClusterWithRemoteStoreNodeJoining() { + DiscoveryNode joiningNode = newDiscoveryNode(remoteStoreNodeAttributes(SEGMENT_REPO, TRANSLOG_REPO)); + 
ClusterState currentState = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(joiningNode).build()) + .build(); + + JoinTaskExecutor.ensureNodesCompatibility(joiningNode, currentState.getNodes(), currentState.metadata()); + } + + public void testJoinClusterWithNonRemoteStoreNodeJoiningNonRemoteStoreCluster() { + final DiscoveryNode existingNode = new DiscoveryNode(UUIDs.base64UUID(), buildNewFakeTransportAddress(), Version.CURRENT); + ClusterState currentState = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(existingNode).localNodeId(existingNode.getId()).build()) + .build(); + + DiscoveryNode joiningNode = newDiscoveryNode(Collections.emptyMap()); + + JoinTaskExecutor.ensureNodesCompatibility(joiningNode, currentState.getNodes(), currentState.metadata()); + } + + public void testPreventJoinClusterWithRemoteStoreNodeJoiningNonRemoteStoreCluster() { + final DiscoveryNode existingNode = new DiscoveryNode(UUIDs.base64UUID(), buildNewFakeTransportAddress(), Version.CURRENT); + ClusterState currentState = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(existingNode).localNodeId(existingNode.getId()).build()) + .build(); + + DiscoveryNode joiningNode = newDiscoveryNode(remoteStoreNodeAttributes(SEGMENT_REPO, TRANSLOG_REPO)); + Exception e = assertThrows( + IllegalStateException.class, + () -> JoinTaskExecutor.ensureNodesCompatibility(joiningNode, currentState.getNodes(), currentState.metadata()) + ); + assertTrue(e.getMessage().equals("a remote store node [" + joiningNode + "] is trying to join a non remote " + "store cluster")); + } + + public void testJoinClusterWithRemoteStoreNodeJoiningRemoteStoreCluster() { + final DiscoveryNode existingNode = new DiscoveryNode( + UUIDs.base64UUID(), + buildNewFakeTransportAddress(), + remoteStoreNodeAttributes(SEGMENT_REPO, TRANSLOG_REPO), + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + ClusterState currentState = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(existingNode).localNodeId(existingNode.getId()).build()) + .build(); + + DiscoveryNode joiningNode = newDiscoveryNode(remoteStoreNodeAttributes(SEGMENT_REPO, TRANSLOG_REPO)); + JoinTaskExecutor.ensureNodesCompatibility(joiningNode, currentState.getNodes(), currentState.metadata()); + } + + public void testPreventJoinClusterWithRemoteStoreNodeWithDifferentAttributesJoiningRemoteStoreCluster() { + Map<String, String> existingNodeAttributes = remoteStoreNodeAttributes(SEGMENT_REPO, TRANSLOG_REPO); + Map<String, String> remoteStoreNodeAttributes = remoteStoreNodeAttributes(SEGMENT_REPO, TRANSLOG_REPO); + final DiscoveryNode existingNode = new DiscoveryNode( + UUIDs.base64UUID(), + buildNewFakeTransportAddress(), + existingNodeAttributes, + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + ClusterState currentState = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(existingNode).localNodeId(existingNode.getId()).build()) + .build(); + + for (Map.Entry<String, String> nodeAttribute : existingNodeAttributes.entrySet()) { + if (REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY.equals(nodeAttribute.getKey()) == false + && REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY.equals(nodeAttribute.getKey()) == false) { + remoteStoreNodeAttributes.put(nodeAttribute.getKey(), nodeAttribute.getValue() + "-new"); + validateAttributes(remoteStoreNodeAttributes, currentState, existingNode); + remoteStoreNodeAttributes.put(nodeAttribute.getKey(), nodeAttribute.getValue()); + } + } + } + + public void
testPreventJoinClusterWithRemoteStoreNodeWithDifferentNameAttributesJoiningRemoteStoreCluster() { + Map existingNodeAttributes = remoteStoreNodeAttributes(SEGMENT_REPO, TRANSLOG_REPO); + final DiscoveryNode existingNode = new DiscoveryNode( + UUIDs.base64UUID(), + buildNewFakeTransportAddress(), + existingNodeAttributes, + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + ClusterState currentState = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(existingNode).localNodeId(existingNode.getId()).build()) + .build(); + + for (Map.Entry nodeAttribute : existingNodeAttributes.entrySet()) { + if (REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY.equals(nodeAttribute.getKey())) { + Map remoteStoreNodeAttributes = remoteStoreNodeAttributes(SEGMENT_REPO + "new", TRANSLOG_REPO); + validateAttributes(remoteStoreNodeAttributes, currentState, existingNode); + } else if (REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY.equals(nodeAttribute.getKey())) { + Map remoteStoreNodeAttributes = remoteStoreNodeAttributes(SEGMENT_REPO, TRANSLOG_REPO + "new"); + validateAttributes(remoteStoreNodeAttributes, currentState, existingNode); + } + } + } + + public void testPreventJoinClusterWithNonRemoteStoreNodeJoiningRemoteStoreCluster() { + final DiscoveryNode existingNode = new DiscoveryNode( + UUIDs.base64UUID(), + buildNewFakeTransportAddress(), + remoteStoreNodeAttributes(SEGMENT_REPO, TRANSLOG_REPO), + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + ClusterState currentState = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(existingNode).localNodeId(existingNode.getId()).build()) + .build(); + + DiscoveryNode joiningNode = newDiscoveryNode(Collections.emptyMap()); + Exception e = assertThrows( + IllegalStateException.class, + () -> JoinTaskExecutor.ensureNodesCompatibility(joiningNode, currentState.getNodes(), currentState.metadata()) + ); + assertTrue(e.getMessage().equals("a non remote store node [" + joiningNode + "] is trying to join a remote " + "store cluster")); + } + + public void testPreventJoinClusterWithRemoteStoreNodeWithPartialAttributesJoiningRemoteStoreCluster() { + Map existingNodeAttributes = remoteStoreNodeAttributes(SEGMENT_REPO, TRANSLOG_REPO); + Map remoteStoreNodeAttributes = remoteStoreNodeAttributes(SEGMENT_REPO, TRANSLOG_REPO); + final DiscoveryNode existingNode = new DiscoveryNode( + UUIDs.base64UUID(), + buildNewFakeTransportAddress(), + existingNodeAttributes, + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + ClusterState currentState = ClusterState.builder(ClusterName.DEFAULT) + .nodes(DiscoveryNodes.builder().add(existingNode).localNodeId(existingNode.getId()).build()) + .build(); + + for (Map.Entry nodeAttribute : existingNodeAttributes.entrySet()) { + remoteStoreNodeAttributes.put(nodeAttribute.getKey(), null); + DiscoveryNode joiningNode = newDiscoveryNode(remoteStoreNodeAttributes); + Exception e = assertThrows( + IllegalStateException.class, + () -> JoinTaskExecutor.ensureNodesCompatibility(joiningNode, currentState.getNodes(), currentState.metadata()) + ); + assertTrue( + e.getMessage().equals("joining node [" + joiningNode + "] doesn't have the node attribute [" + nodeAttribute.getKey() + "]") + ); + + remoteStoreNodeAttributes.put(nodeAttribute.getKey(), nodeAttribute.getValue()); + } + } + + public void testUpdatesClusterStateWithSingleNodeCluster() throws Exception { + Map remoteStoreNodeAttributes = remoteStoreNodeAttributes(SEGMENT_REPO, TRANSLOG_REPO); + final 
AllocationService allocationService = mock(AllocationService.class); + when(allocationService.adaptAutoExpandReplicas(any())).then(invocationOnMock -> invocationOnMock.getArguments()[0]); + final RerouteService rerouteService = (reason, priority, listener) -> listener.onResponse(null); + final RemoteStoreNodeService remoteStoreNodeService = new RemoteStoreNodeService( + new SetOnce<>(mock(RepositoriesService.class))::get, + null + ); + + final JoinTaskExecutor joinTaskExecutor = new JoinTaskExecutor( + Settings.EMPTY, + allocationService, + logger, + rerouteService, + remoteStoreNodeService + ); + + final DiscoveryNode clusterManagerNode = new DiscoveryNode( + UUIDs.base64UUID(), + buildNewFakeTransportAddress(), + remoteStoreNodeAttributes, + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + + final ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT) + .nodes( + DiscoveryNodes.builder() + .add(clusterManagerNode) + .localNodeId(clusterManagerNode.getId()) + .clusterManagerNodeId(clusterManagerNode.getId()) + ) + .build(); + + final ClusterStateTaskExecutor.ClusterTasksResult result = joinTaskExecutor.execute( + clusterState, + List.of(new JoinTaskExecutor.Task(clusterManagerNode, "elect leader")) + ); + assertThat(result.executionResults.entrySet(), hasSize(1)); + final ClusterStateTaskExecutor.TaskResult taskResult = result.executionResults.values().iterator().next(); + assertTrue(taskResult.isSuccess()); + validateRepositoryMetadata(result.resultingState, clusterManagerNode, 2); + } + + public void testUpdatesClusterStateWithMultiNodeCluster() throws Exception { + Map remoteStoreNodeAttributes = remoteStoreNodeAttributes(SEGMENT_REPO, TRANSLOG_REPO); + final AllocationService allocationService = mock(AllocationService.class); + when(allocationService.adaptAutoExpandReplicas(any())).then(invocationOnMock -> invocationOnMock.getArguments()[0]); + final RerouteService rerouteService = (reason, priority, listener) -> listener.onResponse(null); + final RemoteStoreNodeService remoteStoreNodeService = new RemoteStoreNodeService( + new SetOnce<>(mock(RepositoriesService.class))::get, + null + ); + + final JoinTaskExecutor joinTaskExecutor = new JoinTaskExecutor( + Settings.EMPTY, + allocationService, + logger, + rerouteService, + remoteStoreNodeService + ); + + final DiscoveryNode clusterManagerNode = new DiscoveryNode( + UUIDs.base64UUID(), + buildNewFakeTransportAddress(), + remoteStoreNodeAttributes, + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + + final RepositoryMetadata segmentRepositoryMetadata = buildRepositoryMetadata(clusterManagerNode, SEGMENT_REPO); + final RepositoryMetadata translogRepositoryMetadata = buildRepositoryMetadata(clusterManagerNode, TRANSLOG_REPO); + List repositoriesMetadata = new ArrayList<>() { + { + add(segmentRepositoryMetadata); + add(translogRepositoryMetadata); + } + }; + + final ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT) + .nodes( + DiscoveryNodes.builder() + .add(clusterManagerNode) + .localNodeId(clusterManagerNode.getId()) + .clusterManagerNodeId(clusterManagerNode.getId()) + ) + .metadata(Metadata.builder().putCustom(RepositoriesMetadata.TYPE, new RepositoriesMetadata(repositoriesMetadata))) + .build(); + + final DiscoveryNode joiningNode = new DiscoveryNode( + UUIDs.base64UUID(), + buildNewFakeTransportAddress(), + remoteStoreNodeAttributes, + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + + final ClusterStateTaskExecutor.ClusterTasksResult result = joinTaskExecutor.execute( 
+ clusterState, + List.of(new JoinTaskExecutor.Task(joiningNode, "test")) + ); + assertThat(result.executionResults.entrySet(), hasSize(1)); + final ClusterStateTaskExecutor.TaskResult taskResult = result.executionResults.values().iterator().next(); + assertTrue(taskResult.isSuccess()); + validateRepositoryMetadata(result.resultingState, clusterManagerNode, 2); + } + + public void testUpdatesClusterStateWithSingleNodeClusterAndSameRepository() throws Exception { + Map remoteStoreNodeAttributes = remoteStoreNodeAttributes(COMMON_REPO, COMMON_REPO); + final AllocationService allocationService = mock(AllocationService.class); + when(allocationService.adaptAutoExpandReplicas(any())).then(invocationOnMock -> invocationOnMock.getArguments()[0]); + final RerouteService rerouteService = (reason, priority, listener) -> listener.onResponse(null); + final RemoteStoreNodeService remoteStoreNodeService = new RemoteStoreNodeService( + new SetOnce<>(mock(RepositoriesService.class))::get, + null + ); + + final JoinTaskExecutor joinTaskExecutor = new JoinTaskExecutor( + Settings.EMPTY, + allocationService, + logger, + rerouteService, + remoteStoreNodeService + ); + + final DiscoveryNode clusterManagerNode = new DiscoveryNode( + UUIDs.base64UUID(), + buildNewFakeTransportAddress(), + remoteStoreNodeAttributes, + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + + final ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT) + .nodes( + DiscoveryNodes.builder() + .add(clusterManagerNode) + .localNodeId(clusterManagerNode.getId()) + .clusterManagerNodeId(clusterManagerNode.getId()) + ) + .build(); + + final ClusterStateTaskExecutor.ClusterTasksResult result = joinTaskExecutor.execute( + clusterState, + List.of(new JoinTaskExecutor.Task(clusterManagerNode, "elect leader")) + ); + assertThat(result.executionResults.entrySet(), hasSize(1)); + final ClusterStateTaskExecutor.TaskResult taskResult = result.executionResults.values().iterator().next(); + assertTrue(taskResult.isSuccess()); + validateRepositoryMetadata(result.resultingState, clusterManagerNode, 1); + } + + public void testUpdatesClusterStateWithMultiNodeClusterAndSameRepository() throws Exception { + Map remoteStoreNodeAttributes = remoteStoreNodeAttributes(COMMON_REPO, COMMON_REPO); + final AllocationService allocationService = mock(AllocationService.class); + when(allocationService.adaptAutoExpandReplicas(any())).then(invocationOnMock -> invocationOnMock.getArguments()[0]); + final RerouteService rerouteService = (reason, priority, listener) -> listener.onResponse(null); + final RemoteStoreNodeService remoteStoreNodeService = new RemoteStoreNodeService( + new SetOnce<>(mock(RepositoriesService.class))::get, + null + ); + + final JoinTaskExecutor joinTaskExecutor = new JoinTaskExecutor( + Settings.EMPTY, + allocationService, + logger, + rerouteService, + remoteStoreNodeService + ); + + final DiscoveryNode clusterManagerNode = new DiscoveryNode( + UUIDs.base64UUID(), + buildNewFakeTransportAddress(), + remoteStoreNodeAttributes, + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + + final RepositoryMetadata segmentRepositoryMetadata = buildRepositoryMetadata(clusterManagerNode, COMMON_REPO); + List repositoriesMetadata = new ArrayList<>() { + { + add(segmentRepositoryMetadata); + } + }; + + final ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT) + .nodes( + DiscoveryNodes.builder() + .add(clusterManagerNode) + .localNodeId(clusterManagerNode.getId()) + .clusterManagerNodeId(clusterManagerNode.getId()) 
+ ) + .metadata(Metadata.builder().putCustom(RepositoriesMetadata.TYPE, new RepositoriesMetadata(repositoriesMetadata))) + .build(); + + final DiscoveryNode joiningNode = new DiscoveryNode( + UUIDs.base64UUID(), + buildNewFakeTransportAddress(), + remoteStoreNodeAttributes, + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + + final ClusterStateTaskExecutor.ClusterTasksResult<JoinTaskExecutor.Task> result = joinTaskExecutor.execute( + clusterState, + List.of(new JoinTaskExecutor.Task(joiningNode, "test")) + ); + assertThat(result.executionResults.entrySet(), hasSize(1)); + final ClusterStateTaskExecutor.TaskResult taskResult = result.executionResults.values().iterator().next(); + assertTrue(taskResult.isSuccess()); + validateRepositoryMetadata(result.resultingState, clusterManagerNode, 1); + } + + private void validateRepositoryMetadata(ClusterState updatedState, DiscoveryNode existingNode, int expectedRepositories) + throws Exception { + + final RepositoriesMetadata repositoriesMetadata = updatedState.metadata().custom(RepositoriesMetadata.TYPE); + assertTrue(repositoriesMetadata.repositories().size() == expectedRepositories); + if (repositoriesMetadata.repositories().size() == 2) { + final RepositoryMetadata segmentRepositoryMetadata = buildRepositoryMetadata(existingNode, SEGMENT_REPO); + final RepositoryMetadata translogRepositoryMetadata = buildRepositoryMetadata(existingNode, TRANSLOG_REPO); + for (RepositoryMetadata repositoryMetadata : repositoriesMetadata.repositories()) { + if (repositoryMetadata.name().equals(segmentRepositoryMetadata.name())) { + assertTrue(segmentRepositoryMetadata.equalsIgnoreGenerations(repositoryMetadata)); + } else if (repositoryMetadata.name().equals(translogRepositoryMetadata.name())) { + assertTrue(translogRepositoryMetadata.equalsIgnoreGenerations(repositoryMetadata)); + } + } + } else if (repositoriesMetadata.repositories().size() == 1) { + final RepositoryMetadata repositoryMetadata = buildRepositoryMetadata(existingNode, COMMON_REPO); + assertTrue(repositoryMetadata.equalsIgnoreGenerations(repositoriesMetadata.repositories().get(0))); + } else { + throw new Exception("unexpected number of repositories in the cluster state"); + } + } + private DiscoveryNode newDiscoveryNode(Map<String, String> attributes) { return new DiscoveryNode( randomAlphaOfLength(10), @@ -337,4 +765,82 @@ private DiscoveryNode newDiscoveryNode(Map<String, String> attributes) { Version.CURRENT ); } + + private static final String SEGMENT_REPO = "segment-repo"; + private static final String TRANSLOG_REPO = "translog-repo"; + private static final String COMMON_REPO = "remote-repo"; + + private Map<String, String> remoteStoreNodeAttributes(String segmentRepoName, String translogRepoName) { + String segmentRepositoryTypeAttributeKey = String.format( + Locale.getDefault(), + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, + segmentRepoName + ); + String segmentRepositorySettingsAttributeKeyPrefix = String.format( + Locale.getDefault(), + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, + segmentRepoName + ); + String translogRepositoryTypeAttributeKey = String.format( + Locale.getDefault(), + REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, + translogRepoName + ); + String translogRepositorySettingsAttributeKeyPrefix = String.format( + Locale.getDefault(), + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, + translogRepoName + ); + + return new HashMap<>() { + { + put(REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY, segmentRepoName); + put(segmentRepositoryTypeAttributeKey, "s3"); + put(segmentRepositorySettingsAttributeKeyPrefix +
"bucket", "segment_bucket"); + put(segmentRepositorySettingsAttributeKeyPrefix + "base_path", "/segment/path"); + put(REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY, translogRepoName); + putIfAbsent(translogRepositoryTypeAttributeKey, "s3"); + putIfAbsent(translogRepositorySettingsAttributeKeyPrefix + "bucket", "translog_bucket"); + putIfAbsent(translogRepositorySettingsAttributeKeyPrefix + "base_path", "/translog/path"); + } + }; + } + + private void validateAttributes(Map remoteStoreNodeAttributes, ClusterState currentState, DiscoveryNode existingNode) { + DiscoveryNode joiningNode = newDiscoveryNode(remoteStoreNodeAttributes); + Exception e = assertThrows( + IllegalStateException.class, + () -> JoinTaskExecutor.ensureNodesCompatibility(joiningNode, currentState.getNodes(), currentState.metadata()) + ); + assertTrue( + e.getMessage() + .equals( + "a remote store node [" + + joiningNode + + "] is trying to join a remote store cluster with incompatible node attributes in " + + "comparison with existing node [" + + existingNode + + "]" + ) + ); + } + + private RepositoryMetadata buildRepositoryMetadata(DiscoveryNode node, String name) { + Map nodeAttributes = node.getAttributes(); + String type = nodeAttributes.get(String.format(Locale.getDefault(), REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, name)); + + String settingsAttributeKeyPrefix = String.format(Locale.getDefault(), REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, name); + Map settingsMap = node.getAttributes() + .keySet() + .stream() + .filter(key -> key.startsWith(settingsAttributeKeyPrefix)) + .collect(Collectors.toMap(key -> key.replace(settingsAttributeKeyPrefix, ""), key -> node.getAttributes().get(key))); + + Settings.Builder settings = Settings.builder(); + settingsMap.entrySet().forEach(entry -> settings.put(entry.getKey(), entry.getValue())); + + settings.put(BlobStoreRepository.SYSTEM_REPOSITORY_SETTING.getKey(), true); + + return new RepositoryMetadata(name, type, settings.build()); + } } diff --git a/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java b/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java index 8d798b38dc023..766a20fda8d28 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java @@ -60,6 +60,7 @@ import org.opensearch.monitor.NodeHealthService; import org.opensearch.monitor.StatusInfo; import org.opensearch.node.Node; +import org.opensearch.node.remotestore.RemoteStoreNodeService; import org.opensearch.test.ClusterServiceUtils; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.CapturingTransport; @@ -90,6 +91,8 @@ import java.util.stream.IntStream; import java.util.stream.Stream; +import org.mockito.Mockito; + import static java.util.Collections.emptyList; import static java.util.Collections.emptyMap; import static java.util.Collections.singleton; @@ -264,7 +267,8 @@ protected void onSendRequest( (s, p, r) -> {}, ElectionStrategy.DEFAULT_INSTANCE, nodeHealthService, - persistedStateRegistry + persistedStateRegistry, + Mockito.mock(RemoteStoreNodeService.class) ); transportService.start(); transportService.acceptIncomingRequests(); diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java index ad5fbb18138b3..86e154c547e07 100644 --- 
a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java @@ -136,11 +136,11 @@ import static org.opensearch.index.IndexSettings.INDEX_SOFT_DELETES_SETTING; import static org.opensearch.indices.IndicesService.CLUSTER_DEFAULT_INDEX_REFRESH_INTERVAL_SETTING; import static org.opensearch.indices.IndicesService.CLUSTER_MINIMUM_INDEX_REFRESH_INTERVAL_SETTING; -import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_SEGMENT_STORE_REPOSITORY_SETTING; -import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_ENABLED_SETTING; -import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_TRANSLOG_REPOSITORY_SETTING; import static org.opensearch.indices.IndicesService.CLUSTER_REPLICATION_TYPE_SETTING; import static org.opensearch.indices.ShardLimitValidatorTests.createTestShardLimitService; +import static org.opensearch.node.Node.NODE_ATTRIBUTES; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.hamcrest.Matchers.endsWith; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasKey; @@ -158,6 +158,10 @@ public class MetadataCreateIndexServiceTests extends OpenSearchTestCase { private CreateIndexClusterStateUpdateRequest request; private QueryShardContext queryShardContext; private ClusterSettings clusterSettings; + private static final String segmentRepositoryNameAttributeKey = NODE_ATTRIBUTES.getKey() + + REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY; + private static final String translogRepositoryNameAttributeKey = NODE_ATTRIBUTES.getKey() + + REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY; @Before public void setup() throws Exception { @@ -1214,9 +1218,8 @@ public void testvalidateIndexSettings() { public void testRemoteStoreNoUserOverrideExceptReplicationTypeSegmentIndexSettings() { Settings settings = Settings.builder() .put(CLUSTER_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.DOCUMENT) - .put(CLUSTER_REMOTE_STORE_ENABLED_SETTING.getKey(), true) - .put(CLUSTER_REMOTE_SEGMENT_STORE_REPOSITORY_SETTING.getKey(), "my-segment-repo-1") - .put(CLUSTER_REMOTE_TRANSLOG_REPOSITORY_SETTING.getKey(), "my-translog-repo-1") + .put(segmentRepositoryNameAttributeKey, "my-segment-repo-1") + .put(translogRepositoryNameAttributeKey, "my-translog-repo-1") .build(); FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); @@ -1247,9 +1250,8 @@ public void testRemoteStoreNoUserOverrideExceptReplicationTypeSegmentIndexSettin public void testRemoteStoreImplicitOverrideReplicationTypeToSegmentForRemoteStore() { Settings settings = Settings.builder() - .put(CLUSTER_REMOTE_STORE_ENABLED_SETTING.getKey(), true) - .put(CLUSTER_REMOTE_SEGMENT_STORE_REPOSITORY_SETTING.getKey(), "my-segment-repo-1") - .put(CLUSTER_REMOTE_TRANSLOG_REPOSITORY_SETTING.getKey(), "my-translog-repo-1") + .put(segmentRepositoryNameAttributeKey, "my-segment-repo-1") + .put(translogRepositoryNameAttributeKey, "my-translog-repo-1") .build(); FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); @@ -1280,9 +1282,8 @@ public void testRemoteStoreImplicitOverrideReplicationTypeToSegmentForRemoteStor public void testRemoteStoreNoUserOverrideIndexSettings() { Settings settings = Settings.builder() .put(CLUSTER_REPLICATION_TYPE_SETTING.getKey(), 
ReplicationType.SEGMENT) - .put(CLUSTER_REMOTE_STORE_ENABLED_SETTING.getKey(), true) - .put(CLUSTER_REMOTE_SEGMENT_STORE_REPOSITORY_SETTING.getKey(), "my-segment-repo-1") - .put(CLUSTER_REMOTE_TRANSLOG_REPOSITORY_SETTING.getKey(), "my-translog-repo-1") + .put(segmentRepositoryNameAttributeKey, "my-segment-repo-1") + .put(translogRepositoryNameAttributeKey, "my-translog-repo-1") .build(); FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); diff --git a/server/src/test/java/org/opensearch/common/blobstore/stream/read/listener/ReadContextListenerTests.java b/server/src/test/java/org/opensearch/common/blobstore/stream/read/listener/ReadContextListenerTests.java index f785b5f1191b4..936e2170bed6c 100644 --- a/server/src/test/java/org/opensearch/common/blobstore/stream/read/listener/ReadContextListenerTests.java +++ b/server/src/test/java/org/opensearch/common/blobstore/stream/read/listener/ReadContextListenerTests.java @@ -70,7 +70,7 @@ public void testReadContextListener() throws InterruptedException, IOException { assertEquals(NUMBER_OF_PARTS * PART_SIZE, Files.size(fileLocation)); } - public void testReadContextListenerFailure() throws InterruptedException { + public void testReadContextListenerFailure() throws Exception { Path fileLocation = path.resolve(UUID.randomUUID().toString()); List blobPartStreams = initializeBlobPartStreams(); CountDownLatch countDownLatch = new CountDownLatch(1); @@ -99,8 +99,7 @@ public int available() { readContextListener.onResponse(readContext); countDownLatch.await(); - - assertFalse(Files.exists(fileLocation)); + assertBusy(() -> { assertFalse(Files.exists(fileLocation)); }); } public void testReadContextListenerException() { diff --git a/server/src/test/java/org/opensearch/discovery/DiscoveryModuleTests.java b/server/src/test/java/org/opensearch/discovery/DiscoveryModuleTests.java index b32dd7c6c240b..7c0dca3803cb2 100644 --- a/server/src/test/java/org/opensearch/discovery/DiscoveryModuleTests.java +++ b/server/src/test/java/org/opensearch/discovery/DiscoveryModuleTests.java @@ -46,6 +46,7 @@ import org.opensearch.common.util.io.IOUtils; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.gateway.GatewayMetaState; +import org.opensearch.node.remotestore.RemoteStoreNodeService; import org.opensearch.plugins.DiscoveryPlugin; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.MockTransportService; @@ -77,6 +78,8 @@ public class DiscoveryModuleTests extends OpenSearchTestCase { private ClusterSettings clusterSettings; private GatewayMetaState gatewayMetaState; + private RemoteStoreNodeService remoteStoreNodeService; + public interface DummyHostsProviderPlugin extends DiscoveryPlugin { Map> impl(); @@ -99,6 +102,7 @@ public void setupDummyServices() { clusterApplier = mock(ClusterApplier.class); clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); gatewayMetaState = mock(GatewayMetaState.class); + remoteStoreNodeService = mock(RemoteStoreNodeService.class); } @After @@ -122,7 +126,8 @@ private DiscoveryModule newModule(Settings settings, List plugi gatewayMetaState, mock(RerouteService.class), null, - new PersistedStateRegistry() + new PersistedStateRegistry(), + remoteStoreNodeService ); } diff --git a/server/src/test/java/org/opensearch/indices/cluster/ClusterStateChanges.java b/server/src/test/java/org/opensearch/indices/cluster/ClusterStateChanges.java index 10180911c396f..8de652138e83e 100644 --- 
a/server/src/test/java/org/opensearch/indices/cluster/ClusterStateChanges.java +++ b/server/src/test/java/org/opensearch/indices/cluster/ClusterStateChanges.java @@ -91,6 +91,7 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.CheckedFunction; import org.opensearch.common.Priority; +import org.opensearch.common.SetOnce; import org.opensearch.common.UUIDs; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.IndexScopedSettings; @@ -107,6 +108,8 @@ import org.opensearch.indices.IndicesService; import org.opensearch.indices.ShardLimitValidator; import org.opensearch.indices.SystemIndices; +import org.opensearch.node.remotestore.RemoteStoreNodeService; +import org.opensearch.repositories.RepositoriesService; import org.opensearch.snapshots.EmptySnapshotsInfoService; import org.opensearch.test.gateway.TestGatewayAllocator; import org.opensearch.threadpool.ThreadPool; @@ -150,6 +153,8 @@ public class ClusterStateChanges { private final TransportUpdateSettingsAction transportUpdateSettingsAction; private final TransportClusterRerouteAction transportClusterRerouteAction; private final TransportCreateIndexAction transportCreateIndexAction; + private final RepositoriesService repositoriesService; + private final RemoteStoreNodeService remoteStoreNodeService; private final NodeRemovalClusterStateTaskExecutor nodeRemovalExecutor; private final JoinTaskExecutor joinTaskExecutor; @@ -362,8 +367,19 @@ public IndexMetadata upgradeIndexMetadata(IndexMetadata indexMetadata, Version m indexNameExpressionResolver ); + repositoriesService = new RepositoriesService( + Settings.EMPTY, + clusterService, + transportService, + Collections.emptyMap(), + Collections.emptyMap(), + threadPool + ); + + remoteStoreNodeService = new RemoteStoreNodeService(new SetOnce<>(repositoriesService)::get, threadPool); + nodeRemovalExecutor = new NodeRemovalClusterStateTaskExecutor(allocationService, logger); - joinTaskExecutor = new JoinTaskExecutor(Settings.EMPTY, allocationService, logger, (s, p, r) -> {}); + joinTaskExecutor = new JoinTaskExecutor(Settings.EMPTY, allocationService, logger, (s, p, r) -> {}, remoteStoreNodeService); } public ClusterState createIndex(ClusterState state, CreateIndexRequest request) { diff --git a/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java b/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java index 1ab48b30af2f9..b20cb323e144f 100644 --- a/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java +++ b/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java @@ -756,6 +756,11 @@ public boolean isReadOnly() { return false; } + @Override + public boolean isSystemRepository() { + return false; + } + @Override public void snapshotShard( Store store, diff --git a/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryHelperTests.java b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryHelperTests.java index 0dbc0372458b5..a24fd04d3d4f6 100644 --- a/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryHelperTests.java +++ b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryHelperTests.java @@ -111,7 +111,7 @@ protected void updateRepository(Client client, String repoName, Settings repoSet createRepository(client, repoName, repoSettings); } - protected Settings getRemoteStoreBackedIndexSettings(String remoteStoreRepo) { + 
protected Settings getRemoteStoreBackedIndexSettings() { return Settings.builder() .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "1") .put("index.refresh_interval", "300s") diff --git a/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryRemoteIndexTests.java b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryRemoteIndexTests.java index d69fb1fd7b349..0f24d60993f2f 100644 --- a/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryRemoteIndexTests.java +++ b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryRemoteIndexTests.java @@ -39,25 +39,30 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.env.Environment; import org.opensearch.index.IndexSettings; import org.opensearch.index.snapshots.blobstore.RemoteStoreShardShallowCopySnapshot; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.repositories.IndexId; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.RepositoryData; +import org.opensearch.repositories.fs.FsRepository; import org.opensearch.snapshots.SnapshotId; import org.opensearch.test.FeatureFlagSetter; import org.opensearch.test.OpenSearchIntegTestCase; import java.io.IOException; +import java.nio.file.Path; import java.util.Arrays; import java.util.List; +import java.util.Locale; import java.util.stream.Collectors; -import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_SEGMENT_STORE_REPOSITORY_SETTING; -import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_ENABLED_SETTING; -import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_TRANSLOG_REPOSITORY_SETTING; import static org.opensearch.indices.IndicesService.CLUSTER_REPLICATION_TYPE_SETTING; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY; +import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.hamcrest.Matchers.equalTo; /** @@ -71,12 +76,36 @@ protected Settings featureFlagSettings() { @Override protected Settings nodeSettings() { + Path tempDir = createTempDir(); return Settings.builder() .put(super.nodeSettings()) .put(CLUSTER_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.SEGMENT) - .put(CLUSTER_REMOTE_STORE_ENABLED_SETTING.getKey(), true) - .put(CLUSTER_REMOTE_SEGMENT_STORE_REPOSITORY_SETTING.getKey(), "test-rs-repo") - .put(CLUSTER_REMOTE_TRANSLOG_REPOSITORY_SETTING.getKey(), "test-rs-repo") + .put(buildRemoteStoreNodeAttributes("test-rs-repo", tempDir.resolve("repo"))) + .put(Environment.PATH_HOME_SETTING.getKey(), tempDir) + .put(Environment.PATH_REPO_SETTING.getKey(), tempDir.resolve("repo")) + .put(Environment.PATH_SHARED_DATA_SETTING.getKey(), tempDir.getParent()) + .build(); + } + + private Settings buildRemoteStoreNodeAttributes(String repoName, Path repoPath) { + String repoTypeAttributeKey = String.format( + Locale.getDefault(), + "node.attr." 
+ REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT, + repoName + ); + String repoSettingsAttributeKeyPrefix = String.format( + Locale.getDefault(), + "node.attr." + REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX, + repoName + ); + + return Settings.builder() + .put("node.attr." + REMOTE_STORE_SEGMENT_REPOSITORY_NAME_ATTRIBUTE_KEY, repoName) + .put(repoTypeAttributeKey, FsRepository.TYPE) + .put(repoSettingsAttributeKeyPrefix + "location", repoPath) + .put("node.attr." + REMOTE_STORE_TRANSLOG_REPOSITORY_NAME_ATTRIBUTE_KEY, repoName) + .put(repoTypeAttributeKey, FsRepository.TYPE) + .put(repoSettingsAttributeKeyPrefix + "location", repoPath) .build(); } @@ -95,13 +124,6 @@ public void testRetrieveShallowCopySnapshotCase1() throws IOException { .build(); createRepository(client, snapshotRepositoryName, snapshotRepoSettings); - logger.info("--> creating remote store repository"); - Settings remoteStoreRepoSettings = Settings.builder() - .put(node().settings()) - .put("location", OpenSearchIntegTestCase.randomRepoPath(node().settings())) - .build(); - createRepository(client, remoteStoreRepositoryName, remoteStoreRepoSettings); - logger.info("--> creating an index and indexing documents"); final String indexName = "test-idx"; createIndex(indexName); @@ -110,7 +132,7 @@ public void testRetrieveShallowCopySnapshotCase1() throws IOException { logger.info("--> creating a remote store enabled index and indexing documents"); final String remoteStoreIndexName = "test-rs-idx"; - Settings indexSettings = getRemoteStoreBackedIndexSettings(remoteStoreRepositoryName); + Settings indexSettings = getRemoteStoreBackedIndexSettings(); createIndex(remoteStoreIndexName, indexSettings); indexDocuments(client, remoteStoreIndexName); @@ -195,16 +217,9 @@ public void testGetRemoteStoreShallowCopyShardMetadata() throws IOException { .build(); createRepository(client, snapshotRepositoryName, snapshotRepoSettings); - logger.info("--> creating remote store repository"); - Settings remoteStoreRepoSettings = Settings.builder() - .put(node().settings()) - .put("location", OpenSearchIntegTestCase.randomRepoPath(node().settings())) - .build(); - createRepository(client, remoteStoreRepositoryName, remoteStoreRepoSettings); - logger.info("--> creating a remote store enabled index and indexing documents"); final String remoteStoreIndexName = "test-rs-idx"; - Settings indexSettings = getRemoteStoreBackedIndexSettings(remoteStoreRepositoryName); + Settings indexSettings = getRemoteStoreBackedIndexSettings(); createIndex(remoteStoreIndexName, indexSettings); indexDocuments(client, remoteStoreIndexName); @@ -266,9 +281,6 @@ public void testRetrieveShallowCopySnapshotCase2() throws IOException { assertFalse(updatedRepositoryMetadata.settings().getAsBoolean(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), false)); - logger.info("--> creating remote store repository"); - createRepository(client, remoteStoreRepositoryName); - logger.info("--> creating an index and indexing documents"); final String indexName = "test-idx"; createIndex(indexName); @@ -277,7 +289,7 @@ public void testRetrieveShallowCopySnapshotCase2() throws IOException { logger.info("--> creating a remote store enabled index and indexing documents"); final String remoteStoreIndexName = "test-rs-idx"; - Settings indexSettings = getRemoteStoreBackedIndexSettings(remoteStoreRepositoryName); + Settings indexSettings = getRemoteStoreBackedIndexSettings(); createIndex(remoteStoreIndexName, indexSettings); indexDocuments(client, remoteStoreIndexName); diff 
--git a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java index 37ea8a28bd4b4..9cc7a83d3c563 100644 --- a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java @@ -205,6 +205,7 @@ import org.opensearch.ingest.IngestService; import org.opensearch.monitor.StatusInfo; import org.opensearch.node.ResponseCollectorService; +import org.opensearch.node.remotestore.RemoteStoreNodeService; import org.opensearch.plugins.PluginsService; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.Repository; @@ -1892,6 +1893,7 @@ private final class TestClusterNode { private final ClusterInfoService clusterInfoService; private Coordinator coordinator; + private RemoteStoreNodeService remoteStoreNodeService; private Map actions = new HashMap<>(); @@ -1997,6 +1999,7 @@ public void onFailure(final Exception e) { emptyMap(), threadPool ); + remoteStoreNodeService = new RemoteStoreNodeService(new SetOnce<>(repositoriesService)::get, threadPool); final ActionFilters actionFilters = new ActionFilters(emptySet()); snapshotsService = new SnapshotsService( settings, @@ -2513,7 +2516,8 @@ public void start(ClusterState initialState) { rerouteService, ElectionStrategy.DEFAULT_INSTANCE, () -> new StatusInfo(HEALTHY, "healthy-info"), - persistedStateRegistry + persistedStateRegistry, + remoteStoreNodeService ); clusterManagerService.setClusterStatePublisher(coordinator); coordinator.start(); diff --git a/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java b/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java index c6960b5ca33ff..d49d3d290b8a8 100644 --- a/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java +++ b/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java @@ -59,6 +59,7 @@ import org.opensearch.cluster.service.FakeThreadPoolClusterManagerService; import org.opensearch.common.Nullable; import org.opensearch.common.Randomness; +import org.opensearch.common.SetOnce; import org.opensearch.common.UUIDs; import org.opensearch.common.collect.Tuple; import org.opensearch.common.io.stream.BytesStreamOutput; @@ -85,6 +86,8 @@ import org.opensearch.gateway.PersistedClusterStateService; import org.opensearch.monitor.NodeHealthService; import org.opensearch.monitor.StatusInfo; +import org.opensearch.node.remotestore.RemoteStoreNodeService; +import org.opensearch.repositories.RepositoriesService; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.disruption.DisruptableMockTransport; import org.opensearch.test.disruption.DisruptableMockTransport.ConnectionStatus; @@ -1035,6 +1038,8 @@ class ClusterNode { TransportService transportService; private DisruptableMockTransport mockTransport; private NodeHealthService nodeHealthService; + private RepositoriesService repositoriesService; + private RemoteStoreNodeService remoteStoreNodeService; List> extraJoinValidators = new ArrayList<>(); ClusterNode(int nodeIndex, boolean clusterManagerEligible, Settings nodeSettings, NodeHealthService nodeHealthService) { @@ -1129,6 +1134,15 @@ protected Optional getDisruptableMockTransport(Transpo clusterService.setNodeConnectionsService( new NodeConnectionsService(clusterService.getSettings(), threadPool, 
transportService) ); + repositoriesService = new RepositoriesService( + settings, + clusterService, + transportService, + Collections.emptyMap(), + Collections.emptyMap(), + threadPool + ); + remoteStoreNodeService = new RemoteStoreNodeService(new SetOnce<>(repositoriesService)::get, threadPool); final Collection> onJoinValidators = Collections.singletonList( (dn, cs) -> extraJoinValidators.forEach(validator -> validator.accept(dn, cs)) ); @@ -1149,7 +1163,8 @@ protected Optional getDisruptableMockTransport(Transpo (s, p, r) -> {}, getElectionStrategy(), nodeHealthService, - persistedStateRegistry + persistedStateRegistry, + remoteStoreNodeService ); clusterManagerService.setClusterStatePublisher(coordinator); final GatewayService gatewayService = new GatewayService( diff --git a/test/framework/src/main/java/org/opensearch/index/shard/RestoreOnlyRepository.java b/test/framework/src/main/java/org/opensearch/index/shard/RestoreOnlyRepository.java index fbee13ab3b551..be2f895301396 100644 --- a/test/framework/src/main/java/org/opensearch/index/shard/RestoreOnlyRepository.java +++ b/test/framework/src/main/java/org/opensearch/index/shard/RestoreOnlyRepository.java @@ -173,6 +173,11 @@ public boolean isReadOnly() { return false; } + @Override + public boolean isSystemRepository() { + return false; + } + @Override public void snapshotShard( Store store, diff --git a/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java index 11e847e29a097..95832dc9544ce 100644 --- a/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java @@ -55,6 +55,7 @@ import org.opensearch.cluster.coordination.ClusterBootstrapService; import org.opensearch.cluster.coordination.NoClusterManagerBlockService; import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.metadata.RepositoriesMetadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodeRole; import org.opensearch.cluster.node.DiscoveryNodes; @@ -1318,6 +1319,12 @@ public synchronized void validateClusterFormed() { assertTrue("Expected node to exist: " + expectedNode + debugString, discoveryNodes.nodeExists(expectedNode)); } }); + states.forEach(cs -> { + if (cs.nodes().getNodes().values().stream().findFirst().get().isRemoteStoreNode()) { + RepositoriesMetadata repositoriesMetadata = cs.metadata().custom(RepositoriesMetadata.TYPE); + assertTrue(repositoriesMetadata != null && !repositoriesMetadata.repositories().isEmpty()); + } + }); }, 30, TimeUnit.SECONDS); } catch (AssertionError ae) { throw new IllegalStateException("cluster failed to form", ae);