Commit acbcf76

Replace internal usages of 'master' term in 'server/src/main' directory (#2519) (#3165)

Signed-off-by: Tianli Feng <ftianli@amazon.com>
Tianli Feng committed May 4, 2022
1 parent c19d537 commit acbcf76
Showing 127 changed files with 683 additions and 629 deletions.

@@ -111,7 +111,7 @@ private MockTerminal executeCommand(
private MockTerminal unsafeBootstrap(Environment environment, boolean abort, Boolean applyClusterReadOnlyBlock) throws Exception {
final MockTerminal terminal = executeCommand(new UnsafeBootstrapMasterCommand(), environment, 0, abort, applyClusterReadOnlyBlock);
assertThat(terminal.getOutput(), containsString(UnsafeBootstrapMasterCommand.CONFIRMATION_MSG));
- assertThat(terminal.getOutput(), containsString(UnsafeBootstrapMasterCommand.MASTER_NODE_BOOTSTRAPPED_MSG));
+ assertThat(terminal.getOutput(), containsString(UnsafeBootstrapMasterCommand.CLUSTER_MANAGER_NODE_BOOTSTRAPPED_MSG));
return terminal;
}

@@ -171,7 +171,7 @@ public void testBootstrapNotMasterEligible() {
final Environment environment = TestEnvironment.newEnvironment(
Settings.builder().put(nonMasterNode(internalCluster().getDefaultSettings())).build()
);
- expectThrows(() -> unsafeBootstrap(environment), UnsafeBootstrapMasterCommand.NOT_MASTER_NODE_MSG);
+ expectThrows(() -> unsafeBootstrap(environment), UnsafeBootstrapMasterCommand.NOT_CLUSTER_MANAGER_NODE_MSG);
}

public void testBootstrapNoDataFolder() {

@@ -248,7 +248,7 @@ public void testMasterFailOverDuringShardSnapshots() throws Exception {

blockDataNode(repoName, dataNode);

- logger.info("--> create snapshot via master node client");
+ logger.info("--> create snapshot via cluster-manager node client");
final ActionFuture<CreateSnapshotResponse> snapshotResponse = internalCluster().masterClient()
.admin()
.cluster()
@@ -272,7 +272,7 @@ public void testMasterFailOverDuringShardSnapshots() throws Exception {
SnapshotException.class,
() -> snapshotResponse.actionGet(TimeValue.timeValueSeconds(30L))
);
- assertThat(sne.getMessage(), endsWith("no longer master"));
+ assertThat(sne.getMessage(), endsWith("no longer cluster-manager"));
}

private void assertSnapshotExists(String repository, String snapshot) {

@@ -136,7 +136,7 @@ private void executeRepurposeCommand(Settings settings, int expectedIndexCount,
boolean verbose = randomBoolean();
Settings settingsWithPath = Settings.builder().put(internalCluster().getDefaultSettings()).put(settings).build();
Matcher<String> matcher = allOf(
- containsString(NodeRepurposeCommand.noMasterMessage(expectedIndexCount, expectedShardCount, 0)),
+ containsString(NodeRepurposeCommand.noClusterManagerMessage(expectedIndexCount, expectedShardCount, 0)),
NodeRepurposeCommandTests.conditionalNot(containsString("test-repurpose"), verbose == false)
);
NodeRepurposeCommandTests.verifySuccess(settingsWithPath, matcher, verbose);

4 changes: 2 additions & 2 deletions server/src/main/java/org/opensearch/OpenSearchException.java
@@ -785,7 +785,7 @@ private enum OpenSearchExceptionHandle {
2,
UNKNOWN_VERSION_ADDED
),
- MASTER_NOT_DISCOVERED_EXCEPTION(
+ CLUSTER_MANAGER_NOT_DISCOVERED_EXCEPTION(
org.opensearch.discovery.MasterNotDiscoveredException.class,
org.opensearch.discovery.MasterNotDiscoveredException::new,
3,
@@ -1496,7 +1496,7 @@ private enum OpenSearchExceptionHandle {
143,
UNKNOWN_VERSION_ADDED
),
- NOT_MASTER_EXCEPTION(
+ NOT_CLUSTER_MANAGER_EXCEPTION(
org.opensearch.cluster.NotMasterException.class,
org.opensearch.cluster.NotMasterException::new,
144,
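The enum above pairs each exception class with a constructor reference and a stable numeric wire id (3 and 144 in these hunks); only the constant names change, so serialization stays compatible across the rename. A minimal, self-contained sketch of that registry pattern (the types below are illustrative stand-ins, not the real OpenSearchExceptionHandle):

import java.util.function.Function;

// Illustrative registry keyed by a stable wire id: constants can be renamed
// (MASTER_* -> CLUSTER_MANAGER_*) without affecting what goes over the wire.
enum ExceptionHandleSketch {
    CLUSTER_MANAGER_NOT_DISCOVERED_EXCEPTION(3, IllegalStateException::new),
    NOT_CLUSTER_MANAGER_EXCEPTION(144, IllegalStateException::new);

    final int id;                                   // serialized id, must never change
    final Function<String, RuntimeException> ctor;  // rebuilds the exception on deserialization

    ExceptionHandleSketch(int id, Function<String, RuntimeException> ctor) {
        this.id = id;
        this.ctor = ctor;
    }

    static RuntimeException fromWire(int id, String message) {
        for (ExceptionHandleSketch handle : values()) {
            if (handle.id == id) {
                return handle.ctor.apply(message);
            }
        }
        throw new IllegalArgumentException("unknown exception id [" + id + "]");
    }
}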

@@ -64,7 +64,7 @@

/**
* The {@code TransportClusterAllocationExplainAction} is responsible for actually executing the explanation of a shard's allocation on the
- * master node in the cluster.
+ * cluster-manager node in the cluster.
*/
public class TransportClusterAllocationExplainAction extends TransportMasterNodeAction<
ClusterAllocationExplainRequest,

@@ -90,7 +90,7 @@ public class ClusterHealthResponse extends ActionResponse implements StatusToXCo
// ClusterStateHealth fields
int numberOfNodes = (int) parsedObjects[i++];
int numberOfDataNodes = (int) parsedObjects[i++];
- boolean hasDiscoveredMaster = Boolean.TRUE.equals(parsedObjects[i++]);
+ boolean hasDiscoveredClusterManager = Boolean.TRUE.equals(parsedObjects[i++]);
int activeShards = (int) parsedObjects[i++];
int relocatingShards = (int) parsedObjects[i++];
int activePrimaryShards = (int) parsedObjects[i++];
@@ -118,7 +118,7 @@ public class ClusterHealthResponse extends ActionResponse implements StatusToXCo
unassignedShards,
numberOfNodes,
numberOfDataNodes,
- hasDiscoveredMaster,
+ hasDiscoveredClusterManager,
activeShardsPercent,
status,
indices

@@ -218,11 +218,11 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS
@Override
public void onNoLongerMaster(String source) {
logger.trace(
"stopped being master while waiting for events with priority [{}]. retrying.",
"stopped being cluster-manager while waiting for events with priority [{}]. retrying.",
request.waitForEvents()
);
// TransportMasterNodeAction implements the retry logic, which is triggered by passing a NotMasterException
- listener.onFailure(new NotMasterException("no longer master. source: [" + source + "]"));
+ listener.onFailure(new NotMasterException("no longer cluster-manager. source: [" + source + "]"));
}

@Override
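The hunk above hands a NotMasterException back to the listener precisely because TransportMasterNodeAction treats that exception as retriable: the request is re-dispatched to the newly elected cluster-manager instead of failing the caller. A rough sketch of that retry decision (simplified; the real logic also waits on a cluster state observer):

import org.opensearch.cluster.NotMasterException;
import org.opensearch.discovery.MasterNotDiscoveredException;

// Simplified sketch: failures that mean "the cluster-manager changed or is unknown"
// are retried against the next elected cluster-manager rather than surfaced to the caller.
final class ClusterManagerRetrySketch {
    static boolean shouldRetryOnNewClusterManager(Throwable failure) {
        return failure instanceof NotMasterException || failure instanceof MasterNotDiscoveredException;
    }
}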

@@ -73,12 +73,12 @@
* <ol>
* <li>Check that there are no running repository cleanup, snapshot create, or snapshot delete actions
* and add an entry for the repository that is to be cleaned up to {@link RepositoryCleanupInProgress}</li>
* <li>Run cleanup actions on the repository. Note, these are executed exclusively on the master node.
* <li>Run cleanup actions on the repository. Note, these are executed exclusively on the cluster-manager node.
* For the precise operations execute see {@link BlobStoreRepository#cleanup}</li>
* <li>Remove the entry in {@link RepositoryCleanupInProgress} in the first step.</li>
* </ol>
*
- * On master failover during the cleanup operation it is simply removed from the cluster state. This is safe because the logic in
+ * On cluster-manager failover during the cleanup operation it is simply removed from the cluster state. This is safe because the logic in
* {@link BlobStoreRepository#cleanup} ensures that the repository state id has not changed between creation of the cluster state entry
* and any delete/write operations. TODO: This will not work if we also want to clean up at the shard level as those will involve writes
* as well as deletes.
@@ -119,7 +119,7 @@ public TransportCleanupRepositoryAction(
);
this.repositoriesService = repositoriesService;
this.snapshotsService = snapshotsService;
- // We add a state applier that will remove any dangling repository cleanup actions on master failover.
+ // We add a state applier that will remove any dangling repository cleanup actions on cluster-manager failover.
// This is safe to do since cleanups will increment the repository state id before executing any operations to prevent concurrent
// operations from corrupting the repository. This is the same safety mechanism used by snapshot deletes.
if (DiscoveryNode.isMasterNode(clusterService.getSettings())) {
@@ -136,7 +136,7 @@ private static void addClusterStateApplier(ClusterService clusterService) {
return;
}
clusterService.submitStateUpdateTask(
"clean up repository cleanup task after master failover",
"clean up repository cleanup task after cluster-manager failover",
new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
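Taken together, the comments above describe a state applier, registered only on cluster-manager-eligible nodes, that drops any dangling RepositoryCleanupInProgress entry after a failover. A condensed sketch of that idea (not the actual TransportCleanupRepositoryAction code: logging and the wiring through RepositoriesService are omitted, and the RepositoryCleanupInProgress.EMPTY constant is assumed):

import org.opensearch.cluster.ClusterState;
import org.opensearch.cluster.ClusterStateUpdateTask;
import org.opensearch.cluster.RepositoryCleanupInProgress;
import org.opensearch.cluster.service.ClusterService;

final class CleanupFailoverSketch {
    static void register(ClusterService clusterService) {
        clusterService.addStateApplier(event -> {
            RepositoryCleanupInProgress cleanup = event.state().custom(RepositoryCleanupInProgress.TYPE);
            // Only act when this node is the elected cluster-manager and a cleanup entry
            // left behind by the previous cluster-manager is still in the cluster state.
            if (event.localNodeMaster() == false || cleanup == null || cleanup.hasCleanupInProgress() == false) {
                return;
            }
            clusterService.submitStateUpdateTask(
                "clean up repository cleanup task after cluster-manager failover",
                new ClusterStateUpdateTask() {
                    @Override
                    public ClusterState execute(ClusterState currentState) {
                        // Dropping the entry is safe: the repository state id was bumped before any
                        // blob operations started, so concurrent operations cannot corrupt the repository.
                        return ClusterState.builder(currentState)
                            .putCustom(RepositoryCleanupInProgress.TYPE, RepositoryCleanupInProgress.EMPTY) // assumed constant
                            .build();
                    }

                    @Override
                    public void onFailure(String source, Exception e) {
                        // Best effort: a dangling entry is cosmetic and is cleared on the next attempt.
                    }
                }
            );
        });
    }
}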

@@ -155,11 +155,11 @@ public void onAckTimeout() {
}

private void reroute(final boolean updateSettingsAcked) {
- // We're about to send a second update task, so we need to check if we're still the elected master
- // For example the minimum_master_node could have been breached and we're no longer elected master,
+ // We're about to send a second update task, so we need to check if we're still the elected cluster-manager
+ // For example the minimum_master_node could have been breached and we're no longer elected cluster-manager,
// so we should *not* execute the reroute.
if (!clusterService.state().nodes().isLocalNodeElectedMaster()) {
logger.debug("Skipping reroute after cluster update settings, because node is no longer master");
logger.debug("Skipping reroute after cluster update settings, because node is no longer cluster-manager");
listener.onResponse(
new ClusterUpdateSettingsResponse(
updateSettingsAcked,
@@ -198,7 +198,7 @@ protected ClusterUpdateSettingsResponse newResponse(boolean acknowledged) {
@Override
public void onNoLongerMaster(String source) {
logger.debug(
"failed to preform reroute after cluster settings were updated - current node is no longer a master"
"failed to preform reroute after cluster settings were updated - current node is no longer a cluster-manager"
);
listener.onResponse(
new ClusterUpdateSettingsResponse(

@@ -69,8 +69,8 @@ public void clusterChanged(ClusterChangedEvent changedEvent) {
final RestoreInProgress.Entry prevEntry = restoreInProgress(changedEvent.previousState(), uuid);
final RestoreInProgress.Entry newEntry = restoreInProgress(changedEvent.state(), uuid);
if (prevEntry == null) {
- // When there is a master failure after a restore has been started, this listener might not be registered
- // on the current master and as such it might miss some intermediary cluster states due to batching.
+ // When there is a cluster-manager failure after a restore has been started, this listener might not be registered
+ // on the current cluster-manager and as such it might miss some intermediary cluster states due to batching.
// Clean up listener in that case and acknowledge completion of restore operation to client.
clusterService.removeListener(this);
listener.onResponse(new RestoreSnapshotResponse((RestoreInfo) null));

@@ -221,8 +221,8 @@ private void buildResponse(
// Unlikely edge case:
// Data node has finished snapshotting the shard but the cluster state has not yet been updated
// to reflect this. We adjust the status to show up as snapshot metadata being written because
- // technically if the data node failed before successfully reporting DONE state to master, then
- // this shards state would jump to a failed state.
+ // technically if the data node failed before successfully reporting DONE state to cluster-manager,
+ // then this shards state would jump to a failed state.
shardStatus = new SnapshotIndexShardStatus(
shardEntry.key,
SnapshotIndexShardStage.FINALIZE,
@@ -406,7 +406,7 @@ private SnapshotInfo snapshot(SnapshotsInProgress snapshotsInProgress, String re
/**
* Returns status of shards currently finished snapshots
* <p>
- * This method is executed on master node and it's complimentary to the
+ * This method is executed on cluster-manager node and it's complimentary to the
* {@link SnapshotShardsService#currentSnapshotShards(Snapshot)} because it
* returns similar information but for already finished snapshots.
* </p>

@@ -108,20 +108,20 @@ public boolean equals(Object o) {
if (o == null || getClass() != o.getClass()) return false;
ClusterStateResponse response = (ClusterStateResponse) o;
return waitForTimedOut == response.waitForTimedOut && Objects.equals(clusterName, response.clusterName) &&
- // Best effort. Only compare cluster state version and master node id,
+ // Best effort. Only compare cluster state version and cluster-manager node id,
// because cluster state doesn't implement equals()
Objects.equals(getVersion(clusterState), getVersion(response.clusterState))
&& Objects.equals(getMasterNodeId(clusterState), getMasterNodeId(response.clusterState));
&& Objects.equals(getClusterManagerNodeId(clusterState), getClusterManagerNodeId(response.clusterState));
}

@Override
public int hashCode() {
- // Best effort. Only use cluster state version and master node id,
+ // Best effort. Only use cluster state version and cluster-manager node id,
// because cluster state doesn't implement hashcode()
- return Objects.hash(clusterName, getVersion(clusterState), getMasterNodeId(clusterState), waitForTimedOut);
+ return Objects.hash(clusterName, getVersion(clusterState), getClusterManagerNodeId(clusterState), waitForTimedOut);
}

- private static String getMasterNodeId(ClusterState clusterState) {
+ private static String getClusterManagerNodeId(ClusterState clusterState) {
if (clusterState == null) {
return null;
}
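Because ClusterState defines neither equals() nor hashCode(), the response falls back to comparing only the state version and the elected cluster-manager node id, both in a null-safe way. A small plain-Java illustration of that pattern (not the OpenSearch class):

import java.util.Objects;

// "Best effort" identity: only cheap, stable fields are compared, and a missing
// cluster state simply maps to null fields.
final class BestEffortStateKey {
    private final Long version;                 // null when no cluster state was returned
    private final String clusterManagerNodeId;  // null when no cluster-manager is elected

    BestEffortStateKey(Long version, String clusterManagerNodeId) {
        this.version = version;
        this.clusterManagerNodeId = clusterManagerNodeId;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (!(o instanceof BestEffortStateKey)) return false;
        BestEffortStateKey other = (BestEffortStateKey) o;
        return Objects.equals(version, other.version)
            && Objects.equals(clusterManagerNodeId, other.clusterManagerNodeId);
    }

    @Override
    public int hashCode() {
        return Objects.hash(version, clusterManagerNodeId);
    }
}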

@@ -138,7 +138,7 @@ public void onNewClusterState(ClusterState newState) {
} else {
listener.onFailure(
new NotMasterException(
"master stepped down waiting for metadata version " + request.waitForMetadataVersion()
"cluster-manager stepped down waiting for metadata version " + request.waitForMetadataVersion()
)
);
}

@@ -85,7 +85,7 @@ public NodeStats nodeStats() {
}

/**
- * Cluster Health Status, only populated on master nodes.
+ * Cluster Health Status, only populated on cluster-manager nodes.
*/
@Nullable
public ClusterHealthStatus clusterStatus() {

@@ -60,7 +60,7 @@ public class ClusterStatsResponse extends BaseNodesResponse<ClusterStatsNodeResp
public ClusterStatsResponse(StreamInput in) throws IOException {
super(in);
timestamp = in.readVLong();
- // it may be that the master switched on us while doing the operation. In this case the status may be null.
+ // it may be that the cluster-manager switched on us while doing the operation. In this case the status may be null.
status = in.readOptionalWriteable(ClusterHealthStatus::readFrom);

String clusterUUID = null;
@@ -93,7 +93,7 @@ public ClusterStatsResponse(
indicesStats = new ClusterStatsIndices(nodes, MappingStats.of(state), AnalysisStats.of(state));
ClusterHealthStatus status = null;
for (ClusterStatsNodeResponse response : nodes) {
- // only the master node populates the status
+ // only the cluster-manager node populates the status
if (response.clusterStatus() != null) {
status = response.clusterStatus();
break;
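The nullable status survives serialization because it is written and read as an optional value: when the cluster-manager switched mid-operation, nothing is present on the wire and the field stays null. A minimal sketch of that round trip (helper class and method names are illustrative):

import org.opensearch.cluster.health.ClusterHealthStatus;
import org.opensearch.common.io.stream.StreamInput;
import org.opensearch.common.io.stream.StreamOutput;
import java.io.IOException;

final class OptionalStatusSketch {
    // Write side: a presence flag first, then the value only if non-null.
    static void write(StreamOutput out, ClusterHealthStatus status) throws IOException {
        out.writeOptionalWriteable(status);
    }

    // Read side: returns null when the cluster-manager never populated the status.
    static ClusterHealthStatus read(StreamInput in) throws IOException {
        return in.readOptionalWriteable(ClusterHealthStatus::readFrom);
    }
}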

@@ -38,10 +38,10 @@
* <ul>
* <li>A user overflows the index graveyard by deleting more than 500 indices while a node is offline and then the node rejoins the
* cluster</li>
* <li>A node (unsafely) moves from one cluster to another, perhaps because the original cluster lost all its master nodes</li>
* <li>A node (unsafely) moves from one cluster to another, perhaps because the original cluster lost all its cluster-manager nodes</li>
* <li>A user (unsafely) meddles with the contents of the data path, maybe restoring an old index folder from a backup</li>
* <li>A disk partially fails and the user has no replicas and no snapshots and wants to (unsafely) recover whatever they can</li>
* <li>A cluster loses all master nodes and those are (unsafely) restored from backup, but the backup does not contain the index</li>
* <li>A cluster loses all cluster-manager nodes and those are (unsafely) restored from backup, but the backup does not contain the index</li>
* </ul>
*
* <p>The classes in this package form an API for managing dangling indices, allowing them to be listed, imported or deleted.

@@ -61,7 +61,7 @@ enum ItemProcessingState {
TRANSLATED,
/**
* the request can not execute with the current mapping and should wait for a new mapping
- * to arrive from the master. A mapping request for the needed changes has already been
+ * to arrive from the cluster-manager. A mapping request for the needed changes has already been
* submitted
*/
WAIT_FOR_MAPPING_UPDATE,
@@ -144,7 +144,7 @@ public boolean isOperationExecuted() {
return currentItemState == ItemProcessingState.EXECUTED;
}

- /** returns true if the request needs to wait for a mapping update to arrive from the master */
+ /** returns true if the request needs to wait for a mapping update to arrive from the cluster-manager */
public boolean requiresWaitingForMappingUpdate() {
return currentItemState == ItemProcessingState.WAIT_FOR_MAPPING_UPDATE;
}
@@ -216,7 +216,7 @@ public <T extends DocWriteRequest<T>> T getRequestToExecute() {
return (T) requestToExecute;
}

- /** indicates that the current operation can not be completed and needs to wait for a new mapping from the master */
+ /** indicates that the current operation can not be completed and needs to wait for a new mapping from the cluster-manager */
public void markAsRequiringMappingUpdate() {
assert assertInvariants(ItemProcessingState.TRANSLATED);
currentItemState = ItemProcessingState.WAIT_FOR_MAPPING_UPDATE;
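The enum and helpers above form a small per-item state machine: an item that needs a mapping change parks in WAIT_FOR_MAPPING_UPDATE until the cluster-manager acknowledges the new mapping, then is retried. A self-contained sketch of that flow (everything here is a hypothetical stand-in for BulkPrimaryExecutionContext; only the state names mirror the real enum):

// Hypothetical, simplified model of the per-item flow.
final class ItemFlowSketch {
    enum ItemState { INITIAL, TRANSLATED, WAIT_FOR_MAPPING_UPDATE, EXECUTED }

    interface MappingService {
        // Ask the cluster-manager to publish the new mapping; the callback fires once it is acked.
        void updateMappings(Runnable onAcked);
    }

    private ItemState state = ItemState.INITIAL;

    void execute(MappingService mappings, Runnable operation, boolean needsNewMapping) {
        state = ItemState.TRANSLATED;
        if (needsNewMapping) {
            // Park the item until the cluster-manager has published the mapping, then retry it.
            state = ItemState.WAIT_FOR_MAPPING_UPDATE;
            mappings.updateMappings(() -> {
                state = ItemState.TRANSLATED;
                run(operation);
            });
        } else {
            run(operation);
        }
    }

    private void run(Runnable operation) {
        operation.run();
        state = ItemState.EXECUTED;
    }
}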

@@ -39,7 +39,7 @@
public interface MappingUpdatePerformer {

/**
- * Update the mappings on the master.
+ * Update the mappings on the cluster-manager.
*/
void updateMappings(Mapping update, ShardId shardId, ActionListener<Void> listener);

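A caller-side sketch of how this interface is typically used: submit the dynamic mapping change and only resume the parked bulk item once the cluster-manager has acknowledged it (retryParkedItem and failParkedItem are hypothetical helpers, not OpenSearch methods):

// Hypothetical caller: the listener decides whether the parked item is retried or failed.
mappingUpdater.updateMappings(mappingUpdate, shardId, ActionListener.wrap(
    ignored -> retryParkedItem(),   // mapping acked by the cluster-manager: retry the item
    e -> failParkedItem(e)          // publication failed: surface the error for this item
));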

@@ -621,7 +621,7 @@ private static Engine.Result performOpOnReplica(
throw new IllegalStateException("Unexpected request operation type on replica: " + docWriteRequest.opType().getLowercase());
}
if (result.getResultType() == Engine.Result.Type.MAPPING_UPDATE_REQUIRED) {
- // Even though the primary waits on all nodes to ack the mapping changes to the master
+ // Even though the primary waits on all nodes to ack the mapping changes to the cluster-manager
// (see MappingUpdatedAction.updateMappingOnMaster) we still need to protect against missing mappings
// and wait for them. The reason is concurrent requests. Request r1 which has new field f triggers a
// mapping update. Assume that that update is first applied on the primary, and only later on the replica