[Backport 2.x] Replace internal usages of 'master' term in 'server/src/test' directory #3444

Merged · 1 commit · May 26, 2022
@@ -386,7 +386,7 @@ public void onFailure(Exception e) {
);

if (isolatedNode.equals(nonClusterManagerNode)) {
- assertNoMaster(nonClusterManagerNode);
+ assertNoClusterManager(nonClusterManagerNode);
} else {
ensureStableCluster(2, nonClusterManagerNode);
}
@@ -91,7 +91,7 @@ public void testClusterManagerNodeGCs() throws Exception {

logger.info("waiting for nodes to de-elect cluster-manager [{}]", oldClusterManagerNode);
for (String node : oldNonClusterManagerNodesSet) {
- assertDifferentMaster(node, oldClusterManagerNode);
+ assertDifferentClusterManager(node, oldClusterManagerNode);
}

logger.info("waiting for nodes to elect a new cluster-manager");
@@ -107,7 +107,7 @@ public void testClusterManagerNodeGCs() throws Exception {
// make sure all nodes agree on cluster-manager
String newClusterManager = internalCluster().getMasterName();
assertThat(newClusterManager, not(equalTo(oldClusterManagerNode)));
- assertMaster(newClusterManager, nodes);
+ assertClusterManager(newClusterManager, nodes);
}

/**
@@ -137,7 +137,7 @@ public void testIsolateClusterManagerAndVerifyClusterStateConsensus() throws Exc
ensureStableCluster(2, nonIsolatedNode);

// make sure isolated node picks up on things.
- assertNoMaster(isolatedNode, TimeValue.timeValueSeconds(40));
+ assertNoClusterManager(isolatedNode, TimeValue.timeValueSeconds(40));

// restore isolation
networkDisruption.stopDisrupting();
@@ -227,7 +227,7 @@ public void testVerifyApiBlocksDuringPartition() throws Exception {
// continuously ping until network failures have been resolved. However
// It may take a bit before the node detects it has been cut off from the elected cluster-manager
logger.info("waiting for isolated node [{}] to have no cluster-manager", isolatedNode);
- assertNoMaster(isolatedNode, NoMasterBlockService.NO_MASTER_BLOCK_WRITES, TimeValue.timeValueSeconds(30));
+ assertNoClusterManager(isolatedNode, NoMasterBlockService.NO_MASTER_BLOCK_WRITES, TimeValue.timeValueSeconds(30));

logger.info("wait until elected cluster-manager has been removed and a new 2 node cluster was from (via [{}])", isolatedNode);
ensureStableCluster(2, nonIsolatedNode);
@@ -273,7 +273,7 @@ public void testVerifyApiBlocksDuringPartition() throws Exception {
// continuously ping until network failures have been resolved. However
// It may take a bit before the node detects it has been cut off from the elected cluster-manager
logger.info("waiting for isolated node [{}] to have no cluster-manager", isolatedNode);
- assertNoMaster(isolatedNode, NoMasterBlockService.NO_MASTER_BLOCK_ALL, TimeValue.timeValueSeconds(30));
+ assertNoClusterManager(isolatedNode, NoMasterBlockService.NO_MASTER_BLOCK_ALL, TimeValue.timeValueSeconds(30));

// make sure we have stable cluster & cross partition recoveries are canceled by the removal of the missing node
// the unresponsive partition causes recoveries to only time out after 15m (default) and these will cause
@@ -81,7 +81,7 @@ public void testClusterJoinDespiteOfPublishingIssues() throws Exception {
);
nonClusterManagerTransportService.addFailToSendNoConnectRule(clusterManagerTranspotService);

- assertNoMaster(nonClusterManagerNode);
+ assertNoClusterManager(nonClusterManagerNode);

logger.info(
"blocking cluster state publishing from cluster-manager [{}] to non cluster-manager [{}]",
@@ -166,7 +166,7 @@ public void testElectClusterManagerWithLatestVersion() throws Exception {
logger.info("--> forcing a complete election to make sure \"preferred\" cluster-manager is elected");
isolateAllNodes.startDisrupting();
for (String node : nodes) {
- assertNoMaster(node);
+ assertNoClusterManager(node);
}
internalCluster().clearDisruptionScheme();
ensureStableCluster(3);
@@ -194,7 +194,7 @@ public void testElectClusterManagerWithLatestVersion() throws Exception {
logger.info("--> forcing a complete election again");
isolateAllNodes.startDisrupting();
for (String node : nodes) {
- assertNoMaster(node);
+ assertNoClusterManager(node);
}

isolateAllNodes.stopDisrupting();
@@ -242,7 +242,7 @@ public void testNodeNotReachableFromClusterManager() throws Exception {
ensureStableCluster(2, clusterManagerNode);

logger.info("waiting for [{}] to have no cluster-manager", nonClusterManagerNode);
- assertNoMaster(nonClusterManagerNode);
+ assertNoClusterManager(nonClusterManagerNode);

logger.info("healing partition and checking cluster reforms");
clusterManagerTransportService.clearAllRules();
@@ -252,7 +252,7 @@ public void testWithdrawsVotesFromNodesMatchingWildcard() throws InterruptedExce
assertWarnings(AddVotingConfigExclusionsRequest.DEPRECATION_MESSAGE);
}

- public void testWithdrawsVotesFromAllMasterEligibleNodes() throws InterruptedException {
+ public void testWithdrawsVotesFromAllClusterManagerEligibleNodes() throws InterruptedException {
final CountDownLatch countDownLatch = new CountDownLatch(2);

clusterStateObserver.waitForNextChange(new AdjustConfigurationForExclusions(countDownLatch));
@@ -349,14 +349,14 @@ public void testReturnsErrorIfNoMatchingNodeDescriptions() throws InterruptedExc
assertWarnings(AddVotingConfigExclusionsRequest.DEPRECATION_MESSAGE);
}

- public void testOnlyMatchesMasterEligibleNodes() throws InterruptedException {
+ public void testOnlyMatchesClusterManagerEligibleNodes() throws InterruptedException {
final CountDownLatch countDownLatch = new CountDownLatch(1);
final SetOnce<TransportException> exceptionHolder = new SetOnce<>();

transportService.sendRequest(
localNode,
AddVotingConfigExclusionsAction.NAME,
- makeRequestWithNodeDescriptions("_all", "master:false"),
+ makeRequestWithNodeDescriptions("_all", "cluster_manager:false"),
expectError(e -> {
exceptionHolder.set(e);
countDownLatch.countDown();
@@ -368,7 +368,7 @@ public void testOnlyMatchesMasterEligibleNodes() throws InterruptedException {
assertThat(rootCause, instanceOf(IllegalArgumentException.class));
assertThat(
rootCause.getMessage(),
equalTo("add voting config exclusions request for [_all, master:false] matched no cluster-manager-eligible nodes")
equalTo("add voting config exclusions request for [_all, cluster_manager:false] matched no cluster-manager-eligible nodes")
);
assertWarnings(AddVotingConfigExclusionsRequest.DEPRECATION_MESSAGE);
}
@@ -112,9 +112,9 @@ public void testClusterHealth() throws IOException {
assertThat(clusterHealth.getActiveShardsPercent(), is(allOf(greaterThanOrEqualTo(0.0), lessThanOrEqualTo(100.0))));
}

- public void testClusterHealthVerifyMasterNodeDiscovery() throws IOException {
+ public void testClusterHealthVerifyClusterManagerNodeDiscovery() throws IOException {
DiscoveryNode localNode = new DiscoveryNode("node", OpenSearchTestCase.buildNewFakeTransportAddress(), Version.CURRENT);
- // set the node information to verify master_node discovery in ClusterHealthResponse
+ // set the node information to verify cluster_manager_node discovery in ClusterHealthResponse
ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
.nodes(DiscoveryNodes.builder().add(localNode).localNodeId(localNode.getId()).masterNodeId(localNode.getId()))
.build();
@@ -474,12 +474,12 @@ public void onFailure(Exception e) {
for (int i = 1; i < testNodes.length; i++) {
discoveryNodes[i - 1] = testNodes[i].discoveryNode();
}
- DiscoveryNode master = discoveryNodes[0];
+ DiscoveryNode clusterManager = discoveryNodes[0];
for (int i = 1; i < testNodes.length; i++) {
// Notify only nodes that should remain in the cluster
setState(
testNodes[i].clusterService,
- ClusterStateCreationUtils.state(testNodes[i].discoveryNode(), master, discoveryNodes)
+ ClusterStateCreationUtils.state(testNodes[i].discoveryNode(), clusterManager, discoveryNodes)
);
}
if (randomBoolean()) {
@@ -257,9 +257,9 @@ public static void connectNodes(TestNode... nodes) {
for (int i = 0; i < nodes.length; i++) {
discoveryNodes[i] = nodes[i].discoveryNode();
}
- DiscoveryNode master = discoveryNodes[0];
+ DiscoveryNode clusterManager = discoveryNodes[0];
for (TestNode node : nodes) {
- setState(node.clusterService, ClusterStateCreationUtils.state(node.discoveryNode(), master, discoveryNodes));
+ setState(node.clusterService, ClusterStateCreationUtils.state(node.discoveryNode(), clusterManager, discoveryNodes));
}
for (TestNode nodeA : nodes) {
for (TestNode nodeB : nodes) {
@@ -161,7 +161,7 @@ public void testEqualsAndHashCode() {
assertEquals(request, copy);
assertEquals(request.hashCode(), copy.hashCode());

- // Changing masterNodeTime makes requests not equal
+ // Changing clusterManagerNodeTimeout makes requests not equal
copy.masterNodeTimeout(timeValueMillis(request.masterNodeTimeout().millis() + 1));
assertNotEquals(request, copy);
assertNotEquals(request.hashCode(), copy.hashCode());
@@ -260,7 +260,7 @@ public void testDefaultMaxConcurrentSearches() {
}
builder.add(
new DiscoveryNode(
"master",
"cluster_manager",
buildNewFakeTransportAddress(),
Collections.emptyMap(),
Collections.singleton(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE),
@@ -366,29 +366,29 @@ public void testOneRequestIsSentToEachNodeHoldingAShard() {
}
}

- // simulate the master being removed from the cluster but before a new master is elected
- // as such, the shards assigned to the master will still show up in the cluster state as assigned to a node but
- // that node will not be in the local cluster state on any node that has detected the master as failing
+ // simulate the cluster-manager being removed from the cluster but before a new cluster-manager is elected
+ // as such, the shards assigned to the cluster-manager will still show up in the cluster state as assigned to a node but
+ // that node will not be in the local cluster state on any node that has detected the cluster-manager as failing
// in this case, such a shard should be treated as unassigned
- public void testRequestsAreNotSentToFailedMaster() {
+ public void testRequestsAreNotSentToFailedClusterManager() {
Request request = new Request(new String[] { TEST_INDEX });
PlainActionFuture<Response> listener = new PlainActionFuture<>();

- DiscoveryNode masterNode = clusterService.state().nodes().getMasterNode();
+ DiscoveryNode clusterManagerNode = clusterService.state().nodes().getMasterNode();
DiscoveryNodes.Builder builder = DiscoveryNodes.builder(clusterService.state().getNodes());
- builder.remove(masterNode.getId());
+ builder.remove(clusterManagerNode.getId());

setState(clusterService, ClusterState.builder(clusterService.state()).nodes(builder));

action.new AsyncAction(null, request, listener).start();

Map<String, List<CapturingTransport.CapturedRequest>> capturedRequests = transport.getCapturedRequestsByTargetNodeAndClear();

- // the master should not be in the list of nodes that requests were sent to
+ // the cluster manager should not be in the list of nodes that requests were sent to
ShardsIterator shardIt = clusterService.state().routingTable().allShards(new String[] { TEST_INDEX });
Set<String> set = new HashSet<>();
for (ShardRouting shard : shardIt) {
- if (!shard.currentNodeId().equals(masterNode.getId())) {
+ if (!shard.currentNodeId().equals(clusterManagerNode.getId())) {
set.add(shard.currentNodeId());
}
}
@@ -399,7 +399,7 @@ public void testRequestsAreNotSentToFailedMaster() {
// check requests were sent to the right nodes
assertEquals(set, capturedRequests.keySet());
for (Map.Entry<String, List<CapturingTransport.CapturedRequest>> entry : capturedRequests.entrySet()) {
- // check one request was sent to each non-master node
+ // check one request was sent to each non-cluster-manager node
assertEquals(1, entry.getValue().size());
}
}
@@ -456,13 +456,13 @@ public void testResultAggregation() throws ExecutionException, InterruptedExcept
Request request = new Request(new String[] { TEST_INDEX });
PlainActionFuture<Response> listener = new PlainActionFuture<>();

- // simulate removing the master
- final boolean simulateFailedMasterNode = rarely();
- DiscoveryNode failedMasterNode = null;
- if (simulateFailedMasterNode) {
- failedMasterNode = clusterService.state().nodes().getMasterNode();
+ // simulate removing the cluster-manager
+ final boolean simulateFailedClusterManagerNode = rarely();
+ DiscoveryNode failedClusterManagerNode = null;
+ if (simulateFailedClusterManagerNode) {
+ failedClusterManagerNode = clusterService.state().nodes().getMasterNode();
DiscoveryNodes.Builder builder = DiscoveryNodes.builder(clusterService.state().getNodes());
- builder.remove(failedMasterNode.getId());
+ builder.remove(failedClusterManagerNode.getId());
builder.masterNodeId(null);

setState(clusterService, ClusterState.builder(clusterService.state()).nodes(builder));
Expand Down Expand Up @@ -511,8 +511,8 @@ public void testResultAggregation() throws ExecutionException, InterruptedExcept
transport.handleResponse(requestId, nodeResponse);
}
}
- if (simulateFailedMasterNode) {
- totalShards += map.get(failedMasterNode.getId()).size();
+ if (simulateFailedClusterManagerNode) {
+ totalShards += map.get(failedClusterManagerNode.getId()).size();
}

Response response = listener.get();
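For context on the helpers being renamed: assertions such as `assertNoClusterManager`, `assertClusterManager`, and `assertDifferentClusterManager` come from the shared disruption-test infrastructure and check a node's local view of the elected cluster-manager. The snippet below is only a hypothetical sketch of what one of these helpers can look like; it assumes the `OpenSearchIntegTestCase` utilities `client(node)` and `assertBusy()` and is not copied from the actual OpenSearch implementation.

```java
import java.util.concurrent.TimeUnit;

import org.opensearch.cluster.node.DiscoveryNodes;
import org.opensearch.common.unit.TimeValue;

// Hypothetical sketch only -- assumes this method is a member of a test class that
// extends OpenSearchIntegTestCase (which supplies client(node) and assertBusy()).
private void assertNoClusterManager(final String node, final TimeValue maxWaitTime) throws Exception {
    assertBusy(() -> {
        // Read the node's local cluster state, without routing the request through an elected cluster-manager.
        DiscoveryNodes nodes = client(node).admin().cluster().prepareState().setLocal(true).get().getState().nodes();
        // The isolated node should not believe any cluster-manager is currently elected.
        assertNull("node [" + node + "] still sees [" + nodes.getMasterNode() + "] as cluster-manager", nodes.getMasterNode());
    }, maxWaitTime.millis(), TimeUnit.MILLISECONDS);
}
```

The overloads used in this diff (taking an expected `NO_MASTER_BLOCK_*` cluster block or a custom timeout) follow the same pattern of polling the node's local cluster state until the condition holds or the wait time elapses.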