diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/VersionCollection.java b/buildSrc/src/main/java/org/elasticsearch/gradle/VersionCollection.java index e2af34dbabdc0..cf8eb1829abef 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/VersionCollection.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/VersionCollection.java @@ -171,30 +171,38 @@ public UnreleasedVersionInfo unreleasedInfo(Version version) { } public void forPreviousUnreleased(Consumer consumer) { - getUnreleased().stream() + List collect = getUnreleased().stream() .filter(version -> version.equals(currentVersion) == false) - .forEach(version -> consumer.accept( - new UnreleasedVersionInfo( + .map(version -> new UnreleasedVersionInfo( version, getBranchFor(version), getGradleProjectNameFor(version) ) - )); + ) + .collect(Collectors.toList()); + + collect.forEach(uvi -> consumer.accept(uvi)); } private String getGradleProjectNameFor(Version version) { if (version.equals(currentVersion)) { throw new IllegalArgumentException("The Gradle project to build " + version + " is the current build."); } + Map> releasedMajorGroupedByMinor = getReleasedMajorGroupedByMinor(); if (version.getRevision() == 0) { - if (releasedMajorGroupedByMinor - .get(releasedMajorGroupedByMinor.keySet().stream().max(Integer::compareTo).orElse(0)) - .contains(version)) { - return "minor"; + List unreleasedStagedOrMinor = getUnreleased().stream() + .filter(v -> v.getRevision() == 0) + .collect(Collectors.toList()); + if (unreleasedStagedOrMinor.size() > 2) { + if (unreleasedStagedOrMinor.get(unreleasedStagedOrMinor.size() - 2).equals(version)) { + return "minor"; + } else{ + return "staged"; + } } else { - return "staged"; + return "minor"; } } else { if (releasedMajorGroupedByMinor @@ -239,8 +247,10 @@ public List getUnreleased() { unreleased.add(getLatestVersionByKey(groupByMinor, greatestMinor - 1)); if (groupByMinor.getOrDefault(greatestMinor - 1, emptyList()).size() == 1) { // we found that the previous minor is staged but not yet released - // in this case, the minor before that has a bugfix - unreleased.add(getLatestVersionByKey(groupByMinor, greatestMinor - 2)); + // in this case, the minor before that has a bugfix, should there be such a minor + if (greatestMinor >= 2) { + unreleased.add(getLatestVersionByKey(groupByMinor, greatestMinor - 2)); + } } } diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/VersionCollectionTests.java b/buildSrc/src/test/java/org/elasticsearch/gradle/VersionCollectionTests.java index d1b4e893ec6ad..4ea33b240e532 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/VersionCollectionTests.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/VersionCollectionTests.java @@ -81,6 +81,9 @@ public class VersionCollectionTests extends GradleUnitTestCase { "6_0_0", "6_0_1", "6_1_0", "6_1_1", "6_1_2", "6_1_3", "6_1_4", "6_2_0", "6_2_1", "6_2_2", "6_2_3", "6_2_4", "6_3_0", "6_3_1", "6_3_2", "6_4_0", "6_4_1", "6_4_2" )); + sampleVersions.put("7.1.0", asList( + "7_1_0", "7_0_0", "6_7_0", "6_6_1", "6_6_0" + )); } @Test(expected = IllegalArgumentException.class) @@ -145,6 +148,11 @@ public void testWireCompatible() { singletonList("7.3.0"), getVersionCollection("8.0.0").getWireCompatible() ); + assertVersionsEquals( + asList("6.7.0", "7.0.0"), + getVersionCollection("7.1.0").getWireCompatible() + ); + } public void testWireCompatibleUnreleased() { @@ -171,6 +179,10 @@ public void testWireCompatibleUnreleased() { singletonList("7.3.0"), 
getVersionCollection("8.0.0").getUnreleasedWireCompatible() ); + assertVersionsEquals( + asList("6.7.0", "7.0.0"), + getVersionCollection("7.1.0").getWireCompatible() + ); } public void testIndexCompatible() { @@ -286,7 +298,7 @@ public void testGetBranch() { getVersionCollection("6.4.2") ); assertUnreleasedBranchNames( - asList("5.6", "6.4", "6.5"), + asList("5.6", "6.4", "6.x"), getVersionCollection("6.6.0") ); assertUnreleasedBranchNames( @@ -309,13 +321,17 @@ public void testGetGradleProjectName() { getVersionCollection("6.4.2") ); assertUnreleasedGradleProjectNames( - asList("maintenance", "bugfix", "staged"), + asList("maintenance", "bugfix", "minor"), getVersionCollection("6.6.0") ); assertUnreleasedGradleProjectNames( asList("bugfix", "staged", "minor"), getVersionCollection("8.0.0") ); + assertUnreleasedGradleProjectNames( + asList("staged", "minor"), + getVersionCollection("7.1.0") + ); } public void testCompareToAuthoritative() { diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 118ab2f905f74..e6aadf6b82055 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -1,4 +1,4 @@ -elasticsearch = 7.0.0 +elasticsearch = 8.0.0 lucene = 8.0.0-snapshot-83f9835 # optional dependencies diff --git a/distribution/bwc/build.gradle b/distribution/bwc/build.gradle index 0ee597b449a41..ff111ecaa08ca 100644 --- a/distribution/bwc/build.gradle +++ b/distribution/bwc/build.gradle @@ -141,6 +141,12 @@ bwcVersions.forPreviousUnreleased { VersionCollection.UnreleasedVersionInfo unre extension += '.gz' } } + if (bwcVersion.onOrAfter('7.0.0') && projectName.contains('deb')) { + classifier = "-amd64" + } + if (bwcVersion.onOrAfter('7.0.0') && projectName.contains('rpm')) { + classifier = "-x86_64" + } if (bwcVersion.onOrAfter('6.3.0')) { baseDir += projectName.endsWith('zip') || projectName.endsWith('tar') ? 
'/archives' : '/packages' // add oss variant first diff --git a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/RemovePluginCommandTests.java b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/RemovePluginCommandTests.java index c62d37a4e28d8..415be8f2bbd19 100644 --- a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/RemovePluginCommandTests.java +++ b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/RemovePluginCommandTests.java @@ -137,12 +137,21 @@ public void testBasic() throws Exception { } public void testRemoveOldVersion() throws Exception { + Version previous = VersionUtils.getPreviousVersion(); + if (previous.before(Version.CURRENT.minimumIndexCompatibilityVersion()) ) { + // Can happen when bumping majors: 8.0 is only compat back to 7.0, but that's not released yet + // In this case, ignore what's released and just find that latest version before current + previous = VersionUtils.allVersions().stream() + .filter(v -> v.before(Version.CURRENT)) + .max(Version::compareTo) + .get(); + } createPlugin( "fake", VersionUtils.randomVersionBetween( random(), Version.CURRENT.minimumIndexCompatibilityVersion(), - VersionUtils.getPreviousVersion())); + previous)); removePlugin("fake", home, randomBoolean()); assertThat(Files.exists(env.pluginsFile().resolve("fake")), equalTo(false)); assertRemoveCleaned(env); diff --git a/docs/Versions.asciidoc b/docs/Versions.asciidoc index 8a446bf037a12..89d0be877b839 100644 --- a/docs/Versions.asciidoc +++ b/docs/Versions.asciidoc @@ -1,5 +1,5 @@ -:version: 7.0.0-alpha2 -:major-version: 7.x +:version: 8.0.0-alpha1 +:major-version: 8.x :lucene_version: 8.0.0 :lucene_version_path: 8_0_0 :branch: master diff --git a/docs/reference/ml/apis/put-datafeed.asciidoc b/docs/reference/ml/apis/put-datafeed.asciidoc index 18c611e97cac1..05e02ce3615df 100644 --- a/docs/reference/ml/apis/put-datafeed.asciidoc +++ b/docs/reference/ml/apis/put-datafeed.asciidoc @@ -19,6 +19,11 @@ Instantiates a {dfeed}. You must create a job before you create a {dfeed}. You can associate only one {dfeed} to each job. +IMPORTANT: You must use {kib} or this API to create a {dfeed}. Do not put a {dfeed} + directly to the `.ml-config` index using the Elasticsearch index API. + If {es} {security-features} are enabled, do not give users `write` + privileges on the `.ml-config` index. + ==== Path Parameters diff --git a/docs/reference/ml/apis/put-job.asciidoc b/docs/reference/ml/apis/put-job.asciidoc index 4abeebee3e47a..e3d80c276dc55 100644 --- a/docs/reference/ml/apis/put-job.asciidoc +++ b/docs/reference/ml/apis/put-job.asciidoc @@ -12,7 +12,13 @@ Instantiates a job. `PUT _ml/anomaly_detectors/` -//===== Description +===== Description + +IMPORTANT: You must use {kib} or this API to create a {ml} job. Do not put a job + directly to the `.ml-config` index using the Elasticsearch index API. + If {es} {security-features} are enabled, do not give users `write` + privileges on the `.ml-config` index. 
+ ==== Path Parameters diff --git a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java index 9740ff4222d7e..c6713a2907e33 100644 --- a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java +++ b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java @@ -38,6 +38,7 @@ import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.rest.action.document.RestBulkAction; import org.elasticsearch.rest.action.document.RestGetAction; +import org.elasticsearch.rest.action.document.RestIndexAction; import org.elasticsearch.rest.action.document.RestUpdateAction; import org.elasticsearch.rest.action.search.RestExplainAction; import org.elasticsearch.test.NotEqualMessageBuilder; @@ -80,15 +81,20 @@ */ public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase { private final boolean supportsLenientBooleans = getOldClusterVersion().before(Version.V_6_0_0_alpha1); - private static final Version VERSION_5_1_0_UNRELEASED = Version.fromString("5.1.0"); private String index; + private String type; @Before public void setIndex() throws IOException { index = getTestName().toLowerCase(Locale.ROOT); } + @Before + public void setType() { + type = getOldClusterVersion().before(Version.V_6_7_0) ? "doc" : "_doc"; + } + public void testSearch() throws Exception { int count; if (isRunningAgainstOldCluster()) { @@ -102,7 +108,9 @@ public void testSearch() throws Exception { } { mappingsAndSettings.startObject("mappings"); - mappingsAndSettings.startObject("doc"); + if (isRunningAgainstAncientCluster()) { + mappingsAndSettings.startObject(type); + } mappingsAndSettings.startObject("properties"); { mappingsAndSettings.startObject("string"); @@ -121,7 +129,9 @@ public void testSearch() throws Exception { mappingsAndSettings.endObject(); } mappingsAndSettings.endObject(); - mappingsAndSettings.endObject(); + if (isRunningAgainstAncientCluster()) { + mappingsAndSettings.endObject(); + } mappingsAndSettings.endObject(); } mappingsAndSettings.endObject(); @@ -135,17 +145,20 @@ public void testSearch() throws Exception { count = randomIntBetween(2000, 3000); byte[] randomByteArray = new byte[16]; random().nextBytes(randomByteArray); - indexRandomDocuments(count, true, true, i -> { - return JsonXContent.contentBuilder().startObject() - .field("string", randomAlphaOfLength(10)) - .field("int", randomInt(100)) - .field("float", randomFloat()) - // be sure to create a "proper" boolean (True, False) for the first document so that automapping is correct - .field("bool", i > 0 && supportsLenientBooleans ? randomLenientBoolean() : randomBoolean()) - .field("field.with.dots", randomAlphaOfLength(10)) - .field("binary", Base64.getEncoder().encodeToString(randomByteArray)) - .endObject(); - }); + indexRandomDocuments( + count, + true, + true, + i -> JsonXContent.contentBuilder().startObject() + .field("string", randomAlphaOfLength(10)) + .field("int", randomInt(100)) + .field("float", randomFloat()) + // be sure to create a "proper" boolean (True, False) for the first document so that automapping is correct + .field("bool", i > 0 && supportsLenientBooleans ? 
randomLenientBoolean() : randomBoolean()) + .field("field.with.dots", randomAlphaOfLength(10)) + .field("binary", Base64.getEncoder().encodeToString(randomByteArray)) + .endObject() + ); refresh(); } else { count = countOfIndexedRandomDocuments(); @@ -155,7 +168,7 @@ public void testSearch() throws Exception { assertBasicSearchWorks(count); assertAllSearchWorks(count); assertBasicAggregationWorks(); - assertRealtimeGetWorks(); + assertRealtimeGetWorks(type); assertStoredBinaryFields(count); } @@ -171,7 +184,9 @@ public void testNewReplicasWork() throws Exception { } { mappingsAndSettings.startObject("mappings"); - mappingsAndSettings.startObject("doc"); + if (isRunningAgainstAncientCluster()) { + mappingsAndSettings.startObject(type); + } mappingsAndSettings.startObject("properties"); { mappingsAndSettings.startObject("field"); @@ -179,7 +194,9 @@ public void testNewReplicasWork() throws Exception { mappingsAndSettings.endObject(); } mappingsAndSettings.endObject(); - mappingsAndSettings.endObject(); + if (isRunningAgainstAncientCluster()) { + mappingsAndSettings.endObject(); + } mappingsAndSettings.endObject(); } mappingsAndSettings.endObject(); @@ -191,11 +208,8 @@ public void testNewReplicasWork() throws Exception { client().performRequest(createIndex); int numDocs = randomIntBetween(2000, 3000); - indexRandomDocuments(numDocs, true, false, i -> { - return JsonXContent.contentBuilder().startObject() - .field("field", "value") - .endObject(); - }); + indexRandomDocuments( + numDocs, true, false, i -> JsonXContent.contentBuilder().startObject().field("field", "value").endObject()); logger.info("Refreshing [{}]", index); client().performRequest(new Request("POST", "/" + index + "/_refresh")); } else { @@ -225,76 +239,6 @@ public void testNewReplicasWork() throws Exception { } } - /** - * Search on an alias that contains illegal characters that would prevent it from being created after 5.1.0. It should still be - * search-able though. 
- */ - public void testAliasWithBadName() throws Exception { - assumeTrue("Can only test bad alias name if old cluster is on 5.1.0 or before", - getOldClusterVersion().before(VERSION_5_1_0_UNRELEASED)); - - int count; - if (isRunningAgainstOldCluster()) { - XContentBuilder mappingsAndSettings = jsonBuilder(); - mappingsAndSettings.startObject(); - { - mappingsAndSettings.startObject("settings"); - mappingsAndSettings.field("number_of_shards", 1); - mappingsAndSettings.field("number_of_replicas", 0); - mappingsAndSettings.endObject(); - } - { - mappingsAndSettings.startObject("mappings"); - mappingsAndSettings.startObject("doc"); - mappingsAndSettings.startObject("properties"); - { - mappingsAndSettings.startObject("key"); - mappingsAndSettings.field("type", "keyword"); - mappingsAndSettings.endObject(); - } - mappingsAndSettings.endObject(); - mappingsAndSettings.endObject(); - mappingsAndSettings.endObject(); - } - mappingsAndSettings.endObject(); - Request createIndex = new Request("PUT", "/" + index); - createIndex.setJsonEntity(Strings.toString(mappingsAndSettings)); - client().performRequest(createIndex); - - String aliasName = "%23" + index; // %23 == # - client().performRequest(new Request("PUT", "/" + index + "/_alias/" + aliasName)); - Response response = client().performRequest(new Request("HEAD", "/" + index + "/_alias/" + aliasName)); - assertEquals(200, response.getStatusLine().getStatusCode()); - - count = randomIntBetween(32, 128); - indexRandomDocuments(count, true, true, i -> { - return JsonXContent.contentBuilder().startObject() - .field("key", "value") - .endObject(); - }); - refresh(); - } else { - count = countOfIndexedRandomDocuments(); - } - - Request request = new Request("GET", "/_cluster/state"); - request.addParameter("metric", "metadata"); - logger.error("clusterState=" + entityAsMap(client().performRequest(request))); - // We can read from the alias just like we can read from the index. - String aliasName = "%23" + index; // %23 == # - Map searchRsp = entityAsMap(client().performRequest(new Request("GET", "/" + aliasName + "/_search"))); - int totalHits = extractTotalHits(searchRsp); - assertEquals(count, totalHits); - if (isRunningAgainstOldCluster() == false) { - // We can remove the alias. 
- Response response = client().performRequest(new Request("DELETE", "/" + index + "/_alias/" + aliasName)); - assertEquals(200, response.getStatusLine().getStatusCode()); - // and check that it is gone: - response = client().performRequest(new Request("HEAD", "/" + index + "/_alias/" + aliasName)); - assertEquals(404, response.getStatusLine().getStatusCode()); - } - } - public void testClusterState() throws Exception { if (isRunningAgainstOldCluster()) { XContentBuilder mappingsAndSettings = jsonBuilder(); @@ -348,31 +292,45 @@ public void testShrink() throws IOException { mappingsAndSettings.startObject(); { mappingsAndSettings.startObject("mappings"); - mappingsAndSettings.startObject("doc"); - mappingsAndSettings.startObject("properties"); { - mappingsAndSettings.startObject("field"); - mappingsAndSettings.field("type", "text"); + if (isRunningAgainstAncientCluster()) { + mappingsAndSettings.startObject(type); + } + mappingsAndSettings.startObject("properties"); + { + mappingsAndSettings.startObject("field"); + { + mappingsAndSettings.field("type", "text"); + } + mappingsAndSettings.endObject(); + } mappingsAndSettings.endObject(); + if (isRunningAgainstAncientCluster()) { + mappingsAndSettings.endObject(); + } } mappingsAndSettings.endObject(); - mappingsAndSettings.endObject(); - mappingsAndSettings.endObject(); + if (isRunningAgainstAncientCluster() == false) { + // the default number of shards is now one so we have to set the number of shards to be more than one explicitly + mappingsAndSettings.startObject("settings"); + { + mappingsAndSettings.field("index.number_of_shards", 5); + } + mappingsAndSettings.endObject(); + } } mappingsAndSettings.endObject(); Request createIndex = new Request("PUT", "/" + index); createIndex.setJsonEntity(Strings.toString(mappingsAndSettings)); RequestOptions.Builder options = createIndex.getOptions().toBuilder(); options.setWarningsHandler(WarningsHandler.PERMISSIVE); + expectWarnings(RestIndexAction.TYPES_DEPRECATION_MESSAGE); createIndex.setOptions(options); client().performRequest(createIndex); numDocs = randomIntBetween(512, 1024); - indexRandomDocuments(numDocs, true, true, i -> { - return JsonXContent.contentBuilder().startObject() - .field("field", "value") - .endObject(); - }); + indexRandomDocuments( + numDocs, true, true, i -> JsonXContent.contentBuilder().startObject().field("field", "value").endObject()); ensureGreen(index); // wait for source index to be available on both nodes before starting shrink @@ -381,9 +339,7 @@ public void testShrink() throws IOException { client().performRequest(updateSettingsRequest); Request shrinkIndexRequest = new Request("PUT", "/" + index + "/_shrink/" + shrunkenIndex); - if (getOldClusterVersion().onOrAfter(Version.V_6_4_0)) { - shrinkIndexRequest.addParameter("copy_settings", "true"); - } + shrinkIndexRequest.setJsonEntity("{\"settings\": {\"index.number_of_shards\": 1}}"); client().performRequest(shrinkIndexRequest); @@ -419,16 +375,30 @@ public void testShrinkAfterUpgrade() throws IOException { mappingsAndSettings.startObject(); { mappingsAndSettings.startObject("mappings"); - mappingsAndSettings.startObject("doc"); - mappingsAndSettings.startObject("properties"); { - mappingsAndSettings.startObject("field"); - mappingsAndSettings.field("type", "text"); + if (isRunningAgainstAncientCluster()) { + mappingsAndSettings.startObject(type); + } + mappingsAndSettings.startObject("properties"); + { + mappingsAndSettings.startObject("field"); + { + mappingsAndSettings.field("type", "text"); + } + 
mappingsAndSettings.endObject(); + } mappingsAndSettings.endObject(); + if (isRunningAgainstAncientCluster()) { + mappingsAndSettings.endObject(); + } } mappingsAndSettings.endObject(); - mappingsAndSettings.endObject(); - mappingsAndSettings.endObject(); + if (isRunningAgainstAncientCluster() == false) { + // the default number of shards is now one so we have to set the number of shards to be more than one explicitly + mappingsAndSettings.startObject("settings"); + mappingsAndSettings.field("index.number_of_shards", 5); + mappingsAndSettings.endObject(); + } } mappingsAndSettings.endObject(); Request createIndex = new Request("PUT", "/" + index); @@ -439,11 +409,12 @@ public void testShrinkAfterUpgrade() throws IOException { client().performRequest(createIndex); numDocs = randomIntBetween(512, 1024); - indexRandomDocuments(numDocs, true, true, i -> { - return JsonXContent.contentBuilder().startObject() - .field("field", "value") - .endObject(); - }); + indexRandomDocuments( + numDocs, + true, + true, + i -> JsonXContent.contentBuilder().startObject().field("field", "value").endObject() + ); } else { ensureGreen(index); // wait for source index to be available on both nodes before starting shrink @@ -510,7 +481,7 @@ public void testRollover() throws IOException { bulk.append("{\"index\":{}}\n"); bulk.append("{\"test\":\"test\"}\n"); } - Request bulkRequest = new Request("POST", "/" + index + "_write/doc/_bulk"); + Request bulkRequest = new Request("POST", "/" + index + "_write/" + type + "/_bulk"); bulkRequest.setJsonEntity(bulk.toString()); bulkRequest.addParameter("refresh", ""); bulkRequest.setOptions(expectWarnings(RestBulkAction.TYPES_DEPRECATION_MESSAGE)); @@ -633,7 +604,7 @@ void assertBasicAggregationWorks() throws IOException { assertTotalHits(termsCount, boolTerms); } - void assertRealtimeGetWorks() throws IOException { + void assertRealtimeGetWorks(final String typeName) throws IOException { Request disableAutoRefresh = new Request("PUT", "/" + index + "/_settings"); disableAutoRefresh.setJsonEntity("{ \"index\": { \"refresh_interval\" : -1 }}"); client().performRequest(disableAutoRefresh); @@ -644,13 +615,15 @@ void assertRealtimeGetWorks() throws IOException { Map hit = (Map) ((List)(XContentMapValues.extractValue("hits.hits", searchResponse))).get(0); String docId = (String) hit.get("_id"); - Request updateRequest = new Request("POST", "/" + index + "/doc/" + docId + "/_update"); + Request updateRequest = new Request("POST", "/" + index + "/" + typeName + "/" + docId + "/_update"); updateRequest.setOptions(expectWarnings(RestUpdateAction.TYPES_DEPRECATION_MESSAGE)); updateRequest.setJsonEntity("{ \"doc\" : { \"foo\": \"bar\"}}"); client().performRequest(updateRequest); - Request getRequest = new Request("GET", "/" + index + "/doc/" + docId); - getRequest.setOptions(expectWarnings(RestGetAction.TYPES_DEPRECATION_MESSAGE)); + Request getRequest = new Request("GET", "/" + index + "/" + typeName + "/" + docId); + if (getOldClusterVersion().before(Version.V_6_7_0)) { + getRequest.setOptions(expectWarnings(RestGetAction.TYPES_DEPRECATION_MESSAGE)); + } Map getRsp = entityAsMap(client().performRequest(getRequest)); Map source = (Map) getRsp.get("_source"); assertTrue("doc does not contain 'foo' key: " + source, source.containsKey("foo")); @@ -689,7 +662,7 @@ static void assertNoFailures(Map response) { void assertTotalHits(int expectedTotalHits, Map response) { int actualTotalHits = extractTotalHits(response); - assertEquals(expectedTotalHits, actualTotalHits); + 
assertEquals(response.toString(), expectedTotalHits, actualTotalHits); } int extractTotalHits(Map response) { @@ -704,7 +677,7 @@ int extractTotalHits(Map response) { * Tests that a single document survives. Super basic smoke test. */ public void testSingleDoc() throws IOException { - String docLocation = "/" + index + "/doc/1"; + String docLocation = "/" + index + "/" + type + "/1"; String doc = "{\"test\": \"test\"}"; if (isRunningAgainstOldCluster()) { @@ -715,7 +688,9 @@ public void testSingleDoc() throws IOException { Request request = new Request("GET", docLocation); - request.setOptions(expectWarnings(RestGetAction.TYPES_DEPRECATION_MESSAGE)); + if (getOldClusterVersion().before(Version.V_6_7_0)) { + request.setOptions(expectWarnings(RestGetAction.TYPES_DEPRECATION_MESSAGE)); + } assertThat(toStr(client().performRequest(request)), containsString(doc)); } @@ -779,8 +754,12 @@ public void testRecovery() throws Exception { } if (shouldHaveTranslog) { // Update a few documents so we are sure to have a translog - indexRandomDocuments(count / 10, false /* Flushing here would invalidate the whole thing....*/, false, - i -> jsonBuilder().startObject().field("field", "value").endObject()); + indexRandomDocuments( + count / 10, + false, // flushing here would invalidate the whole thing + false, + i -> jsonBuilder().startObject().field("field", "value").endObject() + ); } saveInfoDocument("should_have_translog", Boolean.toString(shouldHaveTranslog)); } else { @@ -791,6 +770,7 @@ public void testRecovery() throws Exception { // Count the documents in the index to make sure we have as many as we put there Request countRequest = new Request("GET", "/" + index + "/_search"); countRequest.addParameter("size", "0"); + refresh(); Map countResponse = entityAsMap(client().performRequest(countRequest)); assertTotalHits(count, countResponse); @@ -863,7 +843,7 @@ public void testRecovery() throws Exception { */ public void testSnapshotRestore() throws IOException { int count; - if (isRunningAgainstOldCluster()) { + if (isRunningAgainstOldCluster() && getOldClusterVersion().major < 8) { // Create the index count = between(200, 300); indexRandomDocuments(count, true, true, i -> jsonBuilder().startObject().field("field", "value").endObject()); @@ -894,13 +874,19 @@ public void testSnapshotRestore() throws IOException { } templateBuilder.endObject(); templateBuilder.startObject("mappings"); { - templateBuilder.startObject("doc"); { - templateBuilder.startObject("_source"); { + if (isRunningAgainstAncientCluster()) { + templateBuilder.startObject(type); + } + { + templateBuilder.startObject("_source"); + { templateBuilder.field("enabled", true); } templateBuilder.endObject(); } - templateBuilder.endObject(); + if (isRunningAgainstAncientCluster()) { + templateBuilder.endObject(); + } } templateBuilder.endObject(); templateBuilder.startObject("aliases"); { @@ -921,8 +907,8 @@ public void testSnapshotRestore() throws IOException { createTemplateRequest.setJsonEntity(Strings.toString(templateBuilder)); // In 7.0, type names are no longer expected by default in put index template requests. - // We therefore use the deprecated typed APIs when running against the current version. 
- if (isRunningAgainstOldCluster() == false) { + // We therefore use the deprecated typed APIs when running against the current version, but testing with a pre-7 version + if (isRunningAgainstOldCluster() == false && getOldClusterVersion().major < 7) { createTemplateRequest.addParameter(INCLUDE_TYPE_NAME_PARAMETER, "true"); } createTemplateRequest.setOptions(allowTypeRemovalWarnings()); @@ -1016,12 +1002,13 @@ public void testSoftDeletes() throws Exception { int numDocs = between(10, 100); for (int i = 0; i < numDocs; i++) { String doc = Strings.toString(JsonXContent.contentBuilder().startObject().field("field", "v1").endObject()); - Request request = new Request("POST", "/" + index + "/doc/" + i); + Request request = new Request("POST", "/" + index + "/" + type + "/" + i); + if (isRunningAgainstAncientCluster() == false) { + request.setOptions(expectWarnings(RestIndexAction.TYPES_DEPRECATION_MESSAGE)); + } request.setJsonEntity(doc); client().performRequest(request); - if (rarely()) { - refresh(); - } + refresh(); } client().performRequest(new Request("POST", "/" + index + "/_flush")); int liveDocs = numDocs; @@ -1029,11 +1016,11 @@ public void testSoftDeletes() throws Exception { for (int i = 0; i < numDocs; i++) { if (randomBoolean()) { String doc = Strings.toString(JsonXContent.contentBuilder().startObject().field("field", "v2").endObject()); - Request request = new Request("POST", "/" + index + "/doc/" + i); + Request request = new Request("POST", "/" + index + "/" + type + "/" + i); request.setJsonEntity(doc); client().performRequest(request); } else if (randomBoolean()) { - client().performRequest(new Request("DELETE", "/" + index + "/doc/" + i)); + client().performRequest(new Request("DELETE", "/" + index + "/" + type + "/" + i)); liveDocs--; } } @@ -1046,7 +1033,7 @@ public void testSoftDeletes() throws Exception { } } - private void checkSnapshot(String snapshotName, int count, Version tookOnVersion) throws IOException { + private void checkSnapshot(final String snapshotName, final int count, final Version tookOnVersion) throws IOException { // Check the snapshot metadata, especially the version Request listSnapshotRequest = new Request("GET", "/_snapshot/repo/" + snapshotName); Map listSnapshotResponse = entityAsMap(client().performRequest(listSnapshotRequest)); @@ -1103,7 +1090,7 @@ && getOldClusterVersion().onOrAfter(Version.V_6_1_0) && getOldClusterVersion().b bulk.append("{\"index\":{\"_id\":\"").append(count + i).append("\"}}\n"); bulk.append("{\"test\":\"test\"}\n"); } - Request writeToRestoredRequest = new Request("POST", "/restored_" + index + "/doc/_bulk"); + Request writeToRestoredRequest = new Request("POST", "/restored_" + index + "/" + type + "/_bulk"); writeToRestoredRequest.addParameter("refresh", "true"); writeToRestoredRequest.setJsonEntity(bulk.toString()); writeToRestoredRequest.setOptions(expectWarnings(RestBulkAction.TYPES_DEPRECATION_MESSAGE)); @@ -1132,7 +1119,7 @@ && getOldClusterVersion().onOrAfter(Version.V_6_1_0) && getOldClusterVersion().b // In 7.0, type names are no longer returned by default in get index template requests. // We therefore use the deprecated typed APIs when running against the current version. 
- if (isRunningAgainstOldCluster() == false) { + if (isRunningAgainstAncientCluster() == false) { getTemplateRequest.addParameter(INCLUDE_TYPE_NAME_PARAMETER, "true"); } getTemplateRequest.setOptions(allowTypeRemovalWarnings()); @@ -1145,7 +1132,14 @@ && getOldClusterVersion().onOrAfter(Version.V_6_1_0) && getOldClusterVersion().b expectedTemplate.put("index_patterns", singletonList("evil_*")); } expectedTemplate.put("settings", singletonMap("index", singletonMap("number_of_shards", "1"))); - expectedTemplate.put("mappings", singletonMap("doc", singletonMap("_source", singletonMap("enabled", true)))); + // We don't have the type in the response starting with 7.0, but we won't have it on old cluster after upgrade + // either so look at the response to figure out the correct assertions + if (isTypeInTemplateResponse(getTemplateResponse)) { + expectedTemplate.put("mappings", singletonMap(type, singletonMap("_source", singletonMap("enabled", true)))); + } else { + expectedTemplate.put("mappings", singletonMap("_source", singletonMap("enabled", true))); + } + expectedTemplate.put("order", 0); Map aliases = new HashMap<>(); aliases.put("alias1", emptyMap()); @@ -1155,18 +1149,33 @@ && getOldClusterVersion().onOrAfter(Version.V_6_1_0) && getOldClusterVersion().b if (false == expectedTemplate.equals(getTemplateResponse)) { NotEqualMessageBuilder builder = new NotEqualMessageBuilder(); builder.compareMaps(getTemplateResponse, expectedTemplate); + logger.info("expected: {}\nactual:{}", expectedTemplate, getTemplateResponse); fail("template doesn't match:\n" + builder.toString()); } } + @SuppressWarnings("unchecked") + private boolean isTypeInTemplateResponse(Map getTemplateResponse) { + return ( (Map) ( + (Map) getTemplateResponse.getOrDefault("test_template", emptyMap()) + ).get("mappings")).get("_source") == null; + } + // TODO tests for upgrades after shrink. We've had trouble with shrink in the past. 
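// Editor's note, not part of this patch: isRunningAgainstAncientCluster() is used
// throughout the hunks above, but its definition is outside this excerpt. It is
// assumed to live in AbstractFullClusterRestartTestCase and to look roughly like the
// sketch below, i.e. "ancient" means the old cluster predates 7.0.0 and therefore
// still needs explicit mapping type names and the typed document endpoints.
protected boolean isRunningAgainstAncientCluster() {
    return isRunningAgainstOldCluster() && getOldClusterVersion().before(Version.V_7_0_0);
}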
- private void indexRandomDocuments(int count, boolean flushAllowed, boolean saveInfo, - CheckedFunction docSupplier) throws IOException { + private void indexRandomDocuments( + final int count, + final boolean flushAllowed, + final boolean saveInfo, + final CheckedFunction docSupplier) + throws IOException { logger.info("Indexing {} random documents", count); for (int i = 0; i < count; i++) { logger.debug("Indexing document [{}]", i); - Request createDocument = new Request("POST", "/" + index + "/doc/" + i); + Request createDocument = new Request("POST", "/" + index + "/" + type + "/" + i); + if (isRunningAgainstAncientCluster() == false) { + createDocument.setOptions(expectWarnings(RestBulkAction.TYPES_DEPRECATION_MESSAGE)); + } createDocument.setJsonEntity(Strings.toString(docSupplier.apply(i))); client().performRequest(createDocument); if (rarely()) { @@ -1191,16 +1200,21 @@ private void saveInfoDocument(String type, String value) throws IOException { infoDoc.field("value", value); infoDoc.endObject(); // Only create the first version so we know how many documents are created when the index is first created - Request request = new Request("PUT", "/info/doc/" + index + "_" + type); + Request request = new Request("PUT", "/info/" + this.type + "/" + index + "_" + type); request.addParameter("op_type", "create"); request.setJsonEntity(Strings.toString(infoDoc)); + if (isRunningAgainstAncientCluster() == false) { + request.setOptions(expectWarnings(RestIndexAction.TYPES_DEPRECATION_MESSAGE)); + } client().performRequest(request); } private String loadInfoDocument(String type) throws IOException { - Request request = new Request("GET", "/info/doc/" + index + "_" + type); + Request request = new Request("GET", "/info/" + this.type + "/" + index + "_" + type); request.addParameter("filter_path", "_source"); - request.setOptions(expectWarnings(RestGetAction.TYPES_DEPRECATION_MESSAGE)); + if (isRunningAgainstAncientCluster()) { + request.setOptions(expectWarnings(RestGetAction.TYPES_DEPRECATION_MESSAGE)); + } String doc = toStr(client().performRequest(request)); Matcher m = Pattern.compile("\"value\":\"(.+)\"").matcher(doc); assertTrue(doc, m.find()); diff --git a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java index f22b1b44c0763..d6cc9f078e0ed 100644 --- a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java +++ b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/QueryBuilderBWCIT.java @@ -20,6 +20,7 @@ package org.elasticsearch.upgrades; import org.apache.http.util.EntityUtils; +import org.elasticsearch.Version; import org.elasticsearch.client.Request; import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; @@ -145,6 +146,7 @@ private static void addCandidate(String querySource, QueryBuilder expectedQb) { } public void testQueryBuilderBWC() throws Exception { + final String type = getOldClusterVersion().before(Version.V_7_0_0) ? 
"doc" : "_doc"; String index = "queries"; if (isRunningAgainstOldCluster()) { XContentBuilder mappingsAndSettings = jsonBuilder(); @@ -157,7 +159,9 @@ public void testQueryBuilderBWC() throws Exception { } { mappingsAndSettings.startObject("mappings"); - mappingsAndSettings.startObject("doc"); + if (isRunningAgainstAncientCluster()) { + mappingsAndSettings.startObject(type); + } mappingsAndSettings.startObject("properties"); { mappingsAndSettings.startObject("query"); @@ -176,7 +180,9 @@ public void testQueryBuilderBWC() throws Exception { } mappingsAndSettings.endObject(); mappingsAndSettings.endObject(); - mappingsAndSettings.endObject(); + if (isRunningAgainstAncientCluster()) { + mappingsAndSettings.endObject(); + } } mappingsAndSettings.endObject(); Request request = new Request("PUT", "/" + index); @@ -188,7 +194,7 @@ public void testQueryBuilderBWC() throws Exception { assertEquals(200, rsp.getStatusLine().getStatusCode()); for (int i = 0; i < CANDIDATES.size(); i++) { - request = new Request("PUT", "/" + index + "/doc/" + Integer.toString(i)); + request = new Request("PUT", "/" + index + "/" + type + "/" + Integer.toString(i)); request.setJsonEntity((String) CANDIDATES.get(i)[0]); rsp = client().performRequest(request); assertEquals(201, rsp.getStatusLine().getStatusCode()); diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java index c80218c50ebe9..96ebaedadce93 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java @@ -19,20 +19,15 @@ package org.elasticsearch.upgrades; import org.apache.http.util.EntityUtils; -import org.elasticsearch.common.Booleans; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.rest.action.document.RestBulkAction; -import org.elasticsearch.Version; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; +import org.elasticsearch.common.Booleans; +import org.elasticsearch.rest.action.document.RestBulkAction; import java.io.IOException; import java.nio.charset.StandardCharsets; -import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.rest.action.search.RestSearchAction.TOTAL_HITS_AS_INT_PARAM; -import static org.hamcrest.Matchers.equalTo; /** * Basic test that indexed documents survive the rolling restart. 
See @@ -68,26 +63,6 @@ public void testIndexing() throws IOException { } if (CLUSTER_TYPE == ClusterType.OLD) { - { - Version minimumIndexCompatibilityVersion = Version.CURRENT.minimumIndexCompatibilityVersion(); - assertThat("this branch is not needed if we aren't compatible with 6.0", - minimumIndexCompatibilityVersion.onOrBefore(Version.V_6_0_0), equalTo(true)); - if (minimumIndexCompatibilityVersion.before(Version.V_7_0_0)) { - XContentBuilder template = jsonBuilder(); - template.startObject(); - { - template.field("index_patterns", "*"); - template.startObject("settings"); - template.field("number_of_shards", 5); - template.endObject(); - } - template.endObject(); - Request createTemplate = new Request("PUT", "/_template/template"); - createTemplate.setJsonEntity(Strings.toString(template)); - client().performRequest(createTemplate); - } - } - Request createTestIndex = new Request("PUT", "/test_index"); createTestIndex.setJsonEntity("{\"settings\": {\"index.number_of_replicas\": 0}}"); client().performRequest(createTestIndex); diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java index 3666a64896ae2..295aee8b869ff 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java @@ -300,7 +300,11 @@ public void testRecoveryWithSoftDeletes() throws Exception { if (randomBoolean()) { indexDocs(index, i, 1); // update } else if (randomBoolean()) { - client().performRequest(new Request("DELETE", index + "/test/" + i)); + if (getNodeId(v -> v.onOrAfter(Version.V_7_0_0)) == null) { + client().performRequest(new Request("DELETE", index + "/test/" + i)); + } else { + client().performRequest(new Request("DELETE", index + "/_doc/" + i)); + } } } } diff --git a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/10_basic.yml b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/10_basic.yml index bd07ee8a58469..375ba12a35621 100644 --- a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/10_basic.yml +++ b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/10_basic.yml @@ -67,8 +67,3 @@ - match: { hits.total: 1 } - match: { hits.hits.0._id: q3 } ---- -"Index with _all is available": - - do: - indices.get: - index: all-index diff --git a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/10_basic.yml b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/10_basic.yml index a26a3f8274d99..2672cee7cc78a 100644 --- a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/10_basic.yml +++ b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/10_basic.yml @@ -200,19 +200,3 @@ wait_for_completion: true task_id: $task ---- -"Create an index with _all explicitly disabled": - - skip: - features: warnings - - do: - warnings: - - "[_all] is deprecated in 6.0+ and will be removed in 7.0. As a replacement, you can use [copy_to] on mapping fields to create your own catch all field." 
- indices.create: - index: all-index - body: - mappings: - _all: - enabled: false - properties: - field: - type: text diff --git a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yml b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yml index 63e67652127e9..78a4aac867d8f 100644 --- a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yml +++ b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yml @@ -125,17 +125,4 @@ task_id: $task_id - match: { task.headers.X-Opaque-Id: "Reindexing Again" } ---- -"Index with _all is available": - - do: - indices.get: - index: all-index - - - do: - indices.get_mapping: - include_type_name: false - index: all-index - - - is_true: all-index.mappings._all - - match: { all-index.mappings._all.enabled: false} diff --git a/qa/verify-version-constants/src/test/java/org/elasticsearch/qa/verify_version_constants/VerifyVersionConstantsIT.java b/qa/verify-version-constants/src/test/java/org/elasticsearch/qa/verify_version_constants/VerifyVersionConstantsIT.java index 56297eaab8857..bba6b4a8aa27f 100644 --- a/qa/verify-version-constants/src/test/java/org/elasticsearch/qa/verify_version_constants/VerifyVersionConstantsIT.java +++ b/qa/verify-version-constants/src/test/java/org/elasticsearch/qa/verify_version_constants/VerifyVersionConstantsIT.java @@ -37,7 +37,7 @@ public void testLuceneVersionConstant() throws IOException, ParseException { assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); final ObjectPath objectPath = ObjectPath.createFromResponse(response); final String elasticsearchVersionString = objectPath.evaluate("version.number").toString(); - final Version elasticsearchVersion = Version.fromString(elasticsearchVersionString); + final Version elasticsearchVersion = Version.fromString(elasticsearchVersionString.replace("-SNAPSHOT", "")); final String luceneVersionString = objectPath.evaluate("version.lucene_version").toString(); final org.apache.lucene.util.Version luceneVersion = org.apache.lucene.util.Version.parse(luceneVersionString); assertThat(elasticsearchVersion.luceneVersion, equalTo(luceneVersion)); diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.shrink.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.shrink.json index f92421b79ae91..5ef943eacba6c 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.shrink.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.shrink.json @@ -18,10 +18,6 @@ } }, "params": { - "copy_settings": { - "type" : "boolean", - "description" : "whether or not to copy settings from the source index (defaults to false)" - }, "timeout": { "type" : "time", "description" : "Explicit operation timeout" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.split.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.split.json index 2c14fced28c36..a79fa7b708269 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.split.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.split.json @@ -18,10 +18,6 @@ } }, "params": { - "copy_settings": { - "type" : "boolean", - "description" : "whether or not to copy settings from the source index (defaults to false)" - }, "timeout": { "type" : "time", "description" : "Explicit operation timeout" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/30_copy_settings.yml 
b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/30_copy_settings.yml index 8c4c84c4be152..3add4b100d812 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/30_copy_settings.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/30_copy_settings.yml @@ -43,13 +43,10 @@ target: "copy-settings-target" wait_for_active_shards: 1 master_timeout: 10s - copy_settings: true body: settings: index.number_of_replicas: 0 index.merge.scheduler.max_thread_count: 2 - warnings: - - "parameter [copy_settings] is deprecated and will be removed in 8.0.0" - do: cluster.health: @@ -64,42 +61,3 @@ - match: { copy-settings-target.settings.index.merge.scheduler.max_thread_count: "2" } - match: { copy-settings-target.settings.index.blocks.write: "true" } - match: { copy-settings-target.settings.index.routing.allocation.include._id: $master } - - # now we do a actual shrink and copy settings (by default) - - do: - indices.shrink: - index: "source" - target: "default-copy-settings-target" - wait_for_active_shards: 1 - master_timeout: 10s - body: - settings: - index.number_of_replicas: 0 - index.merge.scheduler.max_thread_count: 2 - - - do: - cluster.health: - wait_for_status: green - - - do: - indices.get_settings: - index: "default-copy-settings-target" - - # settings should be copied - - match: { default-copy-settings-target.settings.index.merge.scheduler.max_merge_count: "4" } - - match: { default-copy-settings-target.settings.index.merge.scheduler.max_thread_count: "2" } - - match: { default-copy-settings-target.settings.index.blocks.write: "true" } - - match: { default-copy-settings-target.settings.index.routing.allocation.include._id: $master } - - # now we do a actual shrink and try to set no copy settings - - do: - catch: /illegal_argument_exception/ - indices.shrink: - index: "source" - target: "explicit-no-copy-settings-target" - wait_for_active_shards: 1 - master_timeout: 10s - copy_settings: false - body: - settings: - index.number_of_replicas: 0 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/30_copy_settings.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/30_copy_settings.yml index 90d4080e46379..8cf932b1c1159 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/30_copy_settings.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/30_copy_settings.yml @@ -44,15 +44,11 @@ target: "copy-settings-target" wait_for_active_shards: 1 master_timeout: 10s - copy_settings: true body: settings: index.number_of_replicas: 0 index.number_of_shards: 2 index.merge.scheduler.max_thread_count: 2 - warnings: - - "parameter [copy_settings] is deprecated and will be removed in 8.0.0" - - do: cluster.health: @@ -67,42 +63,3 @@ - match: { copy-settings-target.settings.index.merge.scheduler.max_thread_count: "2" } - match: { copy-settings-target.settings.index.blocks.write: "true" } - match: { copy-settings-target.settings.index.routing.allocation.include._id: $master } - - # now we do a actual shrink and copy settings (by default) - - do: - indices.split: - index: "source" - target: "default-copy-settings-target" - wait_for_active_shards: 1 - master_timeout: 10s - body: - settings: - index.number_of_replicas: 0 - index.number_of_shards: 2 - index.merge.scheduler.max_thread_count: 2 - - - do: - cluster.health: - wait_for_status: green - - - do: - indices.get_settings: - index: "default-copy-settings-target" - - # settings should be copied - - match: { 
default-copy-settings-target.settings.index.merge.scheduler.max_merge_count: "4" } - - match: { default-copy-settings-target.settings.index.merge.scheduler.max_thread_count: "2" } - - match: { default-copy-settings-target.settings.index.blocks.write: "true" } - - match: { default-copy-settings-target.settings.index.routing.allocation.include._id: $master } - - - do: - catch: /illegal_argument_exception/ - indices.split: - index: "source" - target: "explicit-no-copy-settings-target" - wait_for_active_shards: 1 - master_timeout: 10s - copy_settings: false - body: - settings: - index.number_of_replicas: 0 diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index e520d714bb931..c02062b2bbd63 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -124,7 +124,11 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_6_7_0 = new Version(V_6_7_0_ID, org.apache.lucene.util.Version.LUCENE_7_7_0); public static final int V_7_0_0_ID = 7000099; public static final Version V_7_0_0 = new Version(V_7_0_0_ID, org.apache.lucene.util.Version.LUCENE_8_0_0); - public static final Version CURRENT = V_7_0_0; + public static final int V_7_1_0_ID = 7010099; + public static final Version V_7_1_0 = new Version(V_7_1_0_ID, org.apache.lucene.util.Version.LUCENE_8_0_0); + public static final int V_8_0_0_ID = 8000099; + public static final Version V_8_0_0 = new Version(V_8_0_0_ID, org.apache.lucene.util.Version.LUCENE_8_0_0); + public static final Version CURRENT = V_8_0_0; static { @@ -138,6 +142,10 @@ public static Version readVersion(StreamInput in) throws IOException { public static Version fromId(int id) { switch (id) { + case V_8_0_0_ID: + return V_8_0_0; + case V_7_1_0_ID: + return V_7_1_0; case V_7_0_0_ID: return V_7_0_0; case V_6_7_0_ID: diff --git a/server/src/main/java/org/elasticsearch/action/resync/ResyncReplicationRequest.java b/server/src/main/java/org/elasticsearch/action/resync/ResyncReplicationRequest.java index a53766af7cf52..2ec0e9200d436 100644 --- a/server/src/main/java/org/elasticsearch/action/resync/ResyncReplicationRequest.java +++ b/server/src/main/java/org/elasticsearch/action/resync/ResyncReplicationRequest.java @@ -66,7 +66,8 @@ public Translog.Operation[] getOperations() { @Override public void readFrom(final StreamInput in) throws IOException { - assert Version.CURRENT.major <= 7; + // TODO: https://github.com/elastic/elasticsearch/issues/38556 + //assert Version.CURRENT.major <= 7; if (in.getVersion().equals(Version.V_6_0_0)) { /* * Resync replication request serialization was broken in 6.0.0 due to the elements of the stream not being prefixed with a diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java index 6843534107178..9eed4600738b0 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java @@ -127,7 +127,6 @@ public class Coordinator extends AbstractLifecycleComponent implements Discovery private long maxTermSeen; private final Reconfigurator reconfigurator; private final ClusterBootstrapService clusterBootstrapService; - private final DiscoveryUpgradeService discoveryUpgradeService; private final LagDetector lagDetector; private final ClusterFormationFailureHelper 
clusterFormationFailureHelper; @@ -169,8 +168,6 @@ public Coordinator(String nodeName, Settings settings, ClusterSettings clusterSe this.reconfigurator = new Reconfigurator(settings, clusterSettings); this.clusterBootstrapService = new ClusterBootstrapService(settings, transportService, this::getFoundPeers, this::isInitialConfigurationSet, this::setInitialConfiguration); - this.discoveryUpgradeService = new DiscoveryUpgradeService(settings, transportService, - this::isInitialConfigurationSet, joinHelper, peerFinder::getFoundPeers, this::setInitialConfiguration); this.lagDetector = new LagDetector(settings, transportService.getThreadPool(), n -> removeNode(n, "lagging"), transportService::getLocalNode); this.clusterFormationFailureHelper = new ClusterFormationFailureHelper(settings, this::getClusterFormationState, @@ -508,10 +505,6 @@ void becomeCandidate(String method) { peerFinder.activate(coordinationState.get().getLastAcceptedState().nodes()); clusterFormationFailureHelper.start(); - if (getCurrentTerm() == ZEN1_BWC_TERM) { - discoveryUpgradeService.activate(lastKnownLeader, coordinationState.get().getLastAcceptedState()); - } - leaderChecker.setCurrentNodes(DiscoveryNodes.EMPTY_NODES); leaderChecker.updateLeader(null); @@ -543,7 +536,6 @@ void becomeLeader(String method) { lastKnownLeader = Optional.of(getLocalNode()); peerFinder.deactivate(getLocalNode()); - discoveryUpgradeService.deactivate(); clusterFormationFailureHelper.stop(); closePrevotingAndElectionScheduler(); preVoteCollector.update(getPreVoteResponse(), getLocalNode()); @@ -575,7 +567,6 @@ void becomeFollower(String method, DiscoveryNode leaderNode) { lastKnownLeader = Optional.of(leaderNode); peerFinder.deactivate(leaderNode); - discoveryUpgradeService.deactivate(); clusterFormationFailureHelper.stop(); closePrevotingAndElectionScheduler(); cancelActivePublication("become follower: " + method); diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/DiscoveryUpgradeService.java b/server/src/main/java/org/elasticsearch/cluster/coordination/DiscoveryUpgradeService.java deleted file mode 100644 index 3bf5fa225a934..0000000000000 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/DiscoveryUpgradeService.java +++ /dev/null @@ -1,347 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.elasticsearch.cluster.coordination; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.message.ParameterizedMessage; -import org.elasticsearch.Version; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.coordination.CoordinationMetaData.VotingConfiguration; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.util.concurrent.CountDown; -import org.elasticsearch.discovery.zen.ElectMasterService; -import org.elasticsearch.discovery.zen.ElectMasterService.MasterCandidate; -import org.elasticsearch.discovery.zen.UnicastZenPing; -import org.elasticsearch.discovery.zen.UnicastZenPing.UnicastPingRequest; -import org.elasticsearch.discovery.zen.UnicastZenPing.UnicastPingResponse; -import org.elasticsearch.discovery.zen.ZenPing.PingResponse; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.threadpool.ThreadPool.Names; -import org.elasticsearch.transport.TransportException; -import org.elasticsearch.transport.TransportRequestOptions; -import org.elasticsearch.transport.TransportResponseHandler; -import org.elasticsearch.transport.TransportService; - -import java.io.IOException; -import java.util.HashSet; -import java.util.Iterator; -import java.util.Optional; -import java.util.Set; -import java.util.function.BooleanSupplier; -import java.util.function.Consumer; -import java.util.function.Supplier; -import java.util.stream.Collectors; -import java.util.stream.Stream; -import java.util.stream.StreamSupport; - -import static java.lang.Math.max; -import static org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING; -import static org.elasticsearch.cluster.ClusterState.UNKNOWN_VERSION; -import static org.elasticsearch.common.util.concurrent.ConcurrentCollections.newConcurrentSet; -import static org.elasticsearch.discovery.zen.ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING; -import static org.elasticsearch.discovery.zen.ZenDiscovery.PING_TIMEOUT_SETTING; - -/** - * Deals with rolling upgrades of the cluster coordination layer. In mixed clusters we prefer to elect the older nodes, but - * when the last old node shuts down then as long as there are enough new nodes we can assume that they form the whole cluster and - * define them as the initial configuration. - */ -public class DiscoveryUpgradeService { - - private static Logger logger = LogManager.getLogger(DiscoveryUpgradeService.class); - - // how long to wait after activation before attempting to join a master or perform a bootstrap upgrade - public static final Setting BWC_PING_TIMEOUT_SETTING = - Setting.timeSetting("discovery.zen.bwc_ping_timeout", - PING_TIMEOUT_SETTING, TimeValue.timeValueMillis(1), Setting.Property.NodeScope, Setting.Property.Deprecated); - - // whether to try and bootstrap all the discovered Zen2 nodes when the last Zen1 node leaves the cluster. 
- public static final Setting ENABLE_UNSAFE_BOOTSTRAPPING_ON_UPGRADE_SETTING = - Setting.boolSetting("discovery.zen.unsafe_rolling_upgrades_enabled", true, Setting.Property.NodeScope, Setting.Property.Deprecated); - - /** - * Dummy {@link ElectMasterService} that is only used to choose the best 6.x master from the discovered nodes, ignoring the - * `minimum_master_nodes` setting. - */ - private static final ElectMasterService electMasterService = new ElectMasterService(Settings.EMPTY); - - private final TransportService transportService; - private final BooleanSupplier isBootstrappedSupplier; - private final JoinHelper joinHelper; - private final Supplier> peersSupplier; - private final Consumer initialConfigurationConsumer; - private final TimeValue bwcPingTimeout; - private final boolean enableUnsafeBootstrappingOnUpgrade; - private final ClusterName clusterName; - - @Nullable // null if no active joining round - private volatile JoiningRound joiningRound; - - public DiscoveryUpgradeService(Settings settings, TransportService transportService, - BooleanSupplier isBootstrappedSupplier, JoinHelper joinHelper, - Supplier> peersSupplier, - Consumer initialConfigurationConsumer) { - assert Version.CURRENT.major == Version.V_6_6_0.major + 1 : "remove this service once unsafe upgrades are no longer needed"; - this.transportService = transportService; - this.isBootstrappedSupplier = isBootstrappedSupplier; - this.joinHelper = joinHelper; - this.peersSupplier = peersSupplier; - this.initialConfigurationConsumer = initialConfigurationConsumer; - this.bwcPingTimeout = BWC_PING_TIMEOUT_SETTING.get(settings); - this.enableUnsafeBootstrappingOnUpgrade = ENABLE_UNSAFE_BOOTSTRAPPING_ON_UPGRADE_SETTING.get(settings); - this.clusterName = CLUSTER_NAME_SETTING.get(settings); - } - - public void activate(Optional lastKnownLeader, ClusterState lastAcceptedClusterState) { - // called under coordinator mutex - - if (isBootstrappedSupplier.getAsBoolean()) { - return; - } - - assert lastKnownLeader.isPresent() == false || Coordinator.isZen1Node(lastKnownLeader.get()) : lastKnownLeader; - // if there was a leader and it's not a old node then we must have been bootstrapped - - final Settings dynamicSettings = lastAcceptedClusterState.metaData().settings(); - final int minimumMasterNodes = DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.exists(dynamicSettings) - ? DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.get(dynamicSettings) - : lastAcceptedClusterState.getMinimumMasterNodesOnPublishingMaster(); - - assert joiningRound == null : joiningRound; - final Set knownMasterNodeIds = new HashSet<>(); - lastAcceptedClusterState.nodes().getMasterNodes().forEach(c -> knownMasterNodeIds.add(c.key)); - - joiningRound - = new JoiningRound(enableUnsafeBootstrappingOnUpgrade && lastKnownLeader.isPresent(), minimumMasterNodes, knownMasterNodeIds); - joiningRound.scheduleNextAttempt(); - } - - public void deactivate() { - // called under coordinator mutex - joiningRound = null; - } - - /** - * Waits for some number of calls to {@link ListenableCountDown#countDown()} and then notifies a listener. The listener - * is only ever notified once, whether successful or not. 
- */ - private static class ListenableCountDown { - private final CountDown countDown; - private final ActionListener listener; - - ListenableCountDown(int count, ActionListener listener) { - this.countDown = new CountDown(count); - this.listener = listener; - } - - void onFailure(Exception e) { - if (countDown.fastForward()) { - listener.onFailure(e); - } - } - - void countDown() { - if (countDown.countDown()) { - listener.onResponse(null); - } - } - } - - private class JoiningRound { - private final boolean upgrading; - private final int minimumMasterNodes; - private final Set knownMasterNodeIds; - - JoiningRound(boolean upgrading, int minimumMasterNodes, Set knownMasterNodeIds) { - this.upgrading = upgrading; - this.minimumMasterNodes = minimumMasterNodes; - this.knownMasterNodeIds = knownMasterNodeIds; - } - - private boolean isRunning() { - return joiningRound == this && isBootstrappedSupplier.getAsBoolean() == false; - } - - private boolean canBootstrap(Set discoveryNodes) { - return upgrading && minimumMasterNodes <= discoveryNodes.stream().filter(DiscoveryNode::isMasterNode).count(); - } - - void scheduleNextAttempt() { - if (isRunning() == false) { - return; - } - - final ThreadPool threadPool = transportService.getThreadPool(); - threadPool.scheduleUnlessShuttingDown(bwcPingTimeout, Names.SAME, new Runnable() { - - @Override - public void run() { - if (isRunning() == false) { - return; - } - - final Set discoveryNodes = Stream.concat(StreamSupport.stream(peersSupplier.get().spliterator(), false), - Stream.of(transportService.getLocalNode())).filter(DiscoveryNode::isMasterNode).collect(Collectors.toSet()); - - // this set of nodes is reasonably fresh - the PeerFinder cleans up nodes to which the transport service is not - // connected each time it wakes up (every second by default) - - logger.debug("upgrading={}, minimumMasterNodes={}, nodes={}", upgrading, minimumMasterNodes, discoveryNodes); - - if (discoveryNodes.stream().anyMatch(Coordinator::isZen1Node)) { - electBestOldMaster(discoveryNodes); - } else if (canBootstrap(discoveryNodes)) { - // no Zen1 nodes found, but the last-known master was a Zen1 node, so this is a rolling upgrade - transportService.getThreadPool().generic().execute(() -> { - try { - Set nodeIds = new HashSet<>(); - discoveryNodes.forEach(n -> nodeIds.add(n.getId())); - - final Iterator knownNodeIdIterator = knownMasterNodeIds.iterator(); - while (nodeIds.size() < 2 * minimumMasterNodes - 1 && knownNodeIdIterator.hasNext()) { - nodeIds.add(knownNodeIdIterator.next()); - } - - final VotingConfiguration votingConfiguration = new VotingConfiguration(nodeIds); - assert votingConfiguration.hasQuorum( - discoveryNodes.stream().map(DiscoveryNode::getId).collect(Collectors.toList())); - assert 2 * minimumMasterNodes - 2 <= nodeIds.size() : nodeIds + " too small for " + minimumMasterNodes; - - initialConfigurationConsumer.accept(votingConfiguration); - } catch (Exception e) { - logger.debug("exception during bootstrapping upgrade, retrying", e); - } finally { - scheduleNextAttempt(); - } - }); - } else { - scheduleNextAttempt(); - } - } - - /** - * Ping all the old discovered masters one more time to obtain their cluster state versions, and then vote for the best one. - * @param discoveryNodes The master nodes (old and new). 
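To make the bootstrap sizing above concrete (an illustrative reading of the removed code, not part of the patch): with minimum_master_nodes = 2, the loop pads the initial voting configuration out to at least 2 * 2 - 1 = 3 node IDs, so a majority needs 2 votes, which the at-least-2 live master-eligible nodes can supply. If there are not enough previously known master IDs to reach 3, the trailing assertion still permits 2 * 2 - 2 = 2 IDs; a majority of 2 is then satisfied by the same live nodes, since every discovered node ID is already part of the configuration.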
- */ - private void electBestOldMaster(Set discoveryNodes) { - final Set masterCandidates = newConcurrentSet(); - final ListenableCountDown listenableCountDown - = new ListenableCountDown(discoveryNodes.size(), new ActionListener() { - - @Override - public void onResponse(Void value) { - assert masterCandidates.size() == discoveryNodes.size() - : masterCandidates + " does not match " + discoveryNodes; - - // TODO we shouldn't elect a master with a version that's older than ours - // If the only Zen1 nodes left are stale, and we can bootstrap, maybe we should bootstrap? - // Do we ever need to elect a freshly-started Zen1 node? - if (isRunning()) { - final MasterCandidate electedMaster = electMasterService.electMaster(masterCandidates); - logger.debug("elected {}, sending join", electedMaster); - joinHelper.sendJoinRequest(electedMaster.getNode(), Optional.empty(), - JoiningRound.this::scheduleNextAttempt); - } - } - - @Override - public void onFailure(Exception e) { - scheduleNextAttempt(); - } - }); - - boolean foundOldMaster = false; - for (final DiscoveryNode discoveryNode : discoveryNodes) { - assert discoveryNode.isMasterNode() : discoveryNode; - if (Coordinator.isZen1Node(discoveryNode)) { - foundOldMaster = true; - transportService.sendRequest(discoveryNode, UnicastZenPing.ACTION_NAME, - new UnicastPingRequest(0, TimeValue.ZERO, - new PingResponse(createDiscoveryNodeWithImpossiblyHighId(transportService.getLocalNode()), - null, clusterName, UNKNOWN_VERSION)), - TransportRequestOptions.builder().withTimeout(bwcPingTimeout).build(), - new TransportResponseHandler() { - @Override - public void handleResponse(UnicastPingResponse response) { - long clusterStateVersion = UNKNOWN_VERSION; - for (PingResponse pingResponse : response.pingResponses) { - if (discoveryNode.equals(pingResponse.node())) { - clusterStateVersion - = max(clusterStateVersion, pingResponse.getClusterStateVersion()); - } - } - masterCandidates.add(new MasterCandidate(discoveryNode, clusterStateVersion)); - listenableCountDown.countDown(); - } - - @Override - public void handleException(TransportException exp) { - logger.debug( - new ParameterizedMessage("unexpected exception when pinging {}", discoveryNode), exp); - listenableCountDown.onFailure(exp); - } - - @Override - public String executor() { - return Names.SAME; - } - - @Override - public UnicastPingResponse read(StreamInput in) throws IOException { - return new UnicastPingResponse(in); - } - }); - - } else { - masterCandidates.add( - new MasterCandidate(createDiscoveryNodeWithImpossiblyHighId(discoveryNode), UNKNOWN_VERSION)); - listenableCountDown.countDown(); - } - } - assert foundOldMaster; - } - - @Override - public String toString() { - return "discovery upgrade service retry"; - } - }); - } - } - - /** - * Pre-7.0 nodes select the best master by comparing their IDs (as strings) and selecting the lowest one amongst those nodes with - * the best cluster state version. We want 7.0+ nodes to participate in these elections in a mixed cluster but never to win one, so - * we lie and claim to have an impossible ID that compares above all genuine IDs. - */ - public static DiscoveryNode createDiscoveryNodeWithImpossiblyHighId(DiscoveryNode node) { - // IDs are base-64-encoded UUIDs, which means they use the character set [0-9A-Za-z_-]. The highest character in this set is 'z', - // and 'z' < '{', so by starting the ID with '{' we can be sure it's greater. This is terrible. 
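The removed comment above describes the lexicographic trick that keeps 7.0+ nodes from ever winning a Zen1 election. A minimal, self-contained sketch of that comparison (illustrative only; the class name and sample IDs are made up, not part of the patch):

    public class ImpossiblyHighIdDemo {
        public static void main(String[] args) {
            // Base-64 node IDs only use characters from [0-9A-Za-z_-]; 'z' (0x7A) is the highest of them.
            String highestRealisticId = "zzzzzzzzzzzzzzzzzzzzzz";
            // Prefixing with '{' (0x7B) therefore yields an ID that sorts above every genuine node ID.
            String fakeId = "{zen2}" + "c29tZVJlYWxOb2RlSWQ";
            System.out.println(fakeId.compareTo(highestRealisticId) > 0); // prints true
        }
    }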
- return new DiscoveryNode(node.getName(), "{zen2}" + node.getId(), node.getEphemeralId(), node.getHostName(), - node.getHostAddress(), node.getAddress(), node.getAttributes(), node.getRoles(), node.getVersion()); - } -} diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/PublicationTransportHandler.java b/server/src/main/java/org/elasticsearch/cluster/coordination/PublicationTransportHandler.java index b0b91cd0980f2..241b9b3f82231 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/PublicationTransportHandler.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/PublicationTransportHandler.java @@ -93,31 +93,9 @@ public PublicationTransportHandler(TransportService transportService, NamedWrite transportService.registerRequestHandler(PUBLISH_STATE_ACTION_NAME, BytesTransportRequest::new, ThreadPool.Names.GENERIC, false, false, (request, channel, task) -> channel.sendResponse(handleIncomingPublishRequest(request))); - transportService.registerRequestHandler(PublishClusterStateAction.SEND_ACTION_NAME, BytesTransportRequest::new, - ThreadPool.Names.GENERIC, - false, false, (request, channel, task) -> { - handleIncomingPublishRequest(request); - channel.sendResponse(TransportResponse.Empty.INSTANCE); - }); - transportService.registerRequestHandler(COMMIT_STATE_ACTION_NAME, ThreadPool.Names.GENERIC, false, false, ApplyCommitRequest::new, (request, channel, task) -> handleApplyCommit.accept(request, transportCommitCallback(channel))); - - transportService.registerRequestHandler(PublishClusterStateAction.COMMIT_ACTION_NAME, - PublishClusterStateAction.CommitClusterStateRequest::new, - ThreadPool.Names.GENERIC, false, false, - (request, channel, task) -> { - final Optional matchingClusterState = Optional.ofNullable(lastSeenClusterState.get()).filter( - cs -> cs.stateUUID().equals(request.stateUUID)); - if (matchingClusterState.isPresent() == false) { - throw new IllegalStateException("can't resolve cluster state with uuid" + - " [" + request.stateUUID + "] to commit"); - } - final ApplyCommitRequest applyCommitRequest = new ApplyCommitRequest(matchingClusterState.get().getNodes().getMasterNode(), - matchingClusterState.get().term(), matchingClusterState.get().version()); - handleApplyCommit.accept(applyCommitRequest, transportCommitCallback(channel)); - }); } private ActionListener transportCommitCallback(TransportChannel channel) { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java index 838b1e2547204..2e18e5aeae50a 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java @@ -590,7 +590,8 @@ public ClusterState execute(ClusterState currentState) throws Exception { static int getNumberOfShards(final Settings.Builder indexSettingsBuilder) { // TODO: this logic can be removed when the current major version is 8 - assert Version.CURRENT.major == 7; + // TODO: https://github.com/elastic/elasticsearch/issues/38556 + // assert Version.CURRENT.major == 7; final int numberOfShards; final Version indexVersionCreated = Version.fromId(Integer.parseInt(indexSettingsBuilder.get(IndexMetaData.SETTING_INDEX_VERSION_CREATED.getKey()))); diff --git a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java 
b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index 0ae30c465b26e..61c3dd9adadad 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -35,7 +35,6 @@ import org.elasticsearch.cluster.coordination.ClusterBootstrapService; import org.elasticsearch.cluster.coordination.ClusterFormationFailureHelper; import org.elasticsearch.cluster.coordination.Coordinator; -import org.elasticsearch.cluster.coordination.DiscoveryUpgradeService; import org.elasticsearch.cluster.coordination.ElectionSchedulerFactory; import org.elasticsearch.cluster.coordination.FollowersChecker; import org.elasticsearch.cluster.coordination.JoinHelper; @@ -67,9 +66,9 @@ import org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.discovery.PeerFinder; +import org.elasticsearch.discovery.SettingsBasedSeedHostsProvider; import org.elasticsearch.discovery.zen.ElectMasterService; import org.elasticsearch.discovery.zen.FaultDetection; -import org.elasticsearch.discovery.SettingsBasedSeedHostsProvider; import org.elasticsearch.discovery.zen.UnicastZenPing; import org.elasticsearch.discovery.zen.ZenDiscovery; import org.elasticsearch.env.Environment; @@ -493,9 +492,7 @@ public void apply(Settings value, Settings current, Settings previous) { TransportAddVotingConfigExclusionsAction.MAXIMUM_VOTING_CONFIG_EXCLUSIONS_SETTING, ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING, ClusterBootstrapService.UNCONFIGURED_BOOTSTRAP_TIMEOUT_SETTING, - LagDetector.CLUSTER_FOLLOWER_LAG_TIMEOUT_SETTING, - DiscoveryUpgradeService.BWC_PING_TIMEOUT_SETTING, - DiscoveryUpgradeService.ENABLE_UNSAFE_BOOTSTRAPPING_ON_UPGRADE_SETTING + LagDetector.CLUSTER_FOLLOWER_LAG_TIMEOUT_SETTING ))); public static List> BUILT_IN_SETTING_UPGRADERS = Collections.unmodifiableList(Arrays.asList( diff --git a/server/src/main/java/org/elasticsearch/common/time/DateUtils.java b/server/src/main/java/org/elasticsearch/common/time/DateUtils.java index 20646ae14599a..4235344bd8371 100644 --- a/server/src/main/java/org/elasticsearch/common/time/DateUtils.java +++ b/server/src/main/java/org/elasticsearch/common/time/DateUtils.java @@ -23,9 +23,12 @@ import org.elasticsearch.common.logging.DeprecationLogger; import org.joda.time.DateTimeZone; +import java.time.Clock; +import java.time.Duration; import java.time.Instant; import java.time.ZoneId; import java.time.ZoneOffset; +import java.time.ZonedDateTime; import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ -139,4 +142,18 @@ public static long toMilliSeconds(long nanoSecondsSinceEpoch) { return nanoSecondsSinceEpoch / 1_000_000; } + + /** + * Returns the current UTC date-time with milliseconds precision. + * In Java 9+ (as opposed to Java 8) the {@code Clock} implementation uses system's best clock implementation (which could mean + * that the precision of the clock can be milliseconds, microseconds or nanoseconds), whereas in Java 8 + * {@code System.currentTimeMillis()} is always used. To account for these differences, this method defines a new {@code Clock} + * which will offer a value for {@code ZonedDateTime.now()} set to always have milliseconds precision. 
+ * + * @return {@link ZonedDateTime} instance for the current date-time with milliseconds precision in UTC + */ + public static ZonedDateTime nowWithMillisResolution() { + Clock millisResolutionClock = Clock.tick(Clock.systemUTC(), Duration.ofMillis(1)); + return ZonedDateTime.now(millisResolutionClock); + } } diff --git a/server/src/main/java/org/elasticsearch/discovery/PeerFinder.java b/server/src/main/java/org/elasticsearch/discovery/PeerFinder.java index 58248b8183d34..839661ca87e8a 100644 --- a/server/src/main/java/org/elasticsearch/discovery/PeerFinder.java +++ b/server/src/main/java/org/elasticsearch/discovery/PeerFinder.java @@ -25,8 +25,6 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.util.SetOnce; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.coordination.PeersResponse; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; @@ -37,24 +35,13 @@ import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.AbstractRunnable; -import org.elasticsearch.discovery.zen.UnicastZenPing; -import org.elasticsearch.discovery.zen.ZenDiscovery; -import org.elasticsearch.discovery.zen.ZenPing; -import org.elasticsearch.discovery.zen.ZenPing.PingResponse; -import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool.Names; -import org.elasticsearch.transport.TransportChannel; import org.elasticsearch.transport.TransportException; -import org.elasticsearch.transport.TransportRequest; -import org.elasticsearch.transport.TransportRequestHandler; import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportResponseHandler; import org.elasticsearch.transport.TransportService; import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; @@ -64,8 +51,6 @@ import java.util.stream.Collectors; import static java.util.Collections.emptyList; -import static org.elasticsearch.cluster.coordination.Coordinator.isZen1Node; -import static org.elasticsearch.cluster.coordination.DiscoveryUpgradeService.createDiscoveryNodeWithImpossiblyHighId; public abstract class PeerFinder { @@ -82,8 +67,6 @@ public abstract class PeerFinder { Setting.timeSetting("discovery.request_peers_timeout", TimeValue.timeValueMillis(3000), TimeValue.timeValueMillis(1), Setting.Property.NodeScope); - private final Settings settings; - private final TimeValue findPeersInterval; private final TimeValue requestPeersTimeout; @@ -101,7 +84,6 @@ public abstract class PeerFinder { public PeerFinder(Settings settings, TransportService transportService, TransportAddressConnector transportAddressConnector, ConfiguredHostsResolver configuredHostsResolver) { - this.settings = settings; findPeersInterval = DISCOVERY_FIND_PEERS_INTERVAL_SETTING.get(settings); requestPeersTimeout = DISCOVERY_REQUEST_PEERS_TIMEOUT_SETTING.get(settings); this.transportService = transportService; @@ -111,9 +93,6 @@ public PeerFinder(Settings settings, TransportService transportService, Transpor transportService.registerRequestHandler(REQUEST_PEERS_ACTION_NAME, Names.GENERIC, false, false, PeersRequest::new, (request, channel, task) -> 
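The DateUtils helper added above relies on Clock.tick to force millisecond resolution regardless of the JDK's default clock precision. A small usage sketch built the same way (the class name is hypothetical, not part of the patch):

    import java.time.Clock;
    import java.time.Duration;
    import java.time.ZonedDateTime;

    public class MillisClockDemo {
        public static void main(String[] args) {
            // A UTC clock truncated to whole milliseconds, as in nowWithMillisResolution().
            Clock millisClock = Clock.tick(Clock.systemUTC(), Duration.ofMillis(1));
            ZonedDateTime now = ZonedDateTime.now(millisClock);
            // On Java 9+ the system clock may report micro- or nanosecond precision; the ticking
            // clock guarantees the nano-of-second field is always a whole number of milliseconds.
            System.out.println(now.getNano() % 1_000_000 == 0); // prints true
        }
    }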
channel.sendResponse(handlePeersRequest(request))); - - transportService.registerRequestHandler(UnicastZenPing.ACTION_NAME, Names.GENERIC, false, false, - UnicastZenPing.UnicastPingRequest::new, new Zen1UnicastPingRequestHandler()); } public void activate(final DiscoveryNodes lastAcceptedNodes) { @@ -453,36 +432,10 @@ public String executor() { return Names.GENERIC; } }; - final String actionName; - final TransportRequest transportRequest; - final TransportResponseHandler transportResponseHandler; - if (isZen1Node(discoveryNode)) { - actionName = UnicastZenPing.ACTION_NAME; - transportRequest = new UnicastZenPing.UnicastPingRequest(1, ZenDiscovery.PING_TIMEOUT_SETTING.get(settings), - new ZenPing.PingResponse(createDiscoveryNodeWithImpossiblyHighId(getLocalNode()), null, - ClusterName.CLUSTER_NAME_SETTING.get(settings), ClusterState.UNKNOWN_VERSION)); - transportResponseHandler = peersResponseHandler.wrap(ucResponse -> { - Optional optionalMasterNode = Arrays.stream(ucResponse.pingResponses) - .filter(pr -> discoveryNode.equals(pr.node()) && discoveryNode.equals(pr.master())) - .map(ZenPing.PingResponse::node) - .findFirst(); - List discoveredNodes = new ArrayList<>(); - if (optionalMasterNode.isPresent() == false) { - Arrays.stream(ucResponse.pingResponses).map(PingResponse::master).filter(Objects::nonNull) - .forEach(discoveredNodes::add); - Arrays.stream(ucResponse.pingResponses).map(PingResponse::node).forEach(discoveredNodes::add); - } - return new PeersResponse(optionalMasterNode, discoveredNodes, 0L); - }, UnicastZenPing.UnicastPingResponse::new); - } else { - actionName = REQUEST_PEERS_ACTION_NAME; - transportRequest = new PeersRequest(getLocalNode(), knownNodes); - transportResponseHandler = peersResponseHandler; - } - transportService.sendRequest(discoveryNode, actionName, - transportRequest, + transportService.sendRequest(discoveryNode, REQUEST_PEERS_ACTION_NAME, + new PeersRequest(getLocalNode(), knownNodes), TransportRequestOptions.builder().withTimeout(requestPeersTimeout).build(), - transportResponseHandler); + peersResponseHandler); } @Override @@ -494,22 +447,4 @@ public String toString() { '}'; } } - - private class Zen1UnicastPingRequestHandler implements TransportRequestHandler { - @Override - public void messageReceived(UnicastZenPing.UnicastPingRequest request, TransportChannel channel, Task task) throws Exception { - final PeersRequest peersRequest = new PeersRequest(request.pingResponse.node(), - Optional.ofNullable(request.pingResponse.master()).map(Collections::singletonList).orElse(emptyList())); - final PeersResponse peersResponse = handlePeersRequest(peersRequest); - final List pingResponses = new ArrayList<>(); - final ClusterName clusterName = ClusterName.CLUSTER_NAME_SETTING.get(settings); - pingResponses.add(new ZenPing.PingResponse(createDiscoveryNodeWithImpossiblyHighId(transportService.getLocalNode()), - peersResponse.getMasterNode().orElse(null), - clusterName, ClusterState.UNKNOWN_VERSION)); - peersResponse.getKnownPeers().forEach(dn -> pingResponses.add( - new ZenPing.PingResponse(ZenPing.PingResponse.FAKE_PING_ID, - isZen1Node(dn) ? 
dn : createDiscoveryNodeWithImpossiblyHighId(dn), null, clusterName, ClusterState.UNKNOWN_VERSION))); - channel.sendResponse(new UnicastZenPing.UnicastPingResponse(request.id, pingResponses.toArray(new ZenPing.PingResponse[0]))); - } - } } diff --git a/server/src/main/java/org/elasticsearch/gateway/MetaStateService.java b/server/src/main/java/org/elasticsearch/gateway/MetaStateService.java index 7bef4d5819e45..d67cdccb9a09b 100644 --- a/server/src/main/java/org/elasticsearch/gateway/MetaStateService.java +++ b/server/src/main/java/org/elasticsearch/gateway/MetaStateService.java @@ -21,7 +21,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.Manifest; import org.elasticsearch.cluster.metadata.MetaData; @@ -118,7 +117,8 @@ private Tuple loadFullStateBWC() throws IOException { if (globalMetaData != null) { metaDataBuilder = MetaData.builder(globalMetaData); - assert Version.CURRENT.major < 8 : "failed to find manifest file, which is mandatory staring with Elasticsearch version 8.0"; + // TODO https://github.com/elastic/elasticsearch/issues/38556 + // assert Version.CURRENT.major < 8 : "failed to find manifest file, which is mandatory staring with Elasticsearch version 8.0"; } else { metaDataBuilder = MetaData.builder(); } @@ -127,7 +127,8 @@ private Tuple loadFullStateBWC() throws IOException { Tuple indexMetaDataAndGeneration = INDEX_META_DATA_FORMAT.loadLatestStateWithGeneration(logger, namedXContentRegistry, nodeEnv.resolveIndexFolder(indexFolderName)); - assert Version.CURRENT.major < 8 : "failed to find manifest file, which is mandatory staring with Elasticsearch version 8.0"; + // TODO https://github.com/elastic/elasticsearch/issues/38556 + // assert Version.CURRENT.major < 8 : "failed to find manifest file, which is mandatory staring with Elasticsearch version 8.0"; IndexMetaData indexMetaData = indexMetaDataAndGeneration.v1(); long generation = indexMetaDataAndGeneration.v2(); if (indexMetaData != null) { @@ -291,4 +292,4 @@ public void writeGlobalStateAndUpdateManifest(String reason, MetaData metaData) writeManifestAndCleanup(reason, manifest); cleanupGlobalState(generation); } -} \ No newline at end of file +} diff --git a/server/src/main/java/org/elasticsearch/node/InternalSettingsPreparer.java b/server/src/main/java/org/elasticsearch/node/InternalSettingsPreparer.java index e0157fff23cfd..b73268d9985b5 100644 --- a/server/src/main/java/org/elasticsearch/node/InternalSettingsPreparer.java +++ b/server/src/main/java/org/elasticsearch/node/InternalSettingsPreparer.java @@ -19,6 +19,11 @@ package org.elasticsearch.node; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsException; +import org.elasticsearch.env.Environment; + import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; @@ -26,21 +31,11 @@ import java.util.Collections; import java.util.List; import java.util.Map; -import java.util.function.Supplier; import java.util.function.Function; - -import org.elasticsearch.Version; -import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.settings.SettingsException; -import org.elasticsearch.env.Environment; -import org.elasticsearch.node.Node; +import java.util.function.Supplier; public class 
InternalSettingsPreparer { - private static final String SECRET_PROMPT_VALUE = "${prompt.secret}"; - private static final String TEXT_PROMPT_VALUE = "${prompt.text}"; - /** * Prepares settings for the transport client by gathering all * elasticsearch system properties and setting defaults. @@ -88,7 +83,6 @@ public static Environment prepareEnvironment(Settings input, Map // re-initialize settings now that the config file has been loaded initializeSettings(output, input, properties); - checkSettingsForTerminalDeprecation(output); finalizeSettings(output, defaultNodeName); environment = new Environment(output.build(), configPath); @@ -112,25 +106,6 @@ static void initializeSettings(final Settings.Builder output, final Settings inp output.replacePropertyPlaceholders(); } - /** - * Checks all settings values to make sure they do not have the old prompt settings. These were deprecated in 6.0.0. - * This check should be removed in 8.0.0. - */ - private static void checkSettingsForTerminalDeprecation(final Settings.Builder output) throws SettingsException { - // This method to be removed in 8.0.0, as it was deprecated in 6.0 and removed in 7.0 - assert Version.CURRENT.major != 8: "Logic pertaining to config driven prompting should be removed"; - for (String setting : output.keys()) { - switch (output.get(setting)) { - case SECRET_PROMPT_VALUE: - throw new SettingsException("Config driven secret prompting was deprecated in 6.0.0. Use the keystore" + - " for secure settings."); - case TEXT_PROMPT_VALUE: - throw new SettingsException("Config driven text prompting was deprecated in 6.0.0. Use the keystore" + - " for secure settings."); - } - } - } - /** * Finish preparing settings by replacing forced settings and any defaults that need to be added. */ diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestResizeHandler.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestResizeHandler.java index 60c481e59878f..3d0158cf95f0f 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestResizeHandler.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestResizeHandler.java @@ -19,15 +19,10 @@ package org.elasticsearch.rest.action.admin.indices; -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.LogManager; -import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.shrink.ResizeRequest; import org.elasticsearch.action.admin.indices.shrink.ResizeType; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.client.node.NodeClient; -import org.elasticsearch.common.Booleans; -import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; @@ -37,8 +32,6 @@ import java.io.IOException; public abstract class RestResizeHandler extends BaseRestHandler { - private static final Logger logger = LogManager.getLogger(RestResizeHandler.class); - private static final DeprecationLogger deprecationLogger = new DeprecationLogger(logger); RestResizeHandler(final Settings settings) { super(settings); @@ -53,24 +46,6 @@ public abstract class RestResizeHandler extends BaseRestHandler { public final RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { final ResizeRequest resizeRequest = new ResizeRequest(request.param("target"), request.param("index")); 
resizeRequest.setResizeType(getResizeType()); - // copy_settings should be removed in Elasticsearch 8.0.0; cf. https://github.com/elastic/elasticsearch/issues/28347 - assert Version.CURRENT.major < 8; - final String rawCopySettings = request.param("copy_settings"); - final Boolean copySettings; - if (rawCopySettings == null) { - copySettings = resizeRequest.getCopySettings(); - } else { - if (rawCopySettings.isEmpty()) { - copySettings = true; - } else { - copySettings = Booleans.parseBoolean(rawCopySettings); - if (copySettings == false) { - throw new IllegalArgumentException("parameter [copy_settings] can not be explicitly set to [false]"); - } - } - deprecationLogger.deprecated("parameter [copy_settings] is deprecated and will be removed in 8.0.0"); - } - resizeRequest.setCopySettings(copySettings); request.applyContentParser(resizeRequest::fromXContent); resizeRequest.timeout(request.paramAsTime("timeout", resizeRequest.timeout())); resizeRequest.masterNodeTimeout(request.paramAsTime("master_timeout", resizeRequest.masterNodeTimeout())); diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java index 9b9243b612b74..ff06e59d4f729 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java @@ -57,7 +57,8 @@ public abstract class RemoteClusterAware { static { // remove search.remote.* settings in 8.0.0 - assert Version.CURRENT.major < 8; + // TODO https://github.com/elastic/elasticsearch/issues/38556 + // assert Version.CURRENT.major < 8; } public static final Setting.AffixSetting> SEARCH_REMOTE_CLUSTERS_SEEDS = diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java index 009ee48dd8a99..a126337aa0e54 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java @@ -69,7 +69,8 @@ public final class RemoteClusterService extends RemoteClusterAware implements Cl static { // remove search.remote.* settings in 8.0.0 - assert Version.CURRENT.major < 8; + // TODO + // assert Version.CURRENT.major < 8; } public static final Setting SEARCH_REMOTE_CONNECTIONS_PER_CLUSTER = diff --git a/server/src/test/java/org/elasticsearch/VersionTests.java b/server/src/test/java/org/elasticsearch/VersionTests.java index 08db8dfaf2100..99077cbea70db 100644 --- a/server/src/test/java/org/elasticsearch/VersionTests.java +++ b/server/src/test/java/org/elasticsearch/VersionTests.java @@ -367,10 +367,9 @@ public void testIsCompatible() { () -> new IllegalStateException("expected previous minor version for [" + currentOrNextMajorVersion + "]")); final Version previousMinorVersion = VersionUtils.getPreviousMinorVersion(); - assert previousMinorVersion.major == currentOrNextMajorVersion.major - || previousMinorVersion.major == lastMinorFromPreviousMajor.major; - boolean isCompatible = previousMinorVersion.major == currentOrNextMajorVersion.major - || previousMinorVersion.minor == lastMinorFromPreviousMajor.minor; + boolean isCompatible = + previousMinorVersion.major == currentOrNextMajorVersion.major + || previousMinorVersion.minor == lastMinorFromPreviousMajor.minor; final String message = String.format( Locale.ROOT, diff --git 
a/server/src/test/java/org/elasticsearch/cluster/coordination/DiscoveryUpgradeServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/DiscoveryUpgradeServiceTests.java deleted file mode 100644 index 01e9c1403313c..0000000000000 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/DiscoveryUpgradeServiceTests.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.cluster.coordination; - -import org.elasticsearch.Version; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.UUIDs; -import org.elasticsearch.test.ESTestCase; - -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.lessThan; - -public class DiscoveryUpgradeServiceTests extends ESTestCase { - public void testCreateDiscoveryNodeWithImpossiblyHighId() { - final DiscoveryNode discoveryNode - = new DiscoveryNode(UUIDs.randomBase64UUID(random()), buildNewFakeTransportAddress(), Version.CURRENT); - final DiscoveryNode fakeNode = DiscoveryUpgradeService.createDiscoveryNodeWithImpossiblyHighId(discoveryNode); - assertThat(discoveryNode.getId(), lessThan(fakeNode.getId())); - assertThat(UUIDs.randomBase64UUID(random()), lessThan(fakeNode.getId())); - assertThat(fakeNode.getId(), containsString(discoveryNode.getId())); - } -} diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/Zen1IT.java b/server/src/test/java/org/elasticsearch/cluster/coordination/Zen1IT.java deleted file mode 100644 index 3ba9f58523d16..0000000000000 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/Zen1IT.java +++ /dev/null @@ -1,373 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.elasticsearch.cluster.coordination; - -import org.elasticsearch.action.admin.cluster.configuration.AddVotingConfigExclusionsAction; -import org.elasticsearch.action.admin.cluster.configuration.AddVotingConfigExclusionsRequest; -import org.elasticsearch.action.admin.cluster.configuration.ClearVotingConfigExclusionsAction; -import org.elasticsearch.action.admin.cluster.configuration.ClearVotingConfigExclusionsRequest; -import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest; -import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequestBuilder; -import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; -import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; -import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; -import org.elasticsearch.client.Client; -import org.elasticsearch.client.Requests; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.cluster.metadata.Manifest; -import org.elasticsearch.cluster.routing.UnassignedInfo; -import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.Allocation; -import org.elasticsearch.common.Priority; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.discovery.Discovery; -import org.elasticsearch.discovery.DiscoveryModule; -import org.elasticsearch.discovery.zen.ElectMasterService; -import org.elasticsearch.env.NodeEnvironment; -import org.elasticsearch.gateway.MetaStateService; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.InternalTestCluster.RestartCallback; -import org.elasticsearch.test.transport.MockTransportService; -import org.elasticsearch.transport.TransportService; - -import java.util.Collection; -import java.util.Collections; -import java.util.List; -import java.util.stream.Collectors; -import java.util.stream.IntStream; -import java.util.stream.StreamSupport; - -import static org.elasticsearch.cluster.coordination.ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING; -import static org.elasticsearch.cluster.coordination.Coordinator.ZEN1_BWC_TERM; -import static org.elasticsearch.cluster.coordination.FollowersChecker.FOLLOWER_CHECK_ACTION_NAME; -import static org.elasticsearch.cluster.coordination.JoinHelper.START_JOIN_ACTION_NAME; -import static org.elasticsearch.cluster.coordination.PublicationTransportHandler.PUBLISH_STATE_ACTION_NAME; -import static org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING; -import static org.elasticsearch.cluster.routing.allocation.decider.FilterAllocationDecider.CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING; -import static org.elasticsearch.node.Node.NODE_NAME_SETTING; -import static org.elasticsearch.test.InternalTestCluster.REMOVED_MINIMUM_MASTER_NODES; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.is; - -@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0) -public class Zen1IT extends ESIntegTestCase { - - private static Settings ZEN1_SETTINGS = Coordinator.addZen1Attribute(true, Settings.builder() - .put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), DiscoveryModule.ZEN_DISCOVERY_TYPE)).build(); - - private static Settings ZEN2_SETTINGS = Settings.builder() - 
.put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), DiscoveryModule.ZEN2_DISCOVERY_TYPE) - .build(); - - protected Collection> nodePlugins() { - return Collections.singletonList(MockTransportService.TestPlugin.class); - } - - public void testZen2NodesJoiningZen1Cluster() { - internalCluster().startNodes(randomIntBetween(1, 3), ZEN1_SETTINGS); - internalCluster().startNodes(randomIntBetween(1, 3), ZEN2_SETTINGS); - createIndex("test"); - } - - public void testZen1NodesJoiningZen2Cluster() { - internalCluster().startNodes(randomIntBetween(1, 3), ZEN2_SETTINGS); - internalCluster().startNodes(randomIntBetween(1, 3), ZEN1_SETTINGS); - createIndex("test"); - } - - public void testMixedClusterDisruption() throws Exception { - final List nodes = internalCluster().startNodes(IntStream.range(0, 5) - .mapToObj(i -> i < 2 ? ZEN1_SETTINGS : ZEN2_SETTINGS).toArray(Settings[]::new)); - - final List transportServices = nodes.stream() - .map(n -> (MockTransportService) internalCluster().getInstance(TransportService.class, n)).collect(Collectors.toList()); - - logger.info("--> disrupting communications"); - - // The idea here is to make some of the Zen2 nodes believe the Zen1 nodes have gone away by introducing a network partition, so that - // they bootstrap themselves, but keep the Zen1 side of the cluster alive. - - // Set up a bridged network partition with the Zen1 nodes {0,1} on one side, Zen2 nodes {3,4} on the other, and node {2} in both - transportServices.get(0).addFailToSendNoConnectRule(transportServices.get(3)); - transportServices.get(0).addFailToSendNoConnectRule(transportServices.get(4)); - transportServices.get(1).addFailToSendNoConnectRule(transportServices.get(3)); - transportServices.get(1).addFailToSendNoConnectRule(transportServices.get(4)); - transportServices.get(3).addFailToSendNoConnectRule(transportServices.get(0)); - transportServices.get(3).addFailToSendNoConnectRule(transportServices.get(1)); - transportServices.get(4).addFailToSendNoConnectRule(transportServices.get(0)); - transportServices.get(4).addFailToSendNoConnectRule(transportServices.get(1)); - - // Nodes 3 and 4 will bootstrap, but we want to keep node 2 as part of the Zen1 cluster, so prevent any messages that might switch - // its allegiance - transportServices.get(3).addFailToSendNoConnectRule(transportServices.get(2), - PUBLISH_STATE_ACTION_NAME, FOLLOWER_CHECK_ACTION_NAME, START_JOIN_ACTION_NAME); - transportServices.get(4).addFailToSendNoConnectRule(transportServices.get(2), - PUBLISH_STATE_ACTION_NAME, FOLLOWER_CHECK_ACTION_NAME, START_JOIN_ACTION_NAME); - - logger.info("--> waiting for disconnected nodes to be removed"); - ensureStableCluster(3, nodes.get(0)); - - logger.info("--> creating index on Zen1 side"); - assertAcked(client(nodes.get(0)).admin().indices().create(new CreateIndexRequest("test")).get()); - assertFalse(client(nodes.get(0)).admin().cluster().health(new ClusterHealthRequest("test") - .waitForGreenStatus()).get().isTimedOut()); - - logger.info("--> waiting for disconnected nodes to bootstrap themselves"); - assertBusy(() -> assertTrue(IntStream.range(3, 5) - .mapToObj(n -> (Coordinator) internalCluster().getInstance(Discovery.class, nodes.get(n))) - .anyMatch(Coordinator::isInitialConfigurationSet))); - - logger.info("--> clearing disruption and waiting for cluster to reform"); - transportServices.forEach(MockTransportService::clearAllRules); - - ensureStableCluster(5, nodes.get(0)); - assertFalse(client(nodes.get(0)).admin().cluster().health(new ClusterHealthRequest("test") - 
.waitForGreenStatus()).get().isTimedOut()); - } - - public void testMixedClusterFormation() throws Exception { - final int zen1NodeCount = randomIntBetween(1, 3); - final int zen2NodeCount = randomIntBetween(zen1NodeCount == 1 ? 2 : 1, 3); - logger.info("starting cluster of [{}] Zen1 nodes and [{}] Zen2 nodes", zen1NodeCount, zen2NodeCount); - final List nodes = internalCluster().startNodes(IntStream.range(0, zen1NodeCount + zen2NodeCount) - .mapToObj(i -> i < zen1NodeCount ? ZEN1_SETTINGS : ZEN2_SETTINGS).toArray(Settings[]::new)); - - createIndex("test", - Settings.builder() - .put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), TimeValue.ZERO) // assign shards - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, zen1NodeCount + zen2NodeCount + randomIntBetween(0, 2)) // causes rebalancing - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) - .build()); - ensureGreen("test"); - - for (final String node : nodes) { - // With 1 Zen1 node when you stop the Zen1 node the Zen2 nodes might auto-bootstrap. - // But there are only 2 Zen2 nodes so you must do the right things with voting config exclusions to keep the cluster - // alive through the other two restarts. - final boolean masterNodeIsZen2 = zen1NodeCount <= nodes.indexOf(internalCluster().getMasterName()); - final boolean thisNodeIsZen2 = zen1NodeCount <= nodes.indexOf(node); - final boolean requiresVotingConfigExclusions = zen1NodeCount == 1 && zen2NodeCount == 2 && masterNodeIsZen2 && thisNodeIsZen2; - - if (requiresVotingConfigExclusions) { - client().execute(AddVotingConfigExclusionsAction.INSTANCE, - new AddVotingConfigExclusionsRequest(new String[]{node})).get(); - } - - internalCluster().restartNode(node, new RestartCallback() { - @Override - public Settings onNodeStopped(String restartingNode) { - String viaNode = randomValueOtherThan(restartingNode, () -> randomFrom(nodes)); - final ClusterHealthRequestBuilder clusterHealthRequestBuilder = client(viaNode).admin().cluster().prepareHealth() - .setWaitForEvents(Priority.LANGUID) - .setWaitForNodes(Integer.toString(zen1NodeCount + zen2NodeCount - 1)) - .setTimeout(TimeValue.timeValueSeconds(30)); - ClusterHealthResponse clusterHealthResponse = clusterHealthRequestBuilder.get(); - assertFalse(restartingNode, clusterHealthResponse.isTimedOut()); - return Settings.EMPTY; - } - }); - ensureStableCluster(zen1NodeCount + zen2NodeCount); - ensureGreen("test"); - - if (requiresVotingConfigExclusions) { - final ClearVotingConfigExclusionsRequest clearVotingTombstonesRequest = new ClearVotingConfigExclusionsRequest(); - clearVotingTombstonesRequest.setWaitForRemoval(false); - client().execute(ClearVotingConfigExclusionsAction.INSTANCE, clearVotingTombstonesRequest).get(); - } - } - } - - public void testRollingMigrationFromZen1ToZen2() throws Exception { - final int nodeCount = randomIntBetween(2, 5); - final List zen1Nodes = internalCluster().startNodes(nodeCount, ZEN1_SETTINGS); - - createIndex("test", - Settings.builder() - .put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), TimeValue.ZERO) // assign shards - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, nodeCount) // causes rebalancing - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) - .build()); - ensureGreen("test"); - - for (final String zen1Node : zen1Nodes) { - logger.info("--> shutting down {}", zen1Node); - internalCluster().stopRandomNode(s -> NODE_NAME_SETTING.get(s).equals(zen1Node)); - - ensureStableCluster(nodeCount - 1); - if (nodeCount > 2) { - ensureGreen("test"); - } else { - 
ensureYellow("test"); - } - - logger.info("--> starting replacement for {}", zen1Node); - final String newNode = internalCluster().startNode(ZEN2_SETTINGS); - ensureStableCluster(nodeCount); - ensureGreen("test"); - logger.info("--> successfully replaced {} with {}", zen1Node, newNode); - } - - assertThat(internalCluster().size(), equalTo(nodeCount)); - } - - public void testRollingUpgradeFromZen1ToZen2() throws Exception { - final int nodeCount = randomIntBetween(2, 5); - final List nodes = internalCluster().startNodes(nodeCount, ZEN1_SETTINGS); - - createIndex("test", - Settings.builder() - .put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), TimeValue.ZERO) // assign shards - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, nodeCount) // causes rebalancing - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) - .build()); - ensureGreen("test"); - - internalCluster().rollingRestart(new RestartCallback() { - @Override - public void doAfterNodes(int n, Client client) { - ensureGreen("test"); - } - - @Override - public Settings onNodeStopped(String nodeName) { - String viaNode = randomValueOtherThan(nodeName, () -> randomFrom(nodes)); - final ClusterHealthRequestBuilder clusterHealthRequestBuilder = client(viaNode).admin().cluster().prepareHealth() - .setWaitForEvents(Priority.LANGUID) - .setWaitForNodes(Integer.toString(nodeCount - 1)) - .setTimeout(TimeValue.timeValueSeconds(30)); - if (nodeCount == 2) { - clusterHealthRequestBuilder.setWaitForYellowStatus(); - } else { - clusterHealthRequestBuilder.setWaitForGreenStatus(); - } - ClusterHealthResponse clusterHealthResponse = clusterHealthRequestBuilder.get(); - assertFalse(nodeName, clusterHealthResponse.isTimedOut()); - return Coordinator.addZen1Attribute(false, Settings.builder().put(ZEN2_SETTINGS) - .put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), REMOVED_MINIMUM_MASTER_NODES)).build(); - } - }); - - ensureStableCluster(nodeCount); - ensureGreen("test"); - assertThat(internalCluster().size(), equalTo(nodeCount)); - } - - private void testMultipleNodeMigrationFromZen1ToZen2(int nodeCount) throws Exception { - final List oldNodes = internalCluster().startNodes(nodeCount, ZEN1_SETTINGS); - createIndex("test", - Settings.builder() - .put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), TimeValue.ZERO) // assign shards - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, nodeCount) // causes rebalancing - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, nodeCount > 1 ? 
1 : 0) - .build()); - ensureGreen("test"); - - internalCluster().startNodes(nodeCount, ZEN2_SETTINGS); - - logger.info("--> updating settings to exclude old nodes"); - client().admin().cluster().prepareUpdateSettings().setPersistentSettings(Settings.builder() - .put(CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING.getConcreteSettingForNamespace("_name").getKey(), String.join(",", oldNodes))).get(); - - logger.info("--> waiting for old nodes to be vacated"); - waitForRelocation(); - - while (internalCluster().size() > nodeCount) { - internalCluster().stopRandomNode(settings -> oldNodes.contains(NODE_NAME_SETTING.get(settings))); - } - - ensureGreen("test"); - } - - public void testMultipleNodeMigrationFromZen1ToZen2WithOneNode() throws Exception { - testMultipleNodeMigrationFromZen1ToZen2(1); - } - - public void testMultipleNodeMigrationFromZen1ToZen2WithTwoNodes() throws Exception { - testMultipleNodeMigrationFromZen1ToZen2(2); - } - - public void testMultipleNodeMigrationFromZen1ToZen2WithThreeNodes() throws Exception { - testMultipleNodeMigrationFromZen1ToZen2(3); - } - - public void testFreshestMasterElectedAfterFullClusterRestart() throws Exception { - final List nodeNames = internalCluster().startNodes(3, ZEN1_SETTINGS); - - assertTrue(client().admin().cluster().prepareUpdateSettings().setPersistentSettings(Settings.builder() - .put(CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), Allocation.ALL)).get().isAcknowledged()); - - final List nodeEnvironments - = StreamSupport.stream(internalCluster().getDataOrMasterNodeInstances(NodeEnvironment.class).spliterator(), false) - .collect(Collectors.toList()); - - final boolean randomiseVersions = rarely(); - - internalCluster().fullRestart(new RestartCallback() { - int nodesStopped; - - @Override - public Settings onNodeStopped(String nodeName) throws Exception { - nodesStopped += 1; - - if (nodesStopped == 1) { - final Client client = internalCluster().client(randomValueOtherThan(nodeName, () -> randomFrom(nodeNames))); - - assertFalse(client.admin().cluster().health(Requests.clusterHealthRequest() - .waitForEvents(Priority.LANGUID) - .waitForNoRelocatingShards(true) - .waitForNodes("2")).actionGet().isTimedOut()); - - assertTrue(client.admin().cluster().prepareUpdateSettings().setPersistentSettings(Settings.builder() - .put(CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), Allocation.NONE)).get().isAcknowledged()); - } - - if (nodesStopped == nodeNames.size()) { - for (final NodeEnvironment nodeEnvironment : nodeEnvironments) { - // The versions written by nodes following a Zen1 master cannot be trusted. Randomise them to demonstrate they are - // not important. - final MetaStateService metaStateService = new MetaStateService(nodeEnvironment, xContentRegistry()); - final Manifest manifest = metaStateService.loadManifestOrEmpty(); - assertThat(manifest.getCurrentTerm(), is(ZEN1_BWC_TERM)); - final long newVersion = randomiseVersions ? 
randomNonNegativeLong() : 0L; - metaStateService.writeManifestAndCleanup("altering version to " + newVersion, - new Manifest(manifest.getCurrentTerm(), newVersion, manifest.getGlobalGeneration(), - manifest.getIndexGenerations())); - } - } - - return Coordinator.addZen1Attribute(false, Settings.builder()) - .put(ZEN2_SETTINGS) - .putList(INITIAL_MASTER_NODES_SETTING.getKey(), nodeNames) - .put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), REMOVED_MINIMUM_MASTER_NODES) - .build(); - } - }); - - assertFalse(client().admin().cluster().health(Requests.clusterHealthRequest() - .waitForEvents(Priority.LANGUID) - .waitForNoRelocatingShards(true) - .waitForNodes("3")).actionGet().isTimedOut()); - - assertThat(CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.get( - client().admin().cluster().state(new ClusterStateRequest()).get().getState().metaData().settings()), - equalTo(Allocation.NONE)); - } -} diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeServiceTests.java index c1e341fd5bc2f..827680cca1b71 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeServiceTests.java @@ -89,7 +89,10 @@ public void testFailUpgrade() { new MapperRegistry(Collections.emptyMap(), Collections.emptyMap(), MapperPlugin.NOOP_FIELD_FILTER), IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, Collections.emptyList()); Version minCompat = Version.CURRENT.minimumIndexCompatibilityVersion(); - Version indexUpgraded = VersionUtils.randomVersionBetween(random(), minCompat, VersionUtils.getPreviousVersion(Version.CURRENT)); + Version indexUpgraded = VersionUtils.randomVersionBetween(random(), + minCompat, + Version.max(minCompat, VersionUtils.getPreviousVersion(Version.CURRENT)) + ); Version indexCreated = Version.fromString((minCompat.major - 1) + "." + randomInt(5) + "." + randomInt(5)); final IndexMetaData metaData = newIndexMeta("foo", Settings.builder() .put(IndexMetaData.SETTING_VERSION_UPGRADED, indexUpgraded) diff --git a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseIT.java b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseIT.java index 13dadd051c273..a2880c0d330ad 100644 --- a/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseIT.java +++ b/server/src/test/java/org/elasticsearch/index/seqno/RetentionLeaseIT.java @@ -260,6 +260,7 @@ public void testBackgroundRetentionLeaseSync() throws Exception { } } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/38588") public void testRetentionLeasesSyncOnRecovery() throws Exception { final int numberOfReplicas = 2 - scaledRandomIntBetween(0, 2); internalCluster().ensureAtLeastNumDataNodes(1 + numberOfReplicas); diff --git a/server/src/test/java/org/elasticsearch/rest/action/admin/indices/RestResizeHandlerTests.java b/server/src/test/java/org/elasticsearch/rest/action/admin/indices/RestResizeHandlerTests.java deleted file mode 100644 index b9da177b0b8fa..0000000000000 --- a/server/src/test/java/org/elasticsearch/rest/action/admin/indices/RestResizeHandlerTests.java +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. 
Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.rest.action.admin.indices; - -import org.elasticsearch.client.node.NodeClient; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.NamedXContentRegistry; -import org.elasticsearch.rest.RestController; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.rest.FakeRestRequest; - -import java.io.IOException; -import java.util.Collections; -import java.util.Locale; - -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.hasToString; -import static org.mockito.Mockito.mock; - -public class RestResizeHandlerTests extends ESTestCase { - - public void testShrinkCopySettingsDeprecated() throws IOException { - final RestResizeHandler.RestShrinkIndexAction handler = - new RestResizeHandler.RestShrinkIndexAction(Settings.EMPTY, mock(RestController.class)); - for (final String copySettings : new String[]{null, "", "true", "false"}) { - runTestResizeCopySettingsDeprecated(handler, "shrink", copySettings); - } - } - - public void testSplitCopySettingsDeprecated() throws IOException { - final RestResizeHandler.RestSplitIndexAction handler = - new RestResizeHandler.RestSplitIndexAction(Settings.EMPTY, mock(RestController.class)); - for (final String copySettings : new String[]{null, "", "true", "false"}) { - runTestResizeCopySettingsDeprecated(handler, "split", copySettings); - } - } - - private void runTestResizeCopySettingsDeprecated( - final RestResizeHandler handler, final String resizeOperation, final String copySettings) throws IOException { - final FakeRestRequest.Builder builder = - new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY) - .withParams(Collections.singletonMap("copy_settings", copySettings)) - .withPath(String.format(Locale.ROOT, "source/_%s/target", resizeOperation)); - if (copySettings != null) { - builder.withParams(Collections.singletonMap("copy_settings", copySettings)); - } - final FakeRestRequest request = builder.build(); - if ("false".equals(copySettings)) { - final IllegalArgumentException e = - expectThrows(IllegalArgumentException.class, () -> handler.prepareRequest(request, mock(NodeClient.class))); - assertThat(e, hasToString(containsString("parameter [copy_settings] can not be explicitly set to [false]"))); - } else { - handler.prepareRequest(request, mock(NodeClient.class)); - if ("".equals(copySettings) || "true".equals(copySettings)) { - assertWarnings("parameter [copy_settings] is deprecated and will be removed in 8.0.0"); - } - } - } - -} diff --git a/test/framework/src/main/java/org/elasticsearch/test/VersionUtils.java b/test/framework/src/main/java/org/elasticsearch/test/VersionUtils.java index 84c480b8d510b..f775f9f5b0121 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/VersionUtils.java +++ b/test/framework/src/main/java/org/elasticsearch/test/VersionUtils.java @@ -52,7 +52,7 @@ static Tuple, List> 
resolveReleasedVersions(Version curre // this breaks b/c 5.x is still in version list but master doesn't care about it! //assert majorVersions.size() == 2; // TODO: remove oldVersions, we should only ever have 2 majors in Version - List oldVersions = majorVersions.getOrDefault((int)current.major - 2, Collections.emptyList()); + List> oldVersions = splitByMinor(majorVersions.getOrDefault((int)current.major - 2, Collections.emptyList())); List> previousMajor = splitByMinor(majorVersions.get((int)current.major - 1)); List> currentMajor = splitByMinor(majorVersions.get((int)current.major)); @@ -78,12 +78,21 @@ static Tuple, List> resolveReleasedVersions(Version curre moveLastToUnreleased(stableVersions, unreleasedVersions); } // remove the next bugfix - moveLastToUnreleased(stableVersions, unreleasedVersions); + if (stableVersions.isEmpty() == false) { + moveLastToUnreleased(stableVersions, unreleasedVersions); + } } - List releasedVersions = Stream.concat(oldVersions.stream(), - Stream.concat(previousMajor.stream(), currentMajor.stream()).flatMap(List::stream)) - .collect(Collectors.toList()); + // If no version of the previous major was released, then the last minor and bugfix of the old major were not released either. + if (previousMajor.isEmpty()) { + assert currentMajor.isEmpty() : currentMajor; + // minor of the old version is being staged + moveLastToUnreleased(oldVersions, unreleasedVersions); + // bugfix of the old version is also being staged + moveLastToUnreleased(oldVersions, unreleasedVersions); + } + List releasedVersions = Stream.of(oldVersions, previousMajor, currentMajor) + .flatMap(List::stream).flatMap(List::stream).collect(Collectors.toList()); Collections.sort(unreleasedVersions); // we add unreleased out of order, so need to sort here return new Tuple<>(Collections.unmodifiableList(releasedVersions), Collections.unmodifiableList(unreleasedVersions)); } diff --git a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java index 87343d4c82087..db00ed028d4e2 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java @@ -2387,7 +2387,7 @@ public String executor() { assertEquals(1, transportStats.getRxCount()); assertEquals(1, transportStats.getTxCount()); assertEquals(25, transportStats.getRxSize().getBytes()); - assertEquals(50, transportStats.getTxSize().getBytes()); + assertEquals(51, transportStats.getTxSize().getBytes()); }); serviceC.sendRequest(connection, "internal:action", new TestRequest("hello world"), TransportRequestOptions.EMPTY, transportResponseHandler); @@ -2397,7 +2397,7 @@ public String executor() { assertEquals(1, transportStats.getRxCount()); assertEquals(2, transportStats.getTxCount()); assertEquals(25, transportStats.getRxSize().getBytes()); - assertEquals(106, transportStats.getTxSize().getBytes()); + assertEquals(107, transportStats.getTxSize().getBytes()); }); sendResponseLatch.countDown(); responseLatch.await(); @@ -2405,7 +2405,7 @@ public String executor() { assertEquals(2, stats.getRxCount()); assertEquals(2, stats.getTxCount()); assertEquals(46, stats.getRxSize().getBytes()); - assertEquals(106, stats.getTxSize().getBytes()); + assertEquals(107, stats.getTxSize().getBytes()); } finally { serviceC.close(); } @@ -2502,7 +2502,7 @@ public String executor() { assertEquals(1,
transportStats.getRxCount()); assertEquals(1, transportStats.getTxCount()); assertEquals(25, transportStats.getRxSize().getBytes()); - assertEquals(50, transportStats.getTxSize().getBytes()); + assertEquals(51, transportStats.getTxSize().getBytes()); }); serviceC.sendRequest(connection, "internal:action", new TestRequest("hello world"), TransportRequestOptions.EMPTY, transportResponseHandler); @@ -2512,7 +2512,7 @@ public String executor() { assertEquals(1, transportStats.getRxCount()); assertEquals(2, transportStats.getTxCount()); assertEquals(25, transportStats.getRxSize().getBytes()); - assertEquals(106, transportStats.getTxSize().getBytes()); + assertEquals(107, transportStats.getTxSize().getBytes()); }); sendResponseLatch.countDown(); responseLatch.await(); @@ -2527,7 +2527,7 @@ public String executor() { // 49 bytes are the non-exception message bytes that have been received. It should include the initial // handshake message and the header, version, etc bytes in the exception message. assertEquals(failedMessage, 49 + streamOutput.bytes().length(), stats.getRxSize().getBytes()); - assertEquals(106, stats.getTxSize().getBytes()); + assertEquals(107, stats.getTxSize().getBytes()); } finally { serviceC.close(); } diff --git a/test/framework/src/main/java/org/elasticsearch/upgrades/AbstractFullClusterRestartTestCase.java b/test/framework/src/main/java/org/elasticsearch/upgrades/AbstractFullClusterRestartTestCase.java index 508949b561fb4..18215b6797ec1 100644 --- a/test/framework/src/main/java/org/elasticsearch/upgrades/AbstractFullClusterRestartTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/upgrades/AbstractFullClusterRestartTestCase.java @@ -20,49 +20,23 @@ package org.elasticsearch.upgrades; import org.elasticsearch.Version; -import org.elasticsearch.client.Request; import org.elasticsearch.common.Booleans; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.test.rest.ESRestTestCase; -import org.junit.Before; - -import java.io.IOException; - -import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.hamcrest.Matchers.equalTo; public abstract class AbstractFullClusterRestartTestCase extends ESRestTestCase { private final boolean runningAgainstOldCluster = Booleans.parseBoolean(System.getProperty("tests.is_old_cluster")); - @Before - public void init() throws IOException { - assertThat("we don't need this branch if we aren't compatible with 6.0", - Version.CURRENT.minimumIndexCompatibilityVersion().onOrBefore(Version.V_6_0_0), equalTo(true)); - if (isRunningAgainstOldCluster() && getOldClusterVersion().before(Version.V_7_0_0)) { - XContentBuilder template = jsonBuilder(); - template.startObject(); - { - template.field("index_patterns", "*"); - template.field("order", "0"); - template.startObject("settings"); - template.field("number_of_shards", 5); - template.endObject(); - } - template.endObject(); - Request createTemplate = new Request("PUT", "/_template/template"); - createTemplate.setJsonEntity(Strings.toString(template)); - client().performRequest(createTemplate); - } - } - public final boolean isRunningAgainstOldCluster() { return runningAgainstOldCluster; } private final Version oldClusterVersion = Version.fromString(System.getProperty("tests.old_cluster_version")); + public final boolean isRunningAgainstAncientCluster() { + return isRunningAgainstOldCluster() && oldClusterVersion.before(Version.V_7_0_0); + } + public final Version 
getOldClusterVersion() { return oldClusterVersion; } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/watcher/GetWatchResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/watcher/GetWatchResponseTests.java index 9b71079a6e5e7..4cc566adf31bb 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/watcher/GetWatchResponseTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/watcher/GetWatchResponseTests.java @@ -6,6 +6,7 @@ package org.elasticsearch.protocol.xpack.watcher; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.time.DateUtils; import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.ToXContent; @@ -23,9 +24,6 @@ import java.io.IOException; import java.io.InputStream; -import java.time.Clock; -import java.time.Instant; -import java.time.ZoneOffset; import java.time.ZonedDateTime; import java.util.Collections; import java.util.HashMap; @@ -126,15 +124,15 @@ private static BytesReference simpleWatch() { private static WatchStatus randomWatchStatus() { long version = randomLongBetween(-1, Long.MAX_VALUE); - WatchStatus.State state = new WatchStatus.State(randomBoolean(), nowWithMillisResolution()); + WatchStatus.State state = new WatchStatus.State(randomBoolean(), DateUtils.nowWithMillisResolution()); ExecutionState executionState = randomFrom(ExecutionState.values()); - ZonedDateTime lastChecked = rarely() ? null : nowWithMillisResolution(); - ZonedDateTime lastMetCondition = rarely() ? null : nowWithMillisResolution(); + ZonedDateTime lastChecked = rarely() ? null : DateUtils.nowWithMillisResolution(); + ZonedDateTime lastMetCondition = rarely() ? 
null : DateUtils.nowWithMillisResolution(); int size = randomIntBetween(0, 5); Map actionMap = new HashMap<>(); for (int i = 0; i < size; i++) { ActionStatus.AckStatus ack = new ActionStatus.AckStatus( - nowWithMillisResolution(), + DateUtils.nowWithMillisResolution(), randomFrom(ActionStatus.AckStatus.State.values()) ); ActionStatus actionStatus = new ActionStatus( @@ -154,16 +152,16 @@ private static WatchStatus randomWatchStatus() { } private static ActionStatus.Throttle randomThrottle() { - return new ActionStatus.Throttle(nowWithMillisResolution(), randomAlphaOfLengthBetween(10, 20)); + return new ActionStatus.Throttle(DateUtils.nowWithMillisResolution(), randomAlphaOfLengthBetween(10, 20)); } private static ActionStatus.Execution randomExecution() { if (randomBoolean()) { return null; } else if (randomBoolean()) { - return ActionStatus.Execution.failure(nowWithMillisResolution(), randomAlphaOfLengthBetween(10, 20)); + return ActionStatus.Execution.failure(DateUtils.nowWithMillisResolution(), randomAlphaOfLengthBetween(10, 20)); } else { - return ActionStatus.Execution.successful(nowWithMillisResolution()); + return ActionStatus.Execution.successful(DateUtils.nowWithMillisResolution()); } } @@ -229,8 +227,4 @@ private static ActionStatus.Execution convertHlrcToInternal(org.elasticsearch.cl private static ActionStatus.Throttle convertHlrcToInternal(org.elasticsearch.client.watcher.ActionStatus.Throttle throttle) { return new ActionStatus.Throttle(throttle.timestamp(), throttle.reason()); } - - private static ZonedDateTime nowWithMillisResolution() { - return Instant.ofEpochMilli(Clock.systemUTC().millis()).atZone(ZoneOffset.UTC); - } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/calendars/ScheduledEventTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/calendars/ScheduledEventTests.java index 6508ee5cb2054..9555d364a7bb1 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/calendars/ScheduledEventTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/calendars/ScheduledEventTests.java @@ -7,6 +7,7 @@ import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.time.DateUtils; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.json.JsonXContent; @@ -17,9 +18,6 @@ import org.elasticsearch.xpack.core.ml.job.config.RuleCondition; import java.io.IOException; -import java.time.Clock; -import java.time.Instant; -import java.time.ZoneOffset; import java.time.ZonedDateTime; import java.util.EnumSet; import java.util.List; @@ -29,7 +27,7 @@ public class ScheduledEventTests extends AbstractSerializingTestCase { public static ScheduledEvent createScheduledEvent(String calendarId) { - ZonedDateTime start = nowWithMillisResolution(); + ZonedDateTime start = DateUtils.nowWithMillisResolution(); return new ScheduledEvent(randomAlphaOfLength(10), start, start.plusSeconds(randomIntBetween(1, 10000)), calendarId, null); } @@ -120,8 +118,4 @@ public void testLenientParser() throws IOException { ScheduledEvent.LENIENT_PARSER.apply(parser, null); } } - - private static ZonedDateTime nowWithMillisResolution() { - return Instant.ofEpochMilli(Clock.systemUTC().millis()).atZone(ZoneOffset.UTC); - } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java 
b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java index 69e008f60c696..62cb04f035318 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyIntegTests.java @@ -237,6 +237,7 @@ public void testInvalidateApiKeysForApiKeyName() throws InterruptedException, Ex verifyInvalidateResponse(1, responses, invalidateResponse); } + @AwaitsFix(bugUrl="https://github.com/elastic/elasticsearch/issues/38408") public void testGetAndInvalidateApiKeysWithExpiredAndInvalidatedApiKey() throws Exception { List responses = createApiKeys(1, null); Instant created = Instant.now(); diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/TestUtils.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/TestUtils.java index be7f42d3f0c78..9b8fbdef43a77 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/TestUtils.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/TestUtils.java @@ -11,10 +11,6 @@ import org.elasticsearch.xpack.sql.session.Configuration; import org.elasticsearch.xpack.sql.util.DateUtils; -import java.time.Clock; -import java.time.Duration; -import java.time.ZonedDateTime; - public class TestUtils { private TestUtils() {} @@ -23,16 +19,4 @@ private TestUtils() {} Protocol.REQUEST_TIMEOUT, Protocol.PAGE_TIMEOUT, null, Mode.PLAIN, null, null, null); - /** - * Returns the current UTC date-time with milliseconds precision. - * In Java 9+ (as opposed to Java 8) the {@code Clock} implementation uses system's best clock implementation (which could mean - * that the precision of the clock can be milliseconds, microseconds or nanoseconds), whereas in Java 8 - * {@code System.currentTimeMillis()} is always used. To account for these differences, this method defines a new {@code Clock} - * which will offer a value for {@code ZonedDateTime.now()} set to always have milliseconds precision. 
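The javadoc being deleted here records why these tests pin timestamps to millisecond resolution in the first place: on Java 9 and later the system clock may tick in micro- or nanoseconds, so a value that round-trips through a millisecond-based wire or XContent format would no longer compare equal to the original. Several hunks in this diff replace per-test copies of that helper with the shared org.elasticsearch.common.time.DateUtils.nowWithMillisResolution(). Its implementation is not shown in this diff; below is a minimal sketch, assuming it behaves like the private helpers removed from GetWatchResponseTests and ScheduledEventTests (the class name MillisResolutionClock is hypothetical):

    import java.time.Clock;
    import java.time.Instant;
    import java.time.ZoneOffset;
    import java.time.ZonedDateTime;

    final class MillisResolutionClock {
        private MillisResolutionClock() {}

        // Current UTC time truncated to millisecond resolution, so a round trip through a
        // millisecond-based serialization format yields a value equal to the original.
        static ZonedDateTime nowWithMillisResolution() {
            return Instant.ofEpochMilli(Clock.systemUTC().millis()).atZone(ZoneOffset.UTC);
        }
    }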
- * - * @return {@link ZonedDateTime} instance for the current date-time with milliseconds precision in UTC - */ - public static final ZonedDateTime now() { - return ZonedDateTime.now(Clock.tick(Clock.system(DateUtils.UTC), Duration.ofMillis(1))); - } } diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/DataTypeConversionTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/DataTypeConversionTests.java index 73b4ea8fa8daa..686c97b91a06b 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/DataTypeConversionTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/DataTypeConversionTests.java @@ -7,7 +7,6 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.sql.SqlIllegalArgumentException; -import org.elasticsearch.xpack.sql.TestUtils; import org.elasticsearch.xpack.sql.expression.Literal; import org.elasticsearch.xpack.sql.tree.Location; import org.elasticsearch.xpack.sql.tree.Source; @@ -157,7 +156,8 @@ public void testConversionToDate() { assertEquals(date(18000000L), conversion.convert("1970-01-01T03:10:20-05:00")); // double check back and forth conversion - ZonedDateTime zdt = TestUtils.now(); + + ZonedDateTime zdt = org.elasticsearch.common.time.DateUtils.nowWithMillisResolution(); Conversion forward = conversionFor(DATE, KEYWORD); Conversion back = conversionFor(KEYWORD, DATE); assertEquals(DateUtils.asDateOnly(zdt), back.convert(forward.convert(zdt))); @@ -205,7 +205,8 @@ public void testConversionToDateTime() { assertEquals(dateTime(18000000L), conversion.convert("1970-01-01T00:00:00-05:00")); // double check back and forth conversion - ZonedDateTime dt = TestUtils.now(); + + ZonedDateTime dt = org.elasticsearch.common.time.DateUtils.nowWithMillisResolution(); Conversion forward = conversionFor(DATETIME, KEYWORD); Conversion back = conversionFor(KEYWORD, DATETIME); assertEquals(dt, back.convert(forward.convert(dt))); diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherIndexingListener.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherIndexingListener.java index 48e8dd7813e19..81fda0c0bd47c 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherIndexingListener.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherIndexingListener.java @@ -32,7 +32,6 @@ import java.io.IOException; import java.time.Clock; -import java.time.Instant; import java.time.ZoneOffset; import java.time.ZonedDateTime; import java.util.ArrayList; @@ -102,7 +101,7 @@ void setConfiguration(Configuration configuration) { @Override public Engine.Index preIndex(ShardId shardId, Engine.Index operation) { if (isWatchDocument(shardId.getIndexName(), operation.type())) { - ZonedDateTime now = Instant.ofEpochMilli(clock.millis()).atZone(ZoneOffset.UTC); + ZonedDateTime now = clock.instant().atZone(ZoneOffset.UTC); try { Watch watch = parser.parseWithSecrets(operation.id(), true, operation.source(), now, XContentType.JSON, operation.getIfSeqNo(), operation.getIfPrimaryTerm()); diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/index/IndexActionTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/index/IndexActionTests.java index da702e01047bd..1864bd3bd5cce 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/index/IndexActionTests.java +++ 
b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/index/IndexActionTests.java @@ -17,6 +17,7 @@ import org.elasticsearch.client.Client; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.time.DateUtils; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -36,6 +37,7 @@ import java.time.ZoneOffset; import java.time.ZonedDateTime; +import java.time.format.DateTimeFormatter; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -285,7 +287,7 @@ public void testIndexActionExecuteSingleDoc() throws Exception { refreshPolicy); ExecutableIndexAction executable = new ExecutableIndexAction(action, logger, client, TimeValue.timeValueSeconds(30), TimeValue.timeValueSeconds(30)); - ZonedDateTime executionTime = ZonedDateTime.now(ZoneOffset.UTC); + ZonedDateTime executionTime = DateUtils.nowWithMillisResolution(); Payload payload; if (customId && docIdAsParam == false) { @@ -324,8 +326,9 @@ public void testIndexActionExecuteSingleDoc() throws Exception { assertThat(indexRequest.getRefreshPolicy(), is(expectedRefreshPolicy)); if (timestampField != null) { + final DateTimeFormatter formatter = DateTimeFormatter.ISO_DATE_TIME; assertThat(indexRequest.sourceAsMap().keySet(), is(hasSize(2))); - assertThat(indexRequest.sourceAsMap(), hasEntry(timestampField, executionTime.toString())); + assertThat(indexRequest.sourceAsMap(), hasEntry(timestampField, formatter.format(executionTime))); } else { assertThat(indexRequest.sourceAsMap().keySet(), is(hasSize(1))); } diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/watch/WatchStatusIntegrationTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/watch/WatchStatusIntegrationTests.java index 9f738d8daa6b2..20e27bd8b8d15 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/watch/WatchStatusIntegrationTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/watch/WatchStatusIntegrationTests.java @@ -24,6 +24,7 @@ public class WatchStatusIntegrationTests extends AbstractWatcherIntegrationTestCase { + @AwaitsFix(bugUrl="https://github.com/elastic/elasticsearch/issues/38619") public void testThatStatusGetsUpdated() { WatcherClient watcherClient = watcherClient(); watcherClient.preparePutWatch("_name") diff --git a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java index e4d96645b87b7..c7bea7b108804 100644 --- a/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java +++ b/x-pack/qa/full-cluster-restart/src/test/java/org/elasticsearch/xpack/restart/FullClusterRestartIT.java @@ -32,6 +32,7 @@ import org.elasticsearch.xpack.watcher.trigger.schedule.IntervalSchedule; import org.elasticsearch.xpack.watcher.trigger.schedule.ScheduleTrigger; import org.hamcrest.Matcher; +import org.junit.Before; import java.io.IOException; import java.nio.charset.StandardCharsets; @@ -60,6 +61,13 @@ public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase { + private String type; + + @Before + public void setType() { + type = getOldClusterVersion().before(Version.V_6_7_0) ? 
"doc" : "_doc"; + } + @Override protected Settings restClientSettings() { String token = "Basic " + Base64.getEncoder().encodeToString("test_user:x-pack-test-password".getBytes(StandardCharsets.UTF_8)); @@ -76,7 +84,7 @@ protected Settings restClientSettings() { * Tests that a single document survives. Super basic smoke test. */ public void testSingleDoc() throws IOException { - String docLocation = "/testsingledoc/doc/1"; + String docLocation = "/testsingledoc/" + type + "/1"; String doc = "{\"test\": \"test\"}"; if (isRunningAgainstOldCluster()) { @@ -87,7 +95,9 @@ public void testSingleDoc() throws IOException { } Request getRequest = new Request("GET", docLocation); - getRequest.setOptions(expectWarnings(RestGetAction.TYPES_DEPRECATION_MESSAGE)); + if (getOldClusterVersion().before(Version.V_6_7_0)) { + getRequest.setOptions(expectWarnings(RestGetAction.TYPES_DEPRECATION_MESSAGE)); + } assertThat(toStr(client().performRequest(getRequest)), containsString(doc)); } @@ -148,17 +158,21 @@ public void testSecurityNativeRealm() throws Exception { public void testWatcher() throws Exception { if (isRunningAgainstOldCluster()) { logger.info("Adding a watch on old cluster {}", getOldClusterVersion()); - Request createBwcWatch = new Request("PUT", "/_xpack/watcher/watch/bwc_watch"); + Request createBwcWatch = new Request("PUT", getWatcherEndpoint() + "/watch/bwc_watch"); + Request createBwcThrottlePeriod = new Request("PUT", getWatcherEndpoint() + "/watch/bwc_throttle_period"); + if (getOldClusterVersion().onOrAfter(Version.V_7_0_0)) { + createBwcWatch.setOptions(expectWarnings(IndexAction.TYPES_DEPRECATION_MESSAGE)); + createBwcThrottlePeriod.setOptions(expectWarnings(IndexAction.TYPES_DEPRECATION_MESSAGE)); + } createBwcWatch.setJsonEntity(loadWatch("simple-watch.json")); client().performRequest(createBwcWatch); logger.info("Adding a watch with \"fun\" throttle periods on old cluster"); - Request createBwcThrottlePeriod = new Request("PUT", "_xpack/watcher/watch/bwc_throttle_period"); createBwcThrottlePeriod.setJsonEntity(loadWatch("throttle-period-watch.json")); client().performRequest(createBwcThrottlePeriod); logger.info("Adding a watch with \"fun\" read timeout on old cluster"); - Request createFunnyTimeout = new Request("PUT", "_xpack/watcher/watch/bwc_funny_timeout"); + Request createFunnyTimeout = new Request("PUT", getWatcherEndpoint() + "/watch/bwc_funny_timeout"); createFunnyTimeout.setJsonEntity(loadWatch("funny-timeout-watch.json")); client().performRequest(createFunnyTimeout); @@ -246,7 +260,11 @@ public void testRollupAfterRestart() throws Exception { // index documents for the rollup job final StringBuilder bulk = new StringBuilder(); for (int i = 0; i < numDocs; i++) { - bulk.append("{\"index\":{\"_index\":\"rollup-docs\",\"_type\":\"doc\"}}\n"); + if (getOldClusterVersion().onOrAfter(Version.V_7_0_0)) { + bulk.append("{\"index\":{\"_index\":\"rollup-docs\"}}\n"); + } else { + bulk.append("{\"index\":{\"_index\":\"rollup-docs\",\"_type\":\"doc\"}}\n"); + } String date = String.format(Locale.ROOT, "%04d-01-01T00:%02d:00Z", year, i); bulk.append("{\"timestamp\":\"").append(date).append("\",\"value\":").append(i).append("}\n"); } @@ -257,7 +275,8 @@ public void testRollupAfterRestart() throws Exception { client().performRequest(bulkRequest); // create the rollup job - final Request createRollupJobRequest = new Request("PUT", "/_xpack/rollup/job/rollup-job-test"); + final Request createRollupJobRequest = new Request("PUT", getRollupEndpoint() + "/job/rollup-job-test"); + 
createRollupJobRequest.setJsonEntity("{" + "\"index_pattern\":\"rollup-*\"," + "\"rollup_index\":\"results-rollup\"," @@ -278,7 +297,7 @@ public void testRollupAfterRestart() throws Exception { assertThat(createRollupJobResponse.get("acknowledged"), equalTo(Boolean.TRUE)); // start the rollup job - final Request startRollupJobRequest = new Request("POST", "/_xpack/rollup/job/rollup-job-test/_start"); + final Request startRollupJobRequest = new Request("POST", getRollupEndpoint() + "/job/rollup-job-test/_start"); Map startRollupJobResponse = entityAsMap(client().performRequest(startRollupJobRequest)); assertThat(startRollupJobResponse.get("started"), equalTo(Boolean.TRUE)); @@ -304,12 +323,12 @@ public void testRollupIDSchemeAfterRestart() throws Exception { assumeTrue("Rollup ID scheme changed in 6.4", getOldClusterVersion().before(Version.V_6_4_0)); if (isRunningAgainstOldCluster()) { - final Request indexRequest = new Request("POST", "/id-test-rollup/doc/1"); + final Request indexRequest = new Request("POST", "/id-test-rollup/" + type + "/1"); indexRequest.setJsonEntity("{\"timestamp\":\"2018-01-01T00:00:01\",\"value\":123}"); client().performRequest(indexRequest); // create the rollup job - final Request createRollupJobRequest = new Request("PUT", "/_xpack/rollup/job/rollup-id-test"); + final Request createRollupJobRequest = new Request("PUT", getRollupEndpoint() + "/job/rollup-id-test"); createRollupJobRequest.setJsonEntity("{" + "\"index_pattern\":\"id-test-rollup\"," + "\"rollup_index\":\"id-test-results-rollup\"," @@ -337,7 +356,7 @@ public void testRollupIDSchemeAfterRestart() throws Exception { assertThat(createRollupJobResponse.get("acknowledged"), equalTo(Boolean.TRUE)); // start the rollup job - final Request startRollupJobRequest = new Request("POST", "/_xpack/rollup/job/rollup-id-test/_start"); + final Request startRollupJobRequest = new Request("POST", getRollupEndpoint() + "/job/rollup-id-test/_start"); Map startRollupJobResponse = entityAsMap(client().performRequest(startRollupJobRequest)); assertThat(startRollupJobResponse.get("started"), equalTo(Boolean.TRUE)); @@ -365,9 +384,11 @@ public void testRollupIDSchemeAfterRestart() throws Exception { } else { - final Request indexRequest = new Request("POST", "/id-test-rollup/doc/2"); + final Request indexRequest = new Request("POST", "/id-test-rollup/" + type + "/2"); indexRequest.setJsonEntity("{\"timestamp\":\"2018-01-02T00:00:01\",\"value\":345}"); - indexRequest.setOptions(expectWarnings(RestIndexAction.TYPES_DEPRECATION_MESSAGE)); + if (getOldClusterVersion().before(Version.V_6_7_0)) { + indexRequest.setOptions(expectWarnings(RestIndexAction.TYPES_DEPRECATION_MESSAGE)); + } client().performRequest(indexRequest); assertRollUpJob("rollup-id-test"); @@ -431,12 +452,8 @@ public void testSqlFailsOnIndexWithTwoTypes() throws IOException { client().performRequest(doc2); return; } - final Request sqlRequest; - if (isRunningAgainstOldCluster()) { - sqlRequest = new Request("POST", "/_xpack/sql"); - } else { - sqlRequest = new Request("POST", "/_sql"); - } + final Request sqlRequest = new Request("POST", getSQLEndpoint()); + sqlRequest.setJsonEntity("{\"query\":\"SELECT * FROM testsqlfailsonindexwithtwotypes\"}"); ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(sqlRequest)); assertEquals(400, e.getResponse().getStatusLine().getStatusCode()); @@ -458,8 +475,21 @@ private void assertOldTemplatesAreDeleted() throws IOException { private void assertWatchIndexContentsWork() throws Exception { //
Fetch a basic watch Request getRequest = new Request("GET", "_watcher/watch/bwc_watch"); - getRequest.setOptions(expectWarnings(IndexAction.TYPES_DEPRECATION_MESSAGE, - WatcherSearchTemplateRequest.TYPES_DEPRECATION_MESSAGE)); + if (getOldClusterVersion().before(Version.V_7_0_0)) { + getRequest.setOptions( + expectWarnings( + IndexAction.TYPES_DEPRECATION_MESSAGE, + WatcherSearchTemplateRequest.TYPES_DEPRECATION_MESSAGE + ) + ); + } else { + getRequest.setOptions( + expectWarnings( + IndexAction.TYPES_DEPRECATION_MESSAGE + ) + ); + } + Map bwcWatch = entityAsMap(client().performRequest(getRequest)); logger.error("-----> {}", bwcWatch); @@ -475,8 +505,20 @@ private void assertWatchIndexContentsWork() throws Exception { // Fetch a watch with "fun" throttle periods getRequest = new Request("GET", "_watcher/watch/bwc_throttle_period"); - getRequest.setOptions(expectWarnings(IndexAction.TYPES_DEPRECATION_MESSAGE, - WatcherSearchTemplateRequest.TYPES_DEPRECATION_MESSAGE)); + if (getOldClusterVersion().before(Version.V_7_0_0)) { + getRequest.setOptions( + expectWarnings( + IndexAction.TYPES_DEPRECATION_MESSAGE, + WatcherSearchTemplateRequest.TYPES_DEPRECATION_MESSAGE + ) + ); + } else { + getRequest.setOptions( + expectWarnings( + IndexAction.TYPES_DEPRECATION_MESSAGE + ) + ); + } bwcWatch = entityAsMap(client().performRequest(getRequest)); assertThat(bwcWatch.get("found"), equalTo(true)); source = (Map) bwcWatch.get("watch"); @@ -560,7 +602,13 @@ private void waitForHits(String indexName, int expectedHits) throws Exception { try { Map response = entityAsMap(client().performRequest(request)); Map hits = (Map) response.get("hits"); - int total = (int) hits.get("total"); + logger.info("Hits are: {}", hits); + int total; + if (getOldClusterVersion().onOrAfter(Version.V_7_0_0) || isRunningAgainstOldCluster() == false) { + total = (int) ((Map) hits.get("total")).get("value"); + } else { + total = (int) hits.get("total"); + } assertThat(total, greaterThanOrEqualTo(expectedHits)); } catch (IOException ioe) { if (ioe instanceof ResponseException) { @@ -580,12 +628,7 @@ static String toStr(Response response) throws IOException { private void createUser(final boolean oldCluster) throws Exception { final String id = oldCluster ? "preupgrade_user" : "postupgrade_user"; - Request request; - if (oldCluster) { - request = new Request("PUT", "/_xpack/security/user/" + id); - } else { - request = new Request("PUT", "/_security/user/" + id); - } + Request request = new Request("PUT", getSecurityEndpoint() + "/user/" + id); request.setJsonEntity( "{\n" + " \"password\" : \"j@rV1s\",\n" + @@ -599,12 +642,7 @@ private void createUser(final boolean oldCluster) throws Exception { private void createRole(final boolean oldCluster) throws Exception { final String id = oldCluster ? "preupgrade_role" : "postupgrade_role"; - Request request; - if (oldCluster) { - request = new Request("PUT", "/_xpack/security/role/" + id); - } else { - request = new Request("PUT", "/_security/role/" + id); - } + Request request = new Request("PUT", getSecurityEndpoint() + "/role/" + id); request.setJsonEntity( "{\n" + " \"run_as\": [ \"abc\" ],\n" + @@ -625,20 +663,59 @@ private void createRole(final boolean oldCluster) throws Exception { private void assertUserInfo(final boolean oldCluster) throws Exception { final String user = oldCluster ? "preupgrade_user" : "postupgrade_user"; - Map response = oldCluster ? 
- entityAsMap(client().performRequest(new Request("GET", "/_xpack/security/user/" + user))) : - entityAsMap(client().performRequest(new Request("GET", "/_security/user/" + user))); + Request request = new Request("GET", getSecurityEndpoint() + "/user/" + user); + Map response = entityAsMap(client().performRequest(request)); @SuppressWarnings("unchecked") Map userInfo = (Map) response.get(user); assertEquals(user + "@example.com", userInfo.get("email")); assertNotNull(userInfo.get("full_name")); assertNotNull(userInfo.get("roles")); } + private String getSecurityEndpoint() { + String securityEndpoint; + if (getOldClusterVersion().onOrAfter(Version.V_7_0_0) || isRunningAgainstOldCluster() == false) { + securityEndpoint = "/_security"; + } else { + securityEndpoint = "/_xpack/security"; + } + return securityEndpoint; + } + + private String getSQLEndpoint() { + String sqlEndpoint; + if (getOldClusterVersion().onOrAfter(Version.V_7_0_0) || isRunningAgainstOldCluster() == false) { + sqlEndpoint = "/_sql"; + } else { + sqlEndpoint = "/_xpack/sql"; + } + return sqlEndpoint; + } + + private String getRollupEndpoint() { + String rollupEndpoint; + if (getOldClusterVersion().onOrAfter(Version.V_7_0_0) || isRunningAgainstOldCluster() == false) { + rollupEndpoint = "/_rollup"; + } else { + rollupEndpoint = "/_xpack/rollup"; + } + return rollupEndpoint; + } + + private String getWatcherEndpoint() { + String watcherEndpoint; + if (getOldClusterVersion().onOrAfter(Version.V_7_0_0) || isRunningAgainstOldCluster() == false) { + watcherEndpoint = "/_watcher"; + } else { + watcherEndpoint = "/_xpack/watcher"; + } + return watcherEndpoint; + } + private void assertRoleInfo(final boolean oldCluster) throws Exception { final String role = oldCluster ? "preupgrade_role" : "postupgrade_role"; - @SuppressWarnings("unchecked") Map response = oldCluster ?
- (Map) entityAsMap(client().performRequest(new Request("GET", "/_xpack/security/role/" + role))).get(role) : - (Map) entityAsMap(client().performRequest(new Request("GET", "/_security/role/" + role))).get(role); + @SuppressWarnings("unchecked") Map response = (Map) entityAsMap( + client().performRequest(new Request("GET", getSecurityEndpoint() + "/role/" + role)) + ).get(role); assertNotNull(response.get("run_as")); assertNotNull(response.get("cluster")); assertNotNull(response.get("indices")); @@ -650,12 +727,7 @@ private void assertRollUpJob(final String rollupJob) throws Exception { waitForRollUpJob(rollupJob, expectedStates); // check that the rollup job is started using the RollUp API - final Request getRollupJobRequest; - if (isRunningAgainstOldCluster()) { - getRollupJobRequest = new Request("GET", "/_xpack/rollup/job/" + rollupJob); - } else { - getRollupJobRequest = new Request("GET", "/_rollup/job/" + rollupJob); - } + final Request getRollupJobRequest = new Request("GET", getRollupEndpoint() + "/job/" + rollupJob); Map getRollupJobResponse = entityAsMap(client().performRequest(getRollupJobRequest)); Map job = getJob(getRollupJobResponse, rollupJob); assertNotNull(job); @@ -700,12 +772,8 @@ private void assertRollUpJob(final String rollupJob) throws Exception { private void waitForRollUpJob(final String rollupJob, final Matcher expectedStates) throws Exception { assertBusy(() -> { - final Request getRollupJobRequest; - if (isRunningAgainstOldCluster()) { - getRollupJobRequest = new Request("GET", "/_xpack/rollup/job/" + rollupJob); - } else { - getRollupJobRequest = new Request("GET", "/_rollup/job/" + rollupJob); - } + final Request getRollupJobRequest = new Request("GET", getRollupEndpoint() + "/job/" + rollupJob); + Response getRollupJobResponse = client().performRequest(getRollupJobRequest); assertThat(getRollupJobResponse.getStatusLine().getStatusCode(), equalTo(RestStatus.OK.getStatus())); diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java index 8d8d999a55b8d..b44ae0360d884 100644 --- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/IndexingIT.java @@ -6,20 +6,15 @@ package org.elasticsearch.upgrades; import org.apache.http.util.EntityUtils; -import org.elasticsearch.common.Booleans; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.rest.action.document.RestBulkAction; -import org.elasticsearch.Version; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; +import org.elasticsearch.common.Booleans; +import org.elasticsearch.rest.action.document.RestBulkAction; import java.io.IOException; import java.nio.charset.StandardCharsets; -import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.rest.action.search.RestSearchAction.TOTAL_HITS_AS_INT_PARAM; -import static org.hamcrest.Matchers.equalTo; /** * Basic test that indexed documents survive the rolling restart. 
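The four endpoint helpers added to FullClusterRestartIT above (getSecurityEndpoint, getSQLEndpoint, getRollupEndpoint, getWatcherEndpoint) all encode the same rule: requests aimed at a pre-7.0 old cluster use the legacy "/_xpack"-prefixed path, while a 7.0+ old cluster or the upgraded cluster uses the new top-level path. A condensed sketch of that rule follows; the class and method names (EndpointSelection.select) are hypothetical, not part of this change:

    import org.elasticsearch.Version;

    final class EndpointSelection {
        private EndpointSelection() {}

        // Requests sent to a pre-7.0 old cluster must use the legacy "/_xpack" prefix;
        // a 7.0+ old cluster, or the upgraded cluster itself, takes the new top-level path.
        static String select(Version oldClusterVersion, boolean runningAgainstOldCluster,
                             String newPath, String legacyPath) {
            if (oldClusterVersion.onOrAfter(Version.V_7_0_0) || runningAgainstOldCluster == false) {
                return newPath;     // e.g. "/_rollup"
            }
            return legacyPath;      // e.g. "/_xpack/rollup"
        }
    }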
@@ -53,25 +48,6 @@ public void testIndexing() throws IOException { } if (CLUSTER_TYPE == ClusterType.OLD) { - { - Version minimumIndexCompatibilityVersion = Version.CURRENT.minimumIndexCompatibilityVersion(); - assertThat("this branch is not needed if we aren't compatible with 6.0", - minimumIndexCompatibilityVersion.onOrBefore(Version.V_6_0_0), equalTo(true)); - if (minimumIndexCompatibilityVersion.before(Version.V_7_0_0)) { - XContentBuilder template = jsonBuilder(); - template.startObject(); - { - template.field("index_patterns", "*"); - template.startObject("settings"); - template.field("number_of_shards", 5); - template.endObject(); - } - template.endObject(); - Request createTemplate = new Request("PUT", "/_template/template"); - createTemplate.setJsonEntity(Strings.toString(template)); - client().performRequest(createTemplate); - } - } Request createTestIndex = new Request("PUT", "/test_index"); createTestIndex.setJsonEntity("{\"settings\": {\"index.number_of_replicas\": 0}}"); diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/TokenBackwardsCompatibilityIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/TokenBackwardsCompatibilityIT.java index 1f0b8cffe20b0..6c5fa0c1a9704 100644 --- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/TokenBackwardsCompatibilityIT.java +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/TokenBackwardsCompatibilityIT.java @@ -13,9 +13,6 @@ import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.rest.action.document.RestGetAction; import org.elasticsearch.test.rest.yaml.ObjectPath; import java.io.IOException; @@ -23,32 +20,10 @@ import java.util.List; import java.util.Map; -import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.hamcrest.Matchers.equalTo; - public class TokenBackwardsCompatibilityIT extends AbstractUpgradeTestCase { public void testGeneratingTokenInOldCluster() throws Exception { assumeTrue("this test should only run against the old cluster", CLUSTER_TYPE == ClusterType.OLD); - { - Version minimumIndexCompatibilityVersion = Version.CURRENT.minimumIndexCompatibilityVersion(); - assertThat("this branch is not needed if we aren't compatible with 6.0", - minimumIndexCompatibilityVersion.onOrBefore(Version.V_6_0_0), equalTo(true)); - if (minimumIndexCompatibilityVersion.before(Version.V_7_0_0)) { - XContentBuilder template = jsonBuilder(); - template.startObject(); - { - template.field("index_patterns", "*"); - template.startObject("settings"); - template.field("number_of_shards", 5); - template.endObject(); - } - template.endObject(); - Request createTemplate = new Request("PUT", "/_template/template"); - createTemplate.setJsonEntity(Strings.toString(template)); - client().performRequest(createTemplate); - } - } Request createTokenRequest = new Request("POST", "/_security/oauth2/token"); createTokenRequest.setJsonEntity( @@ -64,7 +39,7 @@ public void testGeneratingTokenInOldCluster() throws Exception { assertNotNull(token); assertTokenWorks(token); - Request indexRequest1 = new Request("PUT", "token_backwards_compatibility_it/doc/old_cluster_token1"); + Request indexRequest1 = new Request("PUT", "token_backwards_compatibility_it/_doc/old_cluster_token1"); indexRequest1.setJsonEntity( "{\n" + " \"token\": 
\"" + token + "\"\n" + @@ -78,7 +53,7 @@ public void testGeneratingTokenInOldCluster() throws Exception { token = (String) responseMap.get("access_token"); assertNotNull(token); assertTokenWorks(token); - Request indexRequest2 = new Request("PUT", "token_backwards_compatibility_it/doc/old_cluster_token2"); + Request indexRequest2 = new Request("PUT", "token_backwards_compatibility_it/_doc/old_cluster_token2"); indexRequest2.setJsonEntity( "{\n" + " \"token\": \"" + token + "\"\n" + @@ -89,8 +64,7 @@ public void testGeneratingTokenInOldCluster() throws Exception { public void testTokenWorksInMixedOrUpgradedCluster() throws Exception { assumeTrue("this test should only run against the mixed or upgraded cluster", CLUSTER_TYPE == ClusterType.MIXED || CLUSTER_TYPE == ClusterType.UPGRADED); - Request getRequest = new Request("GET", "token_backwards_compatibility_it/doc/old_cluster_token1"); - getRequest.setOptions(expectWarnings(RestGetAction.TYPES_DEPRECATION_MESSAGE)); + Request getRequest = new Request("GET", "token_backwards_compatibility_it/_doc/old_cluster_token1"); Response getResponse = client().performRequest(getRequest); assertOK(getResponse); Map source = (Map) entityAsMap(getResponse).get("_source"); @@ -100,8 +74,7 @@ public void testTokenWorksInMixedOrUpgradedCluster() throws Exception { public void testMixedCluster() throws Exception { assumeTrue("this test should only run against the mixed cluster", CLUSTER_TYPE == ClusterType.MIXED); assumeTrue("the master must be on the latest version before we can write", isMasterOnLatestVersion()); - Request getRequest = new Request("GET", "token_backwards_compatibility_it/doc/old_cluster_token2"); - getRequest.setOptions(expectWarnings(RestGetAction.TYPES_DEPRECATION_MESSAGE)); + Request getRequest = new Request("GET", "token_backwards_compatibility_it/_doc/old_cluster_token2"); Response getResponse = client().performRequest(getRequest); Map source = (Map) entityAsMap(getResponse).get("_source"); @@ -152,8 +125,7 @@ public void testMixedCluster() throws Exception { public void testUpgradedCluster() throws Exception { assumeTrue("this test should only run against the mixed cluster", CLUSTER_TYPE == ClusterType.UPGRADED); - Request getRequest = new Request("GET", "token_backwards_compatibility_it/doc/old_cluster_token2"); - getRequest.setOptions(expectWarnings(RestGetAction.TYPES_DEPRECATION_MESSAGE)); + Request getRequest = new Request("GET", "token_backwards_compatibility_it/_doc/old_cluster_token2"); Response getResponse = client().performRequest(getRequest); assertOK(getResponse); @@ -168,8 +140,7 @@ public void testUpgradedCluster() throws Exception { assertOK(invalidationResponse); assertTokenDoesNotWork(token); - getRequest = new Request("GET", "token_backwards_compatibility_it/doc/old_cluster_token1"); - getRequest.setOptions(expectWarnings(RestGetAction.TYPES_DEPRECATION_MESSAGE)); + getRequest = new Request("GET", "token_backwards_compatibility_it/_doc/old_cluster_token1"); getResponse = client().performRequest(getRequest); source = (Map) entityAsMap(getResponse).get("_source"); diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/40_ml_datafeed_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/40_ml_datafeed_crud.yml index bbe26eb69bd20..d7dba6b3b2d3a 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/40_ml_datafeed_crud.yml +++ 
b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/40_ml_datafeed_crud.yml @@ -28,7 +28,6 @@ { "job_id":"old-cluster-datafeed-job", "indices":["airline-data"], - "types":["response"], "scroll_size": 2000 }
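A recurring adjustment in the FullClusterRestartIT changes above is the shape of hits.total: pre-7.0 clusters return it as a bare integer, while 7.0+ returns an object such as {"value": 42, "relation": "eq"} unless the rest_total_hits_as_int parameter is set, which is why waitForHits now branches on the old cluster version. A small version-agnostic sketch that keys off the response shape instead of the version; the class and method names are hypothetical, not part of this change:

    import java.util.Map;

    final class HitsTotal {
        private HitsTotal() {}

        // Accept both response shapes: a bare number (pre-7.0) and an object with a
        // "value" field (7.0+ without rest_total_hits_as_int).
        @SuppressWarnings("unchecked")
        static int extract(Map<String, Object> hits) {
            Object total = hits.get("total");
            if (total instanceof Map) {
                return ((Number) ((Map<String, Object>) total).get("value")).intValue();
            }
            return ((Number) total).intValue();
        }
    }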