diff --git a/TESTING.asciidoc b/TESTING.asciidoc index c33ce2030c3a8..44f6ba2bcadae 100644 --- a/TESTING.asciidoc +++ b/TESTING.asciidoc @@ -229,34 +229,6 @@ Pass arbitrary jvm arguments. ./gradlew test -Dtests.jvm.argline="-Djava.security.debug=access,failure" ------------------------------ -== Backwards Compatibility Tests - -Running backwards compatibility tests is disabled by default since it -requires a release version of elasticsearch to be present on the test system. -To run backwards compatibility tests untar or unzip a release and run the tests -with the following command: - ---------------------------------------------------------------------------- -./gradlew test -Dtests.filter="@backwards" -Dtests.bwc.version=x.y.z -Dtests.bwc.path=/path/to/elasticsearch -Dtests.security.manager=false ---------------------------------------------------------------------------- - -Note that backwards tests must be run with security manager disabled. -If the elasticsearch release is placed under `./backwards/elasticsearch-x.y.z` the path -can be omitted: - ---------------------------------------------------------------------------- -./gradlew test -Dtests.filter="@backwards" -Dtests.bwc.version=x.y.z -Dtests.security.manager=false ---------------------------------------------------------------------------- - -To setup the bwc test environment execute the following steps (provided you are -already in your elasticsearch clone): - ---------------------------------------------------------------------------- -$ mkdir backwards && cd backwards -$ curl -O https://download.elasticsearch.org/elasticsearch/elasticsearch/elasticsearch-1.2.1.tar.gz -$ tar -xzf elasticsearch-1.2.1.tar.gz ---------------------------------------------------------------------------- - == Running verification tasks To run all verification tasks, including static checks, unit tests, and integration tests: @@ -554,25 +526,28 @@ environment variable. == Testing backwards compatibility Backwards compatibility tests exist to test upgrading from each supported version -to the current version. To run all backcompat tests use: +to the current version. To run them all use: ------------------------------------------------- ./gradlew bwcTest ------------------------------------------------- -A specific version can be tested as well. For example, to test backcompat with +A specific version can be tested as well. For example, to test bwc with version 5.3.2 run: ------------------------------------------------- ./gradlew v5.3.2#bwcTest ------------------------------------------------- -When running `./gradlew check`, some minimal backcompat checks are run. Which version -is tested depends on the branch. On master, this will test against the current -stable branch. On the stable branch, it will test against the latest release -branch. Finally, on a release branch, it will test against the most recent release. +Tests are run for versions that are not yet released but with which the current version will be compatible. +These are automatically checked out and built from source. +See link:./buildSrc/src/main/java/org/elasticsearch/gradle/VersionCollection.java[VersionCollection] +and link:./distribution/bwc/build.gradle[distribution/bwc/build.gradle] +for more information. + +When running `./gradlew check`, minimal bwc checks are also run against compatible versions that are not yet released.
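+
+For example, if 6.4.1 happens to be one of these unreleased versions on your
+branch (the exact numbers depend on the branch, see VersionCollection), the
+same task pattern as above should apply:
+
+-------------------------------------------------
+./gradlew v6.4.1#bwcTest
+-------------------------------------------------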
-=== BWC Testing against a specific remote/branch +==== BWC Testing against a specific remote/branch Sometimes a backward compatibility change spans two versions. A common case is new functionality that needs a BWC bridge in an unreleased version of a release branch (for example, 5.x). @@ -597,7 +572,7 @@ will contain your change. . Push both branches to your remote repository. . Run the tests with `./gradlew check -Dtests.bwc.remote=${remote} -Dtests.bwc.refspec.5.x=index_req_bwc_5.x`. -== Skip fetching latest +==== Skip fetching latest For some BWC testing scenarios, you want to use the local clone of the repository without fetching latest. For these use cases, you can set the system diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/Allocators.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/Allocators.java index 591fa400d18da..39a2bdfca0953 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/Allocators.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/Allocators.java @@ -46,10 +46,6 @@ public final class Allocators { private static class NoopGatewayAllocator extends GatewayAllocator { public static final NoopGatewayAllocator INSTANCE = new NoopGatewayAllocator(); - protected NoopGatewayAllocator() { - super(Settings.EMPTY); - } - @Override public void applyStartedShards(RoutingAllocation allocation, List<ShardRouting> startedShards) { // noop @@ -79,7 +75,7 @@ public static AllocationService createAllocationService(Settings settings) throw public static AllocationService createAllocationService(Settings settings, ClusterSettings clusterSettings) throws InvocationTargetException, NoSuchMethodException, InstantiationException, IllegalAccessException { - return new AllocationService(settings, + return new AllocationService( defaultAllocationDeciders(settings, clusterSettings), NoopGatewayAllocator.INSTANCE, new BalancedShardsAllocator(settings), EmptyClusterInfoService.INSTANCE); } @@ -88,7 +84,7 @@ public static AllocationDeciders defaultAllocationDeciders(Settings settings, Cl IllegalAccessException, InvocationTargetException, InstantiationException, NoSuchMethodException { Collection<AllocationDecider> deciders = ClusterModule.createAllocationDeciders(settings, clusterSettings, Collections.emptyList()); - return new AllocationDeciders(settings, deciders); + return new AllocationDeciders(deciders); } diff --git a/build.gradle b/build.gradle index 7c7e065671928..3a6196a21b7e4 100644 --- a/build.gradle +++ b/build.gradle @@ -39,7 +39,7 @@ if (properties.get("org.elasticsearch.acceptScanTOS", "false") == "true") { // common maven publishing configuration subprojects { group = 'org.elasticsearch' - version = VersionProperties.elasticsearch.toString() + version = VersionProperties.elasticsearch description = "Elasticsearch subproject ${project.path}" } @@ -103,10 +103,6 @@ subprojects { * in a branch if there are only betas and rcs in the branch so we have * *something* to test against.
*/ VersionCollection versions = new VersionCollection(file('server/src/main/java/org/elasticsearch/Version.java').readLines('UTF-8')) -if (versions.currentVersion != VersionProperties.elasticsearch) { - throw new GradleException("The last version in Versions.java [${versions.currentVersion}] does not match " + - "VersionProperties.elasticsearch [${VersionProperties.elasticsearch}]") -} // build metadata from previous build, contains e.g. hashes for bwc builds String buildMetadataValue = System.getenv('BUILD_METADATA') @@ -140,26 +136,16 @@ task verifyVersions { if (gradle.startParameter.isOffline()) { throw new GradleException("Must run in online mode to verify versions") } - // Read the list from maven central - Node xml + // Read the list from maven central. + // Fetch the metadata and parse the xml into Version instances because it's more straightforward here + // than in bwcVersions (VersionCollection). new URL('https://repo1.maven.org/maven2/org/elasticsearch/elasticsearch/maven-metadata.xml').openStream().withStream { s -> - xml = new XmlParser().parse(s) - } - Set<Version> knownVersions = new TreeSet<>(xml.versioning.versions.version.collect { it.text() }.findAll { it ==~ /\d+\.\d+\.\d+/ }.collect { Version.fromString(it) }) - - // Limit the known versions to those that should be index compatible, and are not future versions - knownVersions = knownVersions.findAll { it.major >= bwcVersions.currentVersion.major - 1 && it.before(VersionProperties.elasticsearch) } - - /* Limit the listed versions to those that have been marked as released. - * Versions not marked as released don't get the same testing and we want - * to make sure that we flip all unreleased versions to released as soon - * as possible after release. */ - Set<Version> actualVersions = new TreeSet<>(bwcVersions.indexCompatible.findAll { false == it.snapshot }) - - // Finally, compare! - if (knownVersions.equals(actualVersions) == false) { - throw new GradleException("out-of-date released versions\nActual :" + actualVersions + "\nExpected:" + knownVersions + - "\nUpdate Version.java.
Note that Version.CURRENT doesn't count because it is not released.") + bwcVersions.compareToAuthoritative( + new XmlParser().parse(s) + .versioning.versions.version + .collect { it.text() }.findAll { it ==~ /\d+\.\d+\.\d+/ } + .collect { Version.fromString(it) } + ) } } } @@ -255,20 +241,17 @@ subprojects { "org.elasticsearch.plugin:percolator-client:${version}": ':modules:percolator', "org.elasticsearch.plugin:rank-eval-client:${version}": ':modules:rank-eval', ] - - bwcVersions.snapshotProjectNames.each { snapshotName -> - Version snapshot = bwcVersions.getSnapshotForProject(snapshotName) - if (snapshot != null ) { - String snapshotProject = ":distribution:bwc:${snapshotName}" - project(snapshotProject).ext.bwcVersion = snapshot - ext.projectSubstitutions["org.elasticsearch.distribution.deb:elasticsearch:${snapshot}"] = snapshotProject - ext.projectSubstitutions["org.elasticsearch.distribution.rpm:elasticsearch:${snapshot}"] = snapshotProject - ext.projectSubstitutions["org.elasticsearch.distribution.zip:elasticsearch:${snapshot}"] = snapshotProject - if (snapshot.onOrAfter('6.3.0')) { - ext.projectSubstitutions["org.elasticsearch.distribution.deb:elasticsearch-oss:${snapshot}"] = snapshotProject - ext.projectSubstitutions["org.elasticsearch.distribution.rpm:elasticsearch-oss:${snapshot}"] = snapshotProject - ext.projectSubstitutions["org.elasticsearch.distribution.zip:elasticsearch-oss:${snapshot}"] = snapshotProject - } + // substitute unreleased versions with projects that check out and build locally + bwcVersions.forPreviousUnreleased { VersionCollection.UnreleasedVersionInfo unreleasedVersion -> + Version unreleased = unreleasedVersion.version + String snapshotProject = ":distribution:bwc:${unreleasedVersion.gradleProjectName}" + ext.projectSubstitutions["org.elasticsearch.distribution.deb:elasticsearch:${unreleased}"] = snapshotProject + ext.projectSubstitutions["org.elasticsearch.distribution.rpm:elasticsearch:${unreleased}"] = snapshotProject + ext.projectSubstitutions["org.elasticsearch.distribution.zip:elasticsearch:${unreleased}"] = snapshotProject + if (unreleased.onOrAfter('6.3.0')) { + ext.projectSubstitutions["org.elasticsearch.distribution.deb:elasticsearch-oss:${unreleased}"] = snapshotProject + ext.projectSubstitutions["org.elasticsearch.distribution.rpm:elasticsearch-oss:${unreleased}"] = snapshotProject + ext.projectSubstitutions["org.elasticsearch.distribution.zip:elasticsearch-oss:${unreleased}"] = snapshotProject } } @@ -303,7 +286,7 @@ subprojects { // other packages (e.g org.elasticsearch.client) will point to server rather than // their own artifacts. if (project.plugins.hasPlugin(BuildPlugin) || project.plugins.hasPlugin(PluginBuildPlugin)) { - String artifactsHost = VersionProperties.elasticsearch.isSnapshot() ? "https://snapshots.elastic.co" : "https://artifacts.elastic.co" + String artifactsHost = VersionProperties.elasticsearch.endsWith("-SNAPSHOT") ? "https://snapshots.elastic.co" : "https://artifacts.elastic.co" Closure sortClosure = { a, b -> b.group <=> a.group } Closure depJavadocClosure = { shadowed, dep -> if (dep.group == null || false == dep.group.startsWith('org.elasticsearch')) { @@ -576,11 +559,13 @@ wrapper { } } -/* Remove assemble/dependenciesInfo on all qa projects because we don't need to publish - * artifacts for them. */ gradle.projectsEvaluated { subprojects { - if (project.path.startsWith(':qa')) { + /* + * Remove assemble/dependenciesInfo on all qa projects because we don't + * need to publish artifacts for them. 
+ */ + if (project.name.equals('qa') || project.path.contains(':qa:')) { Task assemble = project.tasks.findByName('assemble') if (assemble) { assemble.enabled = false diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index c201572d4ab5e..ce0162f7bf252 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -44,46 +44,33 @@ if (project == rootProject) { * Propagating version.properties to the rest of the build * *****************************************************************************/ -Properties props = new Properties() -props.load(project.file('version.properties').newDataInputStream()) -version = props.getProperty('elasticsearch') -boolean snapshot = "true".equals(System.getProperty("build.snapshot", "true")); -if (snapshot) { - // we update the version property to reflect if we are building a snapshot or a release build - // we write this back out below to load it in the Build.java which will be shown in rest main action - // to indicate this being a snapshot build or a release build. - version += "-SNAPSHOT" - props.put("elasticsearch", version); -} - -File tempPropertiesFile = new File(project.buildDir, "version.properties") -task writeVersionProperties { - inputs.properties(props) - outputs.file(tempPropertiesFile) +// we update the version property to reflect if we are building a snapshot or a release build +// we write this back out below to load it in Build.java, which is shown in the rest main action +// to indicate this being a snapshot build or a release build. +File propsFile = project.file('version.properties') +Properties props = VersionPropertiesLoader.loadBuildSrcVersion(propsFile) +version = props.getProperty("elasticsearch") +processResources { + inputs.file(propsFile) + // We need to be explicit with the version because we add snapshot and qualifier to it based on properties + inputs.property("dynamic_elasticsearch_version", props.getProperty("elasticsearch")) doLast { - OutputStream stream = Files.newOutputStream(tempPropertiesFile.toPath()); + Writer writer = file("$destinationDir/version.properties").newWriter() try { - props.store(stream, "UTF-8"); + props.store(writer, "Generated version properties") } finally { - stream.close(); + writer.close() } } } -processResources { - dependsOn writeVersionProperties - from tempPropertiesFile -} - - -if (JavaVersion.current() < JavaVersion.VERSION_1_10) { - throw new GradleException('At least Java 10 is required to build elasticsearch gradle tools') -} - /***************************************************************************** * Java version * *****************************************************************************/ +if (JavaVersion.current() < JavaVersion.VERSION_11) { + throw new GradleException('At least Java 11 is required to build elasticsearch gradle tools') +} // Gradle 4.10 does not support setting this to 11 yet targetCompatibility = "10" sourceCompatibility = "10" @@ -238,3 +225,42 @@ if (project != rootProject) { generatePomFileForPluginMavenPublication.enabled = false } } + +// Define this here because we need it early.
+class VersionPropertiesLoader { + static Properties loadBuildSrcVersion(File input) throws IOException { + Properties props = new Properties(); + InputStream is = new FileInputStream(input) + try { + props.load(is) + } finally { + is.close() + } + loadBuildSrcVersion(props, System.getProperties()) + return props + } + + protected static void loadBuildSrcVersion(Properties loadedProps, Properties systemProperties) { + String elasticsearch = loadedProps.getProperty("elasticsearch") + if (elasticsearch == null) { + throw new IllegalStateException("Elasticsearch version is missing from properties.") + } + if (elasticsearch.matches("[0-9]+\\.[0-9]+\\.[0-9]+") == false) { + throw new IllegalStateException( + "Expected elasticsearch version to be numbers only of the form X.Y.Z but it was: " + + elasticsearch + ) + } + String qualifier = systemProperties.getProperty("build.version_qualifier", ""); + if (qualifier.isEmpty() == false) { + if (qualifier.matches("(alpha|beta|rc)\\d+") == false) { + throw new IllegalStateException("Invalid qualifier: " + qualifier) + } + elasticsearch += "-" + qualifier + } + if ("true".equals(systemProperties.getProperty("build.snapshot", "true"))) { + elasticsearch += "-SNAPSHOT" + } + loadedProps.put("elasticsearch", elasticsearch) + } +} diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index f42dde73f73de..d9af2f25dcba0 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -242,11 +242,11 @@ class BuildPlugin implements Plugin<Project> { compilerJavaHome = findJavaHome(compilerJavaProperty) } if (compilerJavaHome == null) { - if (System.getProperty("idea.active") != null || System.getProperty("eclipse.launcher") != null) { + if (System.getProperty("idea.executable") != null || System.getProperty("eclipse.launcher") != null) { // IntelliJ does not set JAVA_HOME, so we use the JDK that Gradle was run with return Jvm.current().javaHome } else { throw new GradleException( "JAVA_HOME must be set to build Elasticsearch. " + "Note that if the variable was just set you might have to run `./gradlew --stop` for " + "it to be picked up. See https://github.com/elastic/elasticsearch/issues/31399 for details."
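To make the effect of the `VersionPropertiesLoader` defined above concrete, here is a small illustrative sketch of the derivation it performs; the input value `7.0.0` and the direct call to the two-argument overload are assumptions made for the example, not part of this change:

---------------------------------------------------------------------------
// Sketch only: derive the effective version the way loadBuildSrcVersion does,
// assuming version.properties declares elasticsearch = 7.0.0.
Properties loaded = new Properties();
loaded.setProperty("elasticsearch", "7.0.0");
Properties sys = new Properties();              // build.snapshot defaults to "true"
VersionPropertiesLoader.loadBuildSrcVersion(loaded, sys);
assert loaded.getProperty("elasticsearch").equals("7.0.0-SNAPSHOT");

loaded.setProperty("elasticsearch", "7.0.0");   // reset the input
sys.setProperty("build.version_qualifier", "alpha1");
VersionPropertiesLoader.loadBuildSrcVersion(loaded, sys);
assert loaded.getProperty("elasticsearch").equals("7.0.0-alpha1-SNAPSHOT");

loaded.setProperty("elasticsearch", "7.0.0");   // reset again
sys.setProperty("build.snapshot", "false");     // release build, qualifier still set
VersionPropertiesLoader.loadBuildSrcVersion(loaded, sys);
assert loaded.getProperty("elasticsearch").equals("7.0.0-alpha1");
---------------------------------------------------------------------------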
@@ -696,18 +697,12 @@ class BuildPlugin implements Plugin { jarTask.destinationDir = new File(project.buildDir, 'distributions') // fixup the jar manifest jarTask.doFirst { - final Version versionWithoutSnapshot = new Version( - VersionProperties.elasticsearch.major, - VersionProperties.elasticsearch.minor, - VersionProperties.elasticsearch.revision, - VersionProperties.elasticsearch.suffix, - false) // this doFirst is added before the info plugin, therefore it will run // after the doFirst added by the info plugin, and we can override attributes jarTask.manifest.attributes( - 'X-Compile-Elasticsearch-Version': versionWithoutSnapshot, + 'X-Compile-Elasticsearch-Version': VersionProperties.elasticsearch.replace("-SNAPSHOT", ""), 'X-Compile-Lucene-Version': VersionProperties.lucene, - 'X-Compile-Elasticsearch-Snapshot': VersionProperties.elasticsearch.isSnapshot(), + 'X-Compile-Elasticsearch-Snapshot': VersionProperties.isElasticsearchSnapshot(), 'Build-Date': ZonedDateTime.now(ZoneOffset.UTC), 'Build-Java-Version': project.compilerJavaVersion) if (jarTask.manifest.attributes.containsKey('Change') == false) { diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/VersionCollection.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/VersionCollection.groovy deleted file mode 100644 index c69958ddad4b3..0000000000000 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/VersionCollection.groovy +++ /dev/null @@ -1,353 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.gradle - -import org.gradle.api.GradleException -import org.gradle.api.InvalidUserDataException - -import java.util.regex.Matcher - -/** - * The collection of version constants declared in Version.java, for use in BWC testing. - * - * if major+1 released: released artifacts from $version down to major-1.highestMinor.highestPatch, none of these should be snapshots, period. - * if major+1 unreleased: - * - if released: - * -- caveat 0: snapshot for the major-1.highestMinor.highestPatch - * - if unreleased: - * -- caveat 0: snapshot for the major-1.highestMinor.highestPatch - * -- caveat 1: every same major lower minor branch should also be tested if its released, and if not, its a snapshot. There should only be max 2 of these. - * -- caveat 2: the largest released minor branch before the unreleased minor should also be a snapshot - * -- caveat 3: if the current version is a different major than the previous rules apply to major - 1 of the current version - * - * Please note that the caveat's also correspond with the 4 types of snapshots. - * - Caveat 0 - always maintenanceBugfixSnapshot. - * - Caveat 1 - This is tricky. If caveat 3 applies, the highest matching value is nextMinorSnapshot, if there is another it is the stagedMinorSnapshot. 
- * If caveat 3 does not apply then the only possible value is the stagedMinorSnapshot. - * - Caveat 2 - always nextBugfixSnapshot - * - Caveat 3 - this only changes the applicability of Caveat 1 - * - * Notes on terminology: - * - The case for major+1 being released is accomplished through the isReleasableBranch value. If this is false, then the branch is no longer - * releasable, meaning not to test against any snapshots. - * - Released is defined as having > 1 suffix-free version in a major.minor series. For instance, only 6.2.0 means unreleased, but a - * 6.2.0 and 6.2.1 mean that 6.2.0 was released already. - */ -class VersionCollection { - - private final List versions - Version nextMinorSnapshot - Version stagedMinorSnapshot - Version nextBugfixSnapshot - Version maintenanceBugfixSnapshot - final Version currentVersion - private final TreeSet versionSet = new TreeSet<>() - final List snapshotProjectNames = ['next-minor-snapshot', - 'staged-minor-snapshot', - 'next-bugfix-snapshot', - 'maintenance-bugfix-snapshot'] - - // When we roll 8.0 its very likely these will need to be extracted from this class - private final boolean isReleasableBranch = true - - /** - * Construct a VersionCollection from the lines of the Version.java file. The basic logic for the following is pretty straight forward. - - * @param versionLines The lines of the Version.java file. - */ - VersionCollection(List versionLines) { - final boolean buildSnapshot = System.getProperty("build.snapshot", "true") == "true" - - List versions = [] - // This class should be converted wholesale to use the treeset - - for (final String line : versionLines) { - final Matcher match = line =~ /\W+public static final Version V_(\d+)_(\d+)_(\d+)(_alpha\d+|_beta\d+|_rc\d+)? .*/ - if (match.matches()) { - final Version foundVersion = new Version( - Integer.parseInt(match.group(1)), Integer.parseInt(match.group(2)), - Integer.parseInt(match.group(3)), (match.group(4) ?: '').replace('_', '-'), false) - safeAddToSet(foundVersion) - } - } - - if (versionSet.empty) { - throw new GradleException("Unexpectedly found no version constants in Versions.java") - } - - // If the major version has been released, then remove all of the alpha/beta/rc versions that exist in the set - versionSet.removeAll { it.suffix.isEmpty() == false && isMajorReleased(it, versionSet) } - - // set currentVersion - Version lastVersion = versionSet.last() - currentVersion = new Version(lastVersion.major, lastVersion.minor, lastVersion.revision, lastVersion.suffix, buildSnapshot) - - // remove all of the potential alpha/beta/rc from the currentVersion - versionSet.removeAll { - it.suffix.isEmpty() == false && - it.major == currentVersion.major && - it.minor == currentVersion.minor && - it.revision == currentVersion.revision } - - // re-add the currentVersion to the set - versionSet.add(currentVersion) - - if (isReleasableBranch) { - if (isReleased(currentVersion)) { - // caveat 0 - if the minor has been released then it only has a maintenance version - // go back 1 version to get the last supported snapshot version of the line, which is a maint bugfix - Version highestMinor = getHighestPreviousMinor(currentVersion.major) - maintenanceBugfixSnapshot = replaceAsSnapshot(highestMinor) - } else { - // caveat 3 - if our currentVersion is a X.0.0, we need to check X-1 minors to see if they are released - if (currentVersion.minor == 0) { - for (Version version: getMinorTips(currentVersion.major - 1)) { - if (isReleased(version) == false) { - // caveat 1 - This should only ever 
contain 2 non released branches in flight. An example is 6.x is frozen, - // and 6.2 is cut but not yet released there is some simple logic to make sure that in the case of more than 2, - // it will bail. The order is that the minor snapshot is fulfilled first, and then the staged minor snapshot - if (nextMinorSnapshot == null) { - // it has not been set yet - nextMinorSnapshot = replaceAsSnapshot(version) - } else if (stagedMinorSnapshot == null) { - stagedMinorSnapshot = replaceAsSnapshot(version) - } else { - throw new GradleException("More than 2 snapshot version existed for the next minor and staged (frozen) minors.") - } - } else { - // caveat 2 - this is the last minor snap for this major, so replace the highest (last) one of these and break - nextBugfixSnapshot = replaceAsSnapshot(version) - // we only care about the largest minor here, so in the case of 6.1 and 6.0, it will only get 6.1 - break - } - } - // caveat 0 - now dip back 2 versions to get the last supported snapshot version of the line - Version highestMinor = getHighestPreviousMinor(currentVersion.major - 1) - maintenanceBugfixSnapshot = replaceAsSnapshot(highestMinor) - } else { - // caveat 3 did not apply. version is not a X.0.0, so we are somewhere on a X.Y line - // only check till minor == 0 of the major - for (Version version: getMinorTips(currentVersion.major)) { - if (isReleased(version) == false) { - // caveat 1 - This should only ever contain 0 or 1 branch in flight. An example is 6.x is frozen, and 6.2 is cut - // but not yet released there is some simple logic to make sure that in the case of more than 1, it will bail - if (stagedMinorSnapshot == null) { - stagedMinorSnapshot = replaceAsSnapshot(version) - } else { - throw new GradleException("More than 1 snapshot version existed for the staged (frozen) minors.") - } - } else { - // caveat 2 - this is the last minor snap for this major, so replace the highest (last) one of these and break - nextBugfixSnapshot = replaceAsSnapshot(version) - // we only care about the largest minor here, so in the case of 6.1 and 6.0, it will only get 6.1 - break - } - } - // caveat 0 - now dip back 1 version to get the last supported snapshot version of the line - Version highestMinor = getHighestPreviousMinor(currentVersion.major) - maintenanceBugfixSnapshot = replaceAsSnapshot(highestMinor) - } - } - } - - this.versions = Collections.unmodifiableList(versionSet.toList()) - } - - /** - * @return The list of versions read from the Version.java file - */ - List getVersions() { - return versions - } - - /** - * Index compat supports 1 previous entire major version. For instance, any 6.x test for this would test all of 5 up to that 6.x version - * - * @return All earlier versions that should be tested for index BWC with the current version. - */ - List getIndexCompatible() { - int actualMajor = (currentVersion.major == 5 ? 
2 : currentVersion.major - 1) - return versionSet - .tailSet(Version.fromString("${actualMajor}.0.0")) - .headSet(currentVersion) - .asList() - } - - /** - * Ensures the types of snapshot are not null and are also in the index compat list - */ - List getSnapshotsIndexCompatible() { - List compatSnapshots = [] - List allCompatVersions = getIndexCompatible() - if (allCompatVersions.contains(nextMinorSnapshot)) { - compatSnapshots.add(nextMinorSnapshot) - } - if (allCompatVersions.contains(stagedMinorSnapshot)) { - compatSnapshots.add(stagedMinorSnapshot) - } - if (allCompatVersions.contains(nextBugfixSnapshot)) { - compatSnapshots.add(nextBugfixSnapshot) - } - if (allCompatVersions.contains(maintenanceBugfixSnapshot)) { - compatSnapshots.add(maintenanceBugfixSnapshot) - } - - return compatSnapshots - } - - /** - * Wire compat supports the last minor of the previous major. For instance, any 6.x test would test 5.6 up to that 6.x version - * - * @return All earlier versions that should be tested for wire BWC with the current version. - */ - List getWireCompatible() { - // Get the last minor of the previous major - Version lowerBound = getHighestPreviousMinor(currentVersion.major) - return versionSet - .tailSet(Version.fromString("${lowerBound.major}.${lowerBound.minor}.0")) - .headSet(currentVersion) - .toList() - } - - /** - * Ensures the types of snapshot are not null and are also in the wire compat list - */ - List getSnapshotsWireCompatible() { - List compatSnapshots = [] - List allCompatVersions = getWireCompatible() - if (allCompatVersions.contains(nextMinorSnapshot)) { - compatSnapshots.add(nextMinorSnapshot) - } - if (allCompatVersions.contains(stagedMinorSnapshot)) { - compatSnapshots.add(stagedMinorSnapshot) - } - if (allCompatVersions.contains(nextBugfixSnapshot)) { - compatSnapshots.add(nextBugfixSnapshot) - } - if (allCompatVersions.contains(maintenanceBugfixSnapshot)) { - compatSnapshots.add(maintenanceBugfixSnapshot) - } - // There was no wire compat for the 2.x line - compatSnapshots.removeAll {it.major == 2} - - return compatSnapshots - } - - /** - * Grabs the proper snapshot based on the name passed in. These names should correspond with gradle project names under bwc. If you - * are editing this if/else it is only because you added another project under :distribution:bwc. Do not modify this method or its - * reasoning for throwing the exception unless you are sure that it will not harm :distribution:bwc. - */ - Version getSnapshotForProject(String snapshotProjectName) { - if (snapshotProjectName == 'next-minor-snapshot') { - return nextMinorSnapshot - } else if (snapshotProjectName == 'staged-minor-snapshot') { - return stagedMinorSnapshot - } else if (snapshotProjectName == 'maintenance-bugfix-snapshot') { - return maintenanceBugfixSnapshot - } else if (snapshotProjectName == 'next-bugfix-snapshot') { - return nextBugfixSnapshot - } else { - throw new InvalidUserDataException("Unsupported project name ${snapshotProjectName}") - } - } - - /** - * Uses basic logic about our releases to determine if this version has been previously released - */ - private boolean isReleased(Version version) { - return version.revision > 0 - } - - /** - * Validates that the count of non suffixed (alpha/beta/rc) versions in a given major to major+1 is greater than 1. - * This means that there is more than just a major.0.0 or major.0.0-alpha in a branch to signify it has been prevously released. 
- */ - private boolean isMajorReleased(Version version, TreeSet items) { - return items - .tailSet(Version.fromString("${version.major}.0.0")) - .headSet(Version.fromString("${version.major + 1}.0.0")) - .count { it.suffix.isEmpty() } // count only non suffix'd versions as actual versions that may be released - .intValue() > 1 - } - - /** - * Gets the largest version previous major version based on the nextMajorVersion passed in. - * If you have a list [5.0.2, 5.1.2, 6.0.1, 6.1.1] and pass in 6 for the nextMajorVersion, it will return you 5.1.2 - */ - private Version getHighestPreviousMinor(Integer nextMajorVersion) { - return versionSet.headSet(Version.fromString("${nextMajorVersion}.0.0")).last() - } - - /** - * Helper function for turning a version into a snapshot version, removing and readding it to the tree - */ - private Version replaceAsSnapshot(Version version) { - versionSet.remove(version) - Version snapshotVersion = new Version(version.major, version.minor, version.revision, version.suffix, true) - safeAddToSet(snapshotVersion) - return snapshotVersion - } - - /** - * Safely adds a value to the treeset, or bails if the value already exists. - * @param version - */ - private void safeAddToSet(Version version) { - if (versionSet.add(version) == false) { - throw new GradleException("Versions.java contains duplicate entries for ${version}") - } - } - - /** - * Gets the entire set of major.minor.* given those parameters. - */ - private SortedSet getMinorSetForMajor(Integer major, Integer minor) { - return versionSet - .tailSet(Version.fromString("${major}.${minor}.0")) - .headSet(Version.fromString("${major}.${minor + 1}.0")) - } - - /** - * Gets the entire set of major.* to the currentVersion - */ - private SortedSet getMajorSet(Integer major) { - return versionSet - .tailSet(Version.fromString("${major}.0.0")) - .headSet(currentVersion) - } - - /** - * Gets the tip of each minor set and puts it in a list. - * - * examples: - * [1.0.0, 1.1.0, 1.1.1, 1.2.0, 1.3.1] will return [1.0.0, 1.1.1, 1.2.0, 1.3.1] - * [1.0.0, 1.0.1, 1.0.2, 1.0.3, 1.0.4] will return [1.0.4] - */ - private List getMinorTips(Integer major) { - TreeSet majorSet = getMajorSet(major) - List minorList = new ArrayList<>() - for (int minor = majorSet.last().minor; minor >= 0; minor--) { - TreeSet minorSetInMajor = getMinorSetForMajor(major, minor) - minorList.add(minorSetInMajor.last()) - } - return minorList - } -} diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy index 9b2b1ca215673..881fce443a792 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/DocsTestPlugin.groovy @@ -42,7 +42,7 @@ public class DocsTestPlugin extends RestTestPlugin { * to the version being built for testing but needs to resolve to * the last released version for docs. */ '\\{version\\}': - VersionProperties.elasticsearch.toString().replace('-SNAPSHOT', ''), + VersionProperties.elasticsearch.replace('-SNAPSHOT', ''), '\\{lucene_version\\}' : VersionProperties.lucene.replaceAll('-snapshot-\\w+$', ''), '\\{build_flavor\\}' : project.integTestCluster.distribution.startsWith('oss-') ? 
'oss' : 'default', diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy index 9828f68991f9b..440127c456247 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy @@ -96,7 +96,7 @@ public class PluginBuildPlugin extends BuildPlugin { project.pluginProperties.extension.name + "-client" ) project.tasks.withType(GenerateMavenPom.class) { GenerateMavenPom generatePOMTask -> - generatePOMTask.ext.pomFileName = "${project.archivesBaseName}-client-${project.version}.pom" + generatePOMTask.ext.pomFileName = "${project.archivesBaseName}-client-${project.versions.elasticsearch}.pom" } } else { project.plugins.withType(MavenPublishPlugin).whenPluginAdded { diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginPropertiesTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginPropertiesTask.groovy index 9588f77a71db7..633647514ed7d 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginPropertiesTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginPropertiesTask.groovy @@ -76,7 +76,7 @@ class PluginPropertiesTask extends Copy { 'name': extension.name, 'description': extension.description, 'version': stringSnap(extension.version), - 'elasticsearchVersion': stringSnap(VersionProperties.elasticsearch.toString()), + 'elasticsearchVersion': stringSnap(VersionProperties.elasticsearch), 'javaVersion': project.targetCompatibility as String, 'classname': extension.classname, 'extendedPlugins': extension.extendedPlugins.join(','), diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy index 0e706aa5956f1..b5476ea96621b 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/PrecommitTasks.groovy @@ -22,6 +22,7 @@ import com.github.jengelman.gradle.plugins.shadow.ShadowPlugin import de.thetaphi.forbiddenapis.gradle.CheckForbiddenApis import de.thetaphi.forbiddenapis.gradle.ForbiddenApisPlugin import org.elasticsearch.gradle.ExportElasticsearchBuildResourcesTask +import org.elasticsearch.gradle.VersionProperties import org.gradle.api.JavaVersion import org.gradle.api.Project import org.gradle.api.Task @@ -220,7 +221,7 @@ class PrecommitTasks { private static Task configureLoggerUsage(Project project) { project.configurations.create('loggerUsagePlugin') project.dependencies.add('loggerUsagePlugin', - "org.elasticsearch.test:logger-usage:${org.elasticsearch.gradle.VersionProperties.elasticsearch}") + "org.elasticsearch.test:logger-usage:${VersionProperties.elasticsearch}") return project.tasks.create('loggerUsageCheck', LoggerUsageTask.class) { classpath = project.configurations.loggerUsagePlugin javaHome = project.runtimeJavaHome diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy index 4f3ade2dae296..52ed30880d374 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy @@ -98,7 +98,7 @@ class 
ClusterFormationTasks { // from here on everything else works the same as if it's the current version, we fetch the BWC version // from mirrors using gradles built-in mechanism etc. - configureDistributionDependency(project, config.distribution, bwcDistro, config.bwcVersion) + configureDistributionDependency(project, config.distribution, bwcDistro, config.bwcVersion.toString()) for (Map.Entry entry : config.plugins.entrySet()) { configureBwcPluginDependency(project, entry.getValue(), bwcPlugins, config.bwcVersion) } @@ -109,9 +109,12 @@ class ClusterFormationTasks { // we start N nodes and out of these N nodes there might be M bwc nodes. // for each of those nodes we might have a different configuration final Configuration distro - final Version elasticsearchVersion + final String elasticsearchVersion if (i < config.numBwcNodes) { - elasticsearchVersion = config.bwcVersion + elasticsearchVersion = config.bwcVersion.toString() + if (project.bwcVersions.unreleased.contains(config.bwcVersion)) { + elasticsearchVersion += "-SNAPSHOT" + } distro = bwcDistro } else { elasticsearchVersion = VersionProperties.elasticsearch @@ -130,8 +133,10 @@ class ClusterFormationTasks { } /** Adds a dependency on the given distribution */ - static void configureDistributionDependency(Project project, String distro, Configuration configuration, Version elasticsearchVersion) { - if (elasticsearchVersion.before('6.3.0') && distro.startsWith('oss-')) { + static void configureDistributionDependency(Project project, String distro, Configuration configuration, String elasticsearchVersion) { + if (Version.fromString(elasticsearchVersion).before('6.3.0') && + distro.startsWith('oss-') + ) { distro = distro.substring('oss-'.length()) } String packaging = distro @@ -201,7 +206,7 @@ class ClusterFormationTasks { setup = configureAddKeystoreFileTasks(prefix, project, setup, node) if (node.config.plugins.isEmpty() == false) { - if (node.nodeVersion == VersionProperties.elasticsearch) { + if (node.nodeVersion == Version.fromString(VersionProperties.elasticsearch)) { setup = configureCopyPluginsTask(taskName(prefix, node, 'copyPlugins'), project, setup, node, prefix) } else { setup = configureCopyBwcPluginsTask(taskName(prefix, node, 'copyBwcPlugins'), project, setup, node, prefix) @@ -558,7 +563,7 @@ class ClusterFormationTasks { static Task configureInstallPluginTask(String name, Project project, Task setup, NodeInfo node, String pluginName, String prefix) { final FileCollection pluginZip; - if (node.nodeVersion != VersionProperties.elasticsearch) { + if (node.nodeVersion != Version.fromString(VersionProperties.elasticsearch)) { pluginZip = project.configurations.getByName(pluginBwcConfigurationName(prefix, pluginName)) } else { pluginZip = project.configurations.getByName(pluginConfigurationName(prefix, pluginName)) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy index a0100767d8552..4c8546bcc8928 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/NodeInfo.groovy @@ -112,7 +112,7 @@ class NodeInfo { Version nodeVersion /** Holds node configuration for part of a test cluster. 
*/ - NodeInfo(ClusterConfiguration config, int nodeNum, Project project, String prefix, Version nodeVersion, File sharedDir) { + NodeInfo(ClusterConfiguration config, int nodeNum, Project project, String prefix, String nodeVersion, File sharedDir) { this.config = config this.nodeNum = nodeNum this.project = project @@ -124,7 +124,7 @@ class NodeInfo { } baseDir = new File(project.buildDir, "cluster/${prefix} node${nodeNum}") pidFile = new File(baseDir, 'es.pid') - this.nodeVersion = nodeVersion + this.nodeVersion = Version.fromString(nodeVersion) homeDir = homeDir(baseDir, config.distribution, nodeVersion) pathConf = pathConf(baseDir, config.distribution, nodeVersion) if (config.dataDir != null) { @@ -173,11 +173,11 @@ class NodeInfo { } - if (nodeVersion.before("6.2.0")) { + if (this.nodeVersion.before("6.2.0")) { javaVersion = 8 - } else if (nodeVersion.onOrAfter("6.2.0") && nodeVersion.before("6.3.0")) { + } else if (this.nodeVersion.onOrAfter("6.2.0") && this.nodeVersion.before("6.3.0")) { javaVersion = 9 - } else if (nodeVersion.onOrAfter("6.3.0") && nodeVersion.before("6.5.0")) { + } else if (this.nodeVersion.onOrAfter("6.3.0") && this.nodeVersion.before("6.5.0")) { javaVersion = 10 } @@ -200,7 +200,7 @@ class NodeInfo { else { env.put('ES_PATH_CONF', pathConf) } - if (nodeVersion.major == 5) { + if (this.nodeVersion.major == 5) { if (Os.isFamily(Os.FAMILY_WINDOWS)) { /* * We have to delay building the string as the path will not exist during configuration which will fail on Windows due to @@ -312,7 +312,7 @@ class NodeInfo { } /** Returns the directory elasticsearch home is contained in for the given distribution */ - static File homeDir(File baseDir, String distro, Version nodeVersion) { + static File homeDir(File baseDir, String distro, String nodeVersion) { String path switch (distro) { case 'integ-test-zip': @@ -332,7 +332,7 @@ class NodeInfo { return new File(baseDir, path) } - static File pathConf(File baseDir, String distro, Version nodeVersion) { + static File pathConf(File baseDir, String distro, String nodeVersion) { switch (distro) { case 'integ-test-zip': case 'zip': diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/Version.java b/buildSrc/src/main/java/org/elasticsearch/gradle/Version.java index f9f1791a9569f..460df76ace198 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/Version.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/Version.java @@ -12,44 +12,20 @@ public final class Version implements Comparable { private final int minor; private final int revision; private final int id; - private final boolean snapshot; - /** - * Suffix on the version name. - */ - private final String suffix; private static final Pattern pattern = Pattern.compile("(\\d)+\\.(\\d+)\\.(\\d+)(-alpha\\d+|-beta\\d+|-rc\\d+)?(-SNAPSHOT)?"); - public Version(int major, int minor, int revision, String suffix, boolean snapshot) { + public Version(int major, int minor, int revision) { Objects.requireNonNull(major, "major version can't be null"); Objects.requireNonNull(minor, "minor version can't be null"); Objects.requireNonNull(revision, "revision version can't be null"); this.major = major; this.minor = minor; this.revision = revision; - this.snapshot = snapshot; - this.suffix = suffix == null ? 
"" : suffix; - - int suffixOffset = 0; - if (this.suffix.isEmpty()) { - // no suffix will be considered smaller, uncomment to change that - // suffixOffset = 100; - } else { - if (this.suffix.contains("alpha")) { - suffixOffset += parseSuffixNumber(this.suffix.substring(6)); - } else if (this.suffix.contains("beta")) { - suffixOffset += 25 + parseSuffixNumber(this.suffix.substring(5)); - } else if (this.suffix.contains("rc")) { - suffixOffset += 50 + parseSuffixNumber(this.suffix.substring(3)); - } - else { - throw new IllegalArgumentException("Suffix must contain one of: alpha, beta or rc"); - } - } // currently snapshot is not taken into account - this.id = major * 10000000 + minor * 100000 + revision * 1000 + suffixOffset * 10 /*+ (snapshot ? 1 : 0)*/; + this.id = major * 10000000 + minor * 100000 + revision * 1000; } private static int parseSuffixNumber(String substring) { @@ -71,17 +47,13 @@ public static Version fromString(final String s) { return new Version( Integer.parseInt(matcher.group(1)), parseSuffixNumber(matcher.group(2)), - parseSuffixNumber(matcher.group(3)), - matcher.group(4), - matcher.group(5) != null + parseSuffixNumber(matcher.group(3)) ); } @Override public String toString() { - final String snapshotStr = snapshot ? "-SNAPSHOT" : ""; - return String.valueOf(getMajor()) + "." + String.valueOf(getMinor()) + "." + String.valueOf(getRevision()) + - (suffix == null ? "" : suffix) + snapshotStr; + return String.valueOf(getMajor()) + "." + String.valueOf(getMinor()) + "." + String.valueOf(getRevision()); } public boolean before(Version compareTo) { @@ -116,19 +88,6 @@ public boolean after(String compareTo) { return after(fromString(compareTo)); } - public boolean onOrBeforeIncludingSuffix(Version otherVersion) { - if (id != otherVersion.getId()) { - return id < otherVersion.getId(); - } - - if (suffix.equals("")) { - return otherVersion.getSuffix().equals(""); - } - - - return otherVersion.getSuffix().equals("") || suffix.compareTo(otherVersion.getSuffix()) < 0; - } - @Override public boolean equals(Object o) { if (this == o) return true; @@ -136,16 +95,12 @@ public boolean equals(Object o) { Version version = (Version) o; return major == version.major && minor == version.minor && - revision == version.revision && - id == version.id && - snapshot == version.snapshot && - Objects.equals(suffix, version.suffix); + revision == version.revision; } @Override public int hashCode() { - - return Objects.hash(major, minor, revision, id, snapshot, suffix); + return Objects.hash(major, minor, revision, id); } public int getMajor() { @@ -164,14 +119,6 @@ protected int getId() { return id; } - public boolean isSnapshot() { - return snapshot; - } - - public String getSuffix() { - return suffix; - } - @Override public int compareTo(Version other) { return Integer.compare(getId(), other.getId()); diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/VersionCollection.java b/buildSrc/src/main/java/org/elasticsearch/gradle/VersionCollection.java new file mode 100644 index 0000000000000..1cf2fd9e1037c --- /dev/null +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/VersionCollection.java @@ -0,0 +1,328 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.gradle; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.function.Consumer; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static java.util.Collections.emptyList; +import static java.util.Collections.unmodifiableList; + +/** + * A container for elasticsearch supported version information used in BWC testing. + * + * Parse the Java source file containing the version declarations and use the known rules to figure out which are all + * the versions the current one is wire and index compatible with. + * On top of this, figure out which of these are unreleased and provide the branch they can be built from. + * + * Note that in this context, currentVersion is the unreleased version this build operates on. + * At any point in time there will surely be four such unreleased versions being worked on, + * thus currentVersion will be one of these. + * + * Considering: + * <dl>
+ *     <dt>M, M > 0</dt>
+ *     <dd>last released major</dd>
+ *     <dt>N, N > 0</dt>
+ *     <dd>last released minor</dd>
+ * </dl>
+ *
+ * <ul>
+ * <li>the unreleased major, M+1.0.0 on the `master` branch</li>
+ * <li>the unreleased minor, M.N.0 on the `M.x` (x is literal) branch</li>
+ * <li>the unreleased bugfix, M.N.c (c > 0) on the `M.N` branch</li>
+ * <li>the unreleased maintenance, M-1.d.e (d > 0, e > 0) on the `(M-1).d` branch</li>
+ * </ul>
+ * In addition to these, there will be a fifth one when a minor reaches feature freeze, we call this the staged
+ * version:
+ * <ul>
+ * <li>the unreleased staged, M.N-2.0 (N > 2) on the `M.(N-2)` branch</li>
+ * </ul>
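+ * Each of these unreleased versions, except the current one, maps to a Gradle project under `:distribution:bwc`
+ * (named minor, staged, bugfix or maintenance, after its role above) that checks out the matching branch and
+ * builds that version from source.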
+ * + * Each build is only concerned with versions before it, as those are the ones that need to be tested + * for backwards compatibility. We never look forward, and don't add forward facing version numbers to branches of previous + * versions. + * + * Each branch has a current version, and expected compatible versions are parsed from the server code's `Version` class. + * We can reliably figure out which the unreleased versions are due to the convention of always adding the next unreleased + * version number to server in all branches when a version is released. + * E.g. when M.N.c is released, M.N.c+1 is added to the Version class mentioned above in all the following branches: + * `M.N`, `M.x` and `master` so we can reliably assume that the leaves of the version tree are unreleased. + * This convention is enforced by checking the versions we consider to be unreleased against an + * authoritative source (maven central). + * We are then able to map the unreleased version to branches in git and Gradle projects that are capable of checking + * out and building them, so we can include these in the testing plan as well. + */ +public class VersionCollection { + + private static final Pattern LINE_PATTERN = Pattern.compile( + "\\W+public static final Version V_(\\d+)_(\\d+)_(\\d+)(_alpha\\d+|_beta\\d+|_rc\\d+)? .*" + ); + + private final Version currentVersion; + private final Map<Integer, List<Version>> groupByMajor; + + public class UnreleasedVersionInfo { + public final Version version; + public final String branch; + public final String gradleProjectName; + + UnreleasedVersionInfo(Version version, String branch, String gradleProjectName) { + this.version = version; + this.branch = branch; + this.gradleProjectName = gradleProjectName; + } + } + + public VersionCollection(List<String> versionLines) { + this(versionLines, Version.fromString(VersionProperties.getElasticsearch())); + } + + protected VersionCollection(List<String> versionLines, Version currentVersionProperty) { + groupByMajor = versionLines.stream() + .map(LINE_PATTERN::matcher) + .filter(Matcher::matches) + .map(match -> new Version( + Integer.parseInt(match.group(1)), + Integer.parseInt(match.group(2)), + Integer.parseInt(match.group(3)) + )) + .sorted() + .distinct() + .collect(Collectors.groupingBy(Version::getMajor, Collectors.toList())); + + if (groupByMajor.isEmpty()) { + throw new IllegalArgumentException("Could not parse any versions"); + } + + currentVersion = getLatestVersionByKey( + groupByMajor, + groupByMajor.keySet().stream().max(Integer::compareTo) + .orElseThrow(() -> new IllegalStateException("Unexpected number of versions in collection")) + ); + + assertCurrentVersionMatchesParsed(currentVersionProperty); + + assertNoOlderThanTwoMajors(); + } + + private void assertNoOlderThanTwoMajors() { + Set<Integer> majors = groupByMajor.keySet(); + if (majors.size() != 2 && currentVersion.getMinor() != 0 && currentVersion.getRevision() != 0) { + throw new IllegalStateException( + "Expected exactly 2 majors in parsed versions but found: " + majors + ); + } + } + + private void assertCurrentVersionMatchesParsed(Version currentVersionProperty) { + if (currentVersionProperty.equals(currentVersion) == false) { + throw new IllegalStateException( + "Parsed versions latest version does not match the one configured in build properties.
" + + "Parsed latest version is " + currentVersion + " but the build has " + + currentVersionProperty + ); + } + } + + public void forPreviousUnreleased(Consumer consumer) { + getUnreleased().stream() + .filter(version -> version.equals(currentVersion) == false) + .forEach(version -> consumer.accept( + new UnreleasedVersionInfo( + version, + getBranchFor(version), + getGradleProjectNameFor(version) + ) + )); + } + + private String getGradleProjectNameFor(Version version) { + if (version.equals(currentVersion)) { + throw new IllegalArgumentException("The Gradle project to build " + version + " is the current build."); + } + Map> releasedMajorGroupedByMinor = getReleasedMajorGroupedByMinor(); + + if (version.getRevision() == 0) { + if (releasedMajorGroupedByMinor + .get(releasedMajorGroupedByMinor.keySet().stream().max(Integer::compareTo).orElse(0)) + .contains(version)) { + return "minor"; + } else { + return "staged"; + } + } else { + if (releasedMajorGroupedByMinor + .getOrDefault(version.getMinor(), emptyList()) + .contains(version)) { + return "bugfix"; + } else { + return "maintenance"; + } + } + } + + private String getBranchFor(Version version) { + switch (getGradleProjectNameFor(version)) { + case "minor": + return version.getMajor() + ".x"; + case "staged": + case "maintenance": + case "bugfix": + return version.getMajor() + "." + version.getMinor(); + default: + throw new IllegalStateException("Unexpected Gradle project name"); + } + } + + public List getUnreleased() { + List unreleased = new ArrayList<>(); + // The current version is being worked, is always unreleased + unreleased.add(currentVersion); + + // the tip of the previous major is unreleased for sure, be it a minor or a bugfix + unreleased.add(getLatestVersionByKey(this.groupByMajor, currentVersion.getMajor() - 1)); + + final Map> groupByMinor = getReleasedMajorGroupedByMinor(); + int greatestMinor = groupByMinor.keySet().stream().max(Integer::compareTo).orElse(0); + + // the last bugfix for this minor series is always unreleased + unreleased.add(getLatestVersionByKey(groupByMinor, greatestMinor)); + + if (groupByMinor.get(greatestMinor).size() == 1) { + // we found an unreleased minor + unreleased.add(getLatestVersionByKey(groupByMinor, greatestMinor - 1)); + if (groupByMinor.getOrDefault(greatestMinor - 1, emptyList()).size() == 1) { + // we found that the previous minor is staged but not yet released + // in this case, the minor before that has a bugfix + unreleased.add(getLatestVersionByKey(groupByMinor, greatestMinor - 2)); + } + } + + return unmodifiableList( + unreleased.stream() + .sorted() + .distinct() + .collect(Collectors.toList()) + ); + } + + private Version getLatestVersionByKey(Map> groupByMajor, int key) { + return groupByMajor.getOrDefault(key, emptyList()).stream() + .max(Version::compareTo) + .orElseThrow(() -> new IllegalStateException("Unexpected number of versions in collection")); + } + + private Map> getReleasedMajorGroupedByMinor() { + List currentMajorVersions = groupByMajor.get(currentVersion.getMajor()); + List previousMajorVersions = groupByMajor.get(currentVersion.getMajor() - 1); + + final Map> groupByMinor; + if (currentMajorVersions.size() == 1) { + // Current is an unreleased major: x.0.0 so we have to look for other unreleased versions in the previous major + groupByMinor = previousMajorVersions.stream() + .collect(Collectors.groupingBy(Version::getMinor, Collectors.toList())); + } else { + groupByMinor = currentMajorVersions.stream() + 
+                .collect(Collectors.groupingBy(Version::getMinor, Collectors.toList()));
+        }
+        return groupByMinor;
+    }
+
+    public void compareToAuthoritative(List<Version> authoritativeReleasedVersions) {
+        Set<Version> notReallyReleased = new HashSet<>(getReleased());
+        notReallyReleased.removeAll(authoritativeReleasedVersions);
+        if (notReallyReleased.isEmpty() == false) {
+            throw new IllegalStateException(
+                "out-of-date released versions" +
+                    "\nFollowing versions are not really released, but the build thinks they are: " + notReallyReleased
+            );
+        }
+
+        Set<Version> incorrectlyConsideredUnreleased = new HashSet<>(authoritativeReleasedVersions);
+        incorrectlyConsideredUnreleased.retainAll(getUnreleased());
+        if (incorrectlyConsideredUnreleased.isEmpty() == false) {
+            throw new IllegalStateException(
+                "out-of-date released versions" +
+                    "\nBuild considers versions unreleased, " +
+                    "but they are released according to an authoritative source: " + incorrectlyConsideredUnreleased +
+                    "\nThe next versions probably need to be added to Version.java (CURRENT doesn't count)."
+            );
+        }
+    }
+
+    private List<Version> getReleased() {
+        List<Version> unreleased = getUnreleased();
+        return groupByMajor.values().stream()
+            .flatMap(Collection::stream)
+            .filter(each -> unreleased.contains(each) == false)
+            .collect(Collectors.toList());
+    }
+
+    public List<Version> getIndexCompatible() {
+        return unmodifiableList(
+            Stream.concat(
+                groupByMajor.get(currentVersion.getMajor() - 1).stream(),
+                groupByMajor.get(currentVersion.getMajor()).stream()
+            )
+                .filter(version -> version.equals(currentVersion) == false)
+                .collect(Collectors.toList())
+        );
+    }
+
+    public List<Version> getWireCompatible() {
+        List<Version> wireCompat = new ArrayList<>();
+
+        List<Version> prevMajors = groupByMajor.get(currentVersion.getMajor() - 1);
+        int minor = prevMajors.get(prevMajors.size() - 1).getMinor();
+        for (int i = prevMajors.size() - 1;
+             i > 0 && prevMajors.get(i).getMinor() == minor;
+             i--
+        ) {
+            wireCompat.add(prevMajors.get(i));
+        }
+        wireCompat.addAll(groupByMajor.get(currentVersion.getMajor()));
+        wireCompat.remove(currentVersion);
+        wireCompat.sort(Version::compareTo);
+
+        return unmodifiableList(wireCompat);
+    }
+
+    public List<Version> getUnreleasedIndexCompatible() {
+        List<Version> unreleasedIndexCompatible = new ArrayList<>(getIndexCompatible());
+        unreleasedIndexCompatible.retainAll(getUnreleased());
+        return unmodifiableList(unreleasedIndexCompatible);
+    }
+
+    public List<Version> getUnreleasedWireCompatible() {
+        List<Version> unreleasedWireCompatible = new ArrayList<>(getWireCompatible());
+        unreleasedWireCompatible.retainAll(getUnreleased());
+        return unmodifiableList(unreleasedWireCompatible);
+    }
+
+}
diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/VersionProperties.java b/buildSrc/src/main/java/org/elasticsearch/gradle/VersionProperties.java
index 9ee597eb25ad8..23ac9458b961d 100644
--- a/buildSrc/src/main/java/org/elasticsearch/gradle/VersionProperties.java
+++ b/buildSrc/src/main/java/org/elasticsearch/gradle/VersionProperties.java
@@ -10,7 +10,7 @@
  * Accessor for shared dependency versions used by elasticsearch, namely the elasticsearch and lucene versions.
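 * For example: {@code getElasticsearch()} returns the raw version string read from version.properties,
 * and {@code isElasticsearchSnapshot()} reports whether that string ends in "-SNAPSHOT".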
*/ public class VersionProperties { - public static Version getElasticsearch() { + public static String getElasticsearch() { return elasticsearch; } @@ -22,12 +22,12 @@ public static Map getVersions() { return versions; } - private static final Version elasticsearch; + private static final String elasticsearch; private static final String lucene; private static final Map versions = new HashMap(); static { Properties props = getVersionProperties(); - elasticsearch = Version.fromString(props.getProperty("elasticsearch")); + elasticsearch = props.getProperty("elasticsearch"); lucene = props.getProperty("lucene"); for (String property : props.stringPropertyNames()) { versions.put(property, props.getProperty(property)); @@ -38,13 +38,17 @@ private static Properties getVersionProperties() { Properties props = new Properties(); InputStream propsStream = VersionProperties.class.getResourceAsStream("/version.properties"); if (propsStream == null) { - throw new RuntimeException("/version.properties resource missing"); + throw new IllegalStateException("/version.properties resource missing"); } try { props.load(propsStream); } catch (IOException e) { - throw new RuntimeException(e); + throw new IllegalStateException("Failed to load version properties", e); } return props; } + + public static boolean isElasticsearchSnapshot() { + return elasticsearch.endsWith("-SNAPSHOT"); + } } diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java index 85931c7846b34..012e05f2f6c8d 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java @@ -25,16 +25,13 @@ import org.gradle.api.logging.Logging; import java.util.Objects; -import java.util.concurrent.Future; import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; public class ElasticsearchNode { private final String name; private final GradleServicesAdapter services; - private final AtomicInteger noOfClaims = new AtomicInteger(); - private final AtomicBoolean started = new AtomicBoolean(false); + private final AtomicBoolean configurationFrozen = new AtomicBoolean(false); private final Logger logger = Logging.getLogger(ElasticsearchNode.class); private Distribution distribution; @@ -54,7 +51,7 @@ public Version getVersion() { } public void setVersion(Version version) { - checkNotRunning(); + checkFrozen(); this.version = version; } @@ -63,47 +60,26 @@ public Distribution getDistribution() { } public void setDistribution(Distribution distribution) { - checkNotRunning(); + checkFrozen(); this.distribution = distribution; } - public void claim() { - noOfClaims.incrementAndGet(); + void start() { + logger.info("Starting `{}`", this); } - /** - * Start the cluster if not running. Does nothing if the cluster is already running. - * - * @return future of thread running in the background - */ - public Future start() { - if (started.getAndSet(true)) { - logger.lifecycle("Already started cluster: {}", name); - } else { - logger.lifecycle("Starting cluster: {}", name); - } - return null; + void stop(boolean tailLogs) { + logger.info("Stopping `{}`, tailLogs: {}", this, tailLogs); } - /** - * Stops a running cluster if it's not claimed. Does nothing otherwise. 
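The claim-based lifecycle being removed here is replaced by an explicit configuration freeze; a minimal sketch of the new contract (the setup and the `ZIP_OSS` constant name are assumptions based on the testKit project in this diff, not taken verbatim from the plugin code):

---------------------------------------------------------------------------
// Illustrative only: `services` stands in for GradleServicesAdapter.getInstance(project).
ElasticsearchNode node = new ElasticsearchNode("myTestCluster", services);
node.setDistribution(Distribution.ZIP_OSS);    // allowed while configuration is not frozen
node.freeze();                                 // locks the configuration
node.setVersion(Version.fromString("6.6.0"));  // throws IllegalStateException:
                                               // "Configuration can not be altered, already locked"
---------------------------------------------------------------------------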
- */ - public void unClaimAndStop() { - int decrementedClaims = noOfClaims.decrementAndGet(); - if (decrementedClaims > 0) { - logger.lifecycle("Not stopping {}, since cluster still has {} claim(s)", name, decrementedClaims); - return; - } - if (started.get() == false) { - logger.lifecycle("Asked to unClaimAndStop, but cluster was not running: {}", name); - return; - } - logger.lifecycle("Stopping {}, number of claims is {}", name, decrementedClaims); + public void freeze() { + logger.info("Locking configuration of `{}`", this); + configurationFrozen.set(true); } - private void checkNotRunning() { - if (started.get()) { - throw new IllegalStateException("Configuration can not be altered while running "); + private void checkFrozen() { + if (configurationFrozen.get()) { + throw new IllegalStateException("Configuration can not be altered, already locked"); } } @@ -119,4 +95,9 @@ public boolean equals(Object o) { public int hashCode() { return Objects.hash(name); } + + @Override + public String toString() { + return "ElasticsearchNode{name='" + name + "'}"; + } } diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java index fcd83a1f46101..5191c7d4febb2 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/TestClustersPlugin.java @@ -33,73 +33,171 @@ import java.util.ArrayList; import java.util.Collections; -import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.stream.Collectors; public class TestClustersPlugin implements Plugin { private static final String LIST_TASK_NAME = "listTestClusters"; private static final String NODE_EXTENSION_NAME = "testClusters"; + public static final String PROPERTY_TESTCLUSTERS_RUN_ONCE = "_testclusters_run_once"; private final Logger logger = Logging.getLogger(TestClustersPlugin.class); + // this is static because we need a single mapping across multi project builds, as some of the listeners we use, + // like task graph are singletons across multi project builds. + private static final Map> usedClusters = new ConcurrentHashMap<>(); + private static final Map claimsInventory = new ConcurrentHashMap<>(); + private static final Set runningClusters = Collections.synchronizedSet(new HashSet<>()); + @Override public void apply(Project project) { - NamedDomainObjectContainer container = project.container( + // enable the DSL to describe clusters + NamedDomainObjectContainer container = createTestClustersContainerExtension(project); + + // provide a task to be able to list defined clusters. 
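+        // (for example, `./gradlew listTestClusters` prints each configured cluster and its distribution)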
+        createListClustersTask(project, container);
+
+        // create DSL for tasks to mark the clusters they use
+        createUseClusterTaskExtension(project);
+
+        // There's a single Gradle instance for multi project builds, which means that some configuration needs to be
+        // done only once, even if the plugin is applied multiple times as a part of a multi project build
+        ExtraPropertiesExtension rootProperties = project.getRootProject().getExtensions().getExtraProperties();
+        if (rootProperties.has(PROPERTY_TESTCLUSTERS_RUN_ONCE) == false) {
+            rootProperties.set(PROPERTY_TESTCLUSTERS_RUN_ONCE, true);
+            // When running in the Daemon it's possible for this to hold references to past builds, so reset the
+            // bookkeeping here
+            usedClusters.clear();
+            claimsInventory.clear();
+            runningClusters.clear();
+
+            // When we know what tasks will run, we claim the clusters of those tasks to differentiate between clusters
+            // that are defined in the build script and the ones that will actually be used in this invocation of Gradle;
+            // we use this information to determine when the last task that required the cluster executed so that we can
+            // terminate the cluster right away and free up resources.
+            configureClaimClustersHook(project);
+
+            // Before each task, we determine if a cluster needs to be started for that task.
+            configureStartClustersHook(project);
+
+            // After each task we determine if there are clusters that are no longer needed.
+            configureStopClustersHook(project);
+        }
+    }
+
+    private NamedDomainObjectContainer<ElasticsearchNode> createTestClustersContainerExtension(Project project) {
+        // Create an extension that allows describing clusters
+        NamedDomainObjectContainer<ElasticsearchNode> container = project.container(
             ElasticsearchNode.class,
-            (name) -> new ElasticsearchNode(name, GradleServicesAdapter.getInstance(project))
+            name -> new ElasticsearchNode(
+                name,
+                GradleServicesAdapter.getInstance(project)
+            )
         );
         project.getExtensions().add(NODE_EXTENSION_NAME, container);
+        return container;
+    }
+
+    private void createListClustersTask(Project project, NamedDomainObjectContainer<ElasticsearchNode> container) {
         Task listTask = project.getTasks().create(LIST_TASK_NAME);
         listTask.setGroup("ES cluster formation");
         listTask.setDescription("Lists all ES clusters configured for this project");
         listTask.doLast((Task task) ->
-            container.forEach((ElasticsearchNode cluster) ->
+            container.forEach(cluster ->
                 logger.lifecycle(" * {}: {}", cluster.getName(), cluster.getDistribution())
             )
         );
+    }
 
-        Map<Task, List<ElasticsearchNode>> taskToCluster = new HashMap<>();
-
+    private void createUseClusterTaskExtension(Project project) {
         // register an extension for all current and future tasks, so that any task can declare that it wants to use a
         // specific cluster.
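        // e.g. a build script can declare (as the testKit projects in this diff do):
        //   task user1 {
        //       useCluster testClusters.myTestCluster
        //   }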
project.getTasks().all((Task task) -> task.getExtensions().findByType(ExtraPropertiesExtension.class) - .set( - "useCluster", - new Closure(this, this) { - public void doCall(ElasticsearchNode conf) { - taskToCluster.computeIfAbsent(task, k -> new ArrayList<>()).add(conf); - } - }) + .set( + "useCluster", + new Closure(this, task) { + public void doCall(ElasticsearchNode node) { + usedClusters.computeIfAbsent(task, k -> new ArrayList<>()).add(node); + } + }) ); + } + private void configureClaimClustersHook(Project project) { project.getGradle().getTaskGraph().whenReady(taskExecutionGraph -> taskExecutionGraph.getAllTasks() .forEach(task -> - taskToCluster.getOrDefault(task, Collections.emptyList()).forEach(ElasticsearchNode::claim) + usedClusters.getOrDefault(task, Collections.emptyList()).forEach(each -> { + synchronized (claimsInventory) { + claimsInventory.put(each, claimsInventory.getOrDefault(each, 0) + 1); + } + each.freeze(); + }) ) ); + } + + private void configureStartClustersHook(Project project) { project.getGradle().addListener( new TaskActionListener() { @Override public void beforeActions(Task task) { // we only start the cluster before the actions, so we'll not start it if the task is up-to-date - taskToCluster.getOrDefault(task, new ArrayList<>()).forEach(ElasticsearchNode::start); + final List clustersToStart; + synchronized (runningClusters) { + clustersToStart = usedClusters.getOrDefault(task,Collections.emptyList()).stream() + .filter(each -> runningClusters.contains(each) == false) + .collect(Collectors.toList()); + runningClusters.addAll(clustersToStart); + } + clustersToStart.forEach(ElasticsearchNode::start); + } @Override public void afterActions(Task task) {} } ); + } + + private void configureStopClustersHook(Project project) { project.getGradle().addListener( new TaskExecutionListener() { @Override public void afterExecute(Task task, TaskState state) { - // always un-claim the cluster, even if _this_ task is up-to-date, as others might not have been and caused the - // cluster to start. - taskToCluster.getOrDefault(task, new ArrayList<>()).forEach(ElasticsearchNode::unClaimAndStop); + // always unclaim the cluster, even if _this_ task is up-to-date, as others might not have been + // and caused the cluster to start. + List clustersUsedByTask = usedClusters.getOrDefault( + task, + Collections.emptyList() + ); + if (state.getFailure() != null) { + // If the task fails, and other tasks use this cluster, the other task will likely never be + // executed at all, so we will never get to un-claim and terminate it. + // The downside is that with multi project builds if that other task is in a different + // project and executing right now, we may terminate the cluster while it's running it. 
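+                        // stop(true) tails the cluster logs on shutdown, which is intended to aid diagnosing the failure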
+ clustersUsedByTask.forEach(each -> each.stop(true)); + } else { + clustersUsedByTask.forEach(each -> { + synchronized (claimsInventory) { + claimsInventory.put(each, claimsInventory.get(each) - 1); + } + }); + final List stoppable; + synchronized (runningClusters) { + stoppable = claimsInventory.entrySet().stream() + .filter(entry -> entry.getValue() == 0) + .filter(entry -> runningClusters.contains(entry.getKey())) + .map(Map.Entry::getKey) + .collect(Collectors.toList()); + } + stoppable.forEach(each -> each.stop(false)); + } } @Override public void beforeExecute(Task task) {} diff --git a/buildSrc/src/main/resources/checkstyle_suppressions.xml b/buildSrc/src/main/resources/checkstyle_suppressions.xml index 5fb10036a09aa..6a07c9058c836 100644 --- a/buildSrc/src/main/resources/checkstyle_suppressions.xml +++ b/buildSrc/src/main/resources/checkstyle_suppressions.xml @@ -82,20 +82,6 @@ - - - - - - - - - - - - - - @@ -142,29 +128,6 @@ - - - - - - - - - - - - - - - - - - - - - - - @@ -267,9 +230,6 @@ - - - @@ -293,33 +253,6 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/buildSrc/src/test/groovy/org/elasticsearch/gradle/VersionCollectionTests.groovy b/buildSrc/src/test/groovy/org/elasticsearch/gradle/VersionCollectionTests.groovy deleted file mode 100644 index ad36c84078398..0000000000000 --- a/buildSrc/src/test/groovy/org/elasticsearch/gradle/VersionCollectionTests.groovy +++ /dev/null @@ -1,236 +0,0 @@ -package org.elasticsearch.gradle - -import org.elasticsearch.gradle.test.GradleUnitTestCase -import org.junit.Test - -class VersionCollectionTests extends GradleUnitTestCase { - - String formatVersion(String version) { - return " public static final Version V_${version.replaceAll("\\.", "_")} " - } - List allVersions = [formatVersion('5.0.0'), formatVersion('5.0.0_alpha1'), formatVersion('5.0.0_alpha2'), formatVersion('5.0.0_beta1'), - formatVersion('5.0.0_rc1'),formatVersion('5.0.0_rc2'),formatVersion('5.0.1'), formatVersion('5.0.2'), - formatVersion('5.1.1'), formatVersion('5.1.2'), formatVersion('5.2.0'), formatVersion('5.2.1'), formatVersion('6.0.0'), - formatVersion('6.0.1'), formatVersion('6.1.0'), formatVersion('6.1.1'), formatVersion('6.2.0'), formatVersion('6.3.0'), - formatVersion('7.0.0_alpha1'), formatVersion('7.0.0_alpha2')] - - /** - * This validates the logic of being on a unreleased major branch with a staged major-1.minor sibling. This case happens when a version is - * branched from Major-1.x At the time of this writing 6.2 is unreleased and 6.3 is the 6.x branch. This test simulates the behavior - * from 7.0 perspective, or master at the time of this writing. 
- */ - @Test - void testAgainstMajorUnreleasedWithExistingStagedMinorRelease() { - VersionCollection vc = new VersionCollection(allVersions) - assertNotNull(vc) - assertEquals(vc.nextMinorSnapshot, Version.fromString("6.3.0-SNAPSHOT")) - assertEquals(vc.stagedMinorSnapshot, Version.fromString("6.2.0-SNAPSHOT")) - assertEquals(vc.nextBugfixSnapshot, Version.fromString("6.1.1-SNAPSHOT")) - assertEquals(vc.maintenanceBugfixSnapshot, Version.fromString("5.2.1-SNAPSHOT")) - - vc.indexCompatible.containsAll(vc.versions) - - // This should contain the same list sans the current version - List indexCompatList = [Version.fromString("6.0.0"), Version.fromString("6.0.1"), - Version.fromString("6.1.0"), Version.fromString("6.1.1-SNAPSHOT"), - Version.fromString("6.2.0-SNAPSHOT"), Version.fromString("6.3.0-SNAPSHOT")] - assertTrue(indexCompatList.containsAll(vc.indexCompatible)) - assertTrue(vc.indexCompatible.containsAll(indexCompatList)) - - List wireCompatList = [Version.fromString("6.3.0-SNAPSHOT")] - assertTrue(wireCompatList.containsAll(vc.wireCompatible)) - assertTrue(vc.wireCompatible.containsAll(wireCompatList)) - - assertEquals(vc.snapshotsIndexCompatible.size(), 3) - assertTrue(vc.snapshotsIndexCompatible.contains(Version.fromString("6.3.0-SNAPSHOT"))) - assertTrue(vc.snapshotsIndexCompatible.contains(Version.fromString("6.2.0-SNAPSHOT"))) - assertTrue(vc.snapshotsIndexCompatible.contains(Version.fromString("6.1.1-SNAPSHOT"))) - - assertEquals(vc.snapshotsWireCompatible.size(), 1) - assertEquals(vc.snapshotsWireCompatible.first(), Version.fromString("6.3.0-SNAPSHOT")) - } - - /** - * This validates the logic of being on a unreleased major branch without a staged major-1.minor sibling. This case happens once a staged, - * unreleased minor is released. At the time of this writing 6.2 is unreleased, so adding a 6.2.1 simulates a 6.2 release. This test - * simulates the behavior from 7.0 perspective, or master at the time of this writing. 
- */ - @Test - void testAgainstMajorUnreleasedWithoutStagedMinorRelease() { - List localVersion = allVersions.clone() - localVersion.add(formatVersion('6.2.1')) // release 6.2 - - VersionCollection vc = new VersionCollection(localVersion) - assertNotNull(vc) - assertEquals(vc.nextMinorSnapshot, Version.fromString("6.3.0-SNAPSHOT")) - assertEquals(vc.stagedMinorSnapshot, null) - assertEquals(vc.nextBugfixSnapshot, Version.fromString("6.2.1-SNAPSHOT")) - assertEquals(vc.maintenanceBugfixSnapshot, Version.fromString("5.2.1-SNAPSHOT")) - - vc.indexCompatible.containsAll(vc.versions) - - // This should contain the same list sans the current version - List indexCompatList = [Version.fromString("6.0.0"), Version.fromString("6.0.1"), - Version.fromString("6.1.0"), Version.fromString("6.1.1"), - Version.fromString("6.2.0"), Version.fromString("6.2.1-SNAPSHOT"), - Version.fromString("6.3.0-SNAPSHOT")] - assertTrue(indexCompatList.containsAll(vc.indexCompatible)) - assertTrue(vc.indexCompatible.containsAll(indexCompatList)) - - List wireCompatList = [Version.fromString("6.3.0-SNAPSHOT")] - assertTrue(wireCompatList.containsAll(vc.wireCompatible)) - assertTrue(vc.wireCompatible.containsAll(wireCompatList)) - - assertEquals(vc.snapshotsIndexCompatible.size(), 2) - assertTrue(vc.snapshotsIndexCompatible.contains(Version.fromString("6.3.0-SNAPSHOT"))) - assertTrue(vc.snapshotsIndexCompatible.contains(Version.fromString("6.2.1-SNAPSHOT"))) - - assertEquals(vc.snapshotsWireCompatible.size(), 1) - assertEquals(vc.snapshotsWireCompatible.first(), Version.fromString("6.3.0-SNAPSHOT")) - } - - /** - * This validates the logic of being on a unreleased minor branch with a staged minor sibling. This case happens when a version is - * branched from Major.x At the time of this writing 6.2 is unreleased and 6.3 is the 6.x branch. This test simulates the behavior - * from 6.3 perspective. 
- */ - @Test - void testAgainstMinorReleasedBranch() { - List localVersion = allVersions.clone() - localVersion.removeAll { it.toString().contains('7_0_0')} // remove all the 7.x so that the actual version is 6.3 (6.x) - VersionCollection vc = new VersionCollection(localVersion) - assertNotNull(vc) - assertEquals(vc.nextMinorSnapshot, null) - assertEquals(vc.stagedMinorSnapshot, Version.fromString("6.2.0-SNAPSHOT")) - assertEquals(vc.nextBugfixSnapshot, Version.fromString("6.1.1-SNAPSHOT")) - assertEquals(vc.maintenanceBugfixSnapshot, Version.fromString("5.2.1-SNAPSHOT")) - - // This should contain the same list sans the current version - List indexCompatList = vc.versions.subList(0, vc.versions.size() - 1) - assertTrue(indexCompatList.containsAll(vc.indexCompatible)) - assertTrue(vc.indexCompatible.containsAll(indexCompatList)) - - List wireCompatList = [Version.fromString("5.2.0"), Version.fromString("5.2.1-SNAPSHOT"), Version.fromString("6.0.0"), - Version.fromString("6.0.1"), Version.fromString("6.1.0"), Version.fromString("6.1.1-SNAPSHOT"), - Version.fromString("6.2.0-SNAPSHOT")] - assertTrue(wireCompatList.containsAll(vc.wireCompatible)) - assertTrue(vc.wireCompatible.containsAll(wireCompatList)) - - assertEquals(vc.snapshotsIndexCompatible.size(), 3) - assertTrue(vc.snapshotsIndexCompatible.contains(Version.fromString("6.2.0-SNAPSHOT"))) - assertTrue(vc.snapshotsIndexCompatible.contains(Version.fromString("6.1.1-SNAPSHOT"))) - assertTrue(vc.snapshotsIndexCompatible.contains(Version.fromString("5.2.1-SNAPSHOT"))) - - assertEquals(vc.snapshotsWireCompatible.size(), 3) - assertTrue(vc.snapshotsWireCompatible.contains(Version.fromString("6.2.0-SNAPSHOT"))) - assertTrue(vc.snapshotsWireCompatible.contains(Version.fromString("6.1.1-SNAPSHOT"))) - assertTrue(vc.snapshotsWireCompatible.contains(Version.fromString("5.2.1-SNAPSHOT"))) - } - - /** - * This validates the logic of being on a unreleased minor branch without a staged minor sibling. This case happens once a staged, - * unreleased minor is released. At the time of this writing 6.2 is unreleased, so adding a 6.2.1 simulates a 6.2 release. This test - * simulates the behavior from 6.3 perspective. 
- */ - @Test - void testAgainstMinorReleasedBranchNoStagedMinor() { - List localVersion = allVersions.clone() - // remove all the 7.x and add a 6.2.1 which means 6.2 was released - localVersion.removeAll { it.toString().contains('7_0_0')} - localVersion.add(formatVersion('6.2.1')) - VersionCollection vc = new VersionCollection(localVersion) - assertNotNull(vc) - assertEquals(vc.nextMinorSnapshot, null) - assertEquals(vc.stagedMinorSnapshot, null) - assertEquals(vc.nextBugfixSnapshot, Version.fromString("6.2.1-SNAPSHOT")) - assertEquals(vc.maintenanceBugfixSnapshot, Version.fromString("5.2.1-SNAPSHOT")) - - // This should contain the same list sans the current version - List indexCompatList = vc.versions.subList(0, vc.versions.size() - 1) - assertTrue(indexCompatList.containsAll(vc.indexCompatible)) - assertTrue(vc.indexCompatible.containsAll(indexCompatList)) - - List wireCompatList = [Version.fromString("5.2.0"), Version.fromString("5.2.1-SNAPSHOT"), Version.fromString("6.0.0"), - Version.fromString("6.0.1"), Version.fromString("6.1.0"), Version.fromString("6.1.1"), - Version.fromString("6.2.0"), Version.fromString("6.2.1-SNAPSHOT")] - assertTrue(wireCompatList.containsAll(vc.wireCompatible)) - assertTrue(vc.wireCompatible.containsAll(wireCompatList)) - - assertEquals(vc.snapshotsIndexCompatible.size(), 2) - assertTrue(vc.snapshotsIndexCompatible.contains(Version.fromString("6.2.1-SNAPSHOT"))) - assertTrue(vc.snapshotsIndexCompatible.contains(Version.fromString("5.2.1-SNAPSHOT"))) - - assertEquals(vc.snapshotsWireCompatible.size(), 2) - assertTrue(vc.snapshotsWireCompatible.contains(Version.fromString("6.2.1-SNAPSHOT"))) - assertTrue(vc.snapshotsWireCompatible.contains(Version.fromString("5.2.1-SNAPSHOT"))) - } - - /** - * This validates the logic of being on a released minor branch. At the time of writing, 6.2 is unreleased, so this is equivalent of being - * on 6.1. - */ - @Test - void testAgainstOldMinor() { - - List localVersion = allVersions.clone() - // remove the 7 alphas and the ones greater than 6.1 - localVersion.removeAll { it.toString().contains('7_0_0') || it.toString().contains('V_6_2') || it.toString().contains('V_6_3') } - VersionCollection vc = new VersionCollection(localVersion) - assertNotNull(vc) - assertEquals(vc.nextMinorSnapshot, null) - assertEquals(vc.stagedMinorSnapshot, null) - assertEquals(vc.nextBugfixSnapshot, null) - assertEquals(vc.maintenanceBugfixSnapshot, Version.fromString("5.2.1-SNAPSHOT")) - - // This should contain the same list sans the current version - List indexCompatList = vc.versions.subList(0, vc.versions.size() - 1) - assertTrue(indexCompatList.containsAll(vc.indexCompatible)) - assertTrue(vc.indexCompatible.containsAll(indexCompatList)) - - List wireCompatList = [Version.fromString("5.2.0"), Version.fromString("5.2.1-SNAPSHOT"), Version.fromString("6.0.0"), - Version.fromString("6.0.1"), Version.fromString("6.1.0")] - assertTrue(wireCompatList.containsAll(vc.wireCompatible)) - assertTrue(vc.wireCompatible.containsAll(wireCompatList)) - - assertEquals(vc.snapshotsIndexCompatible.size(), 1) - assertTrue(vc.snapshotsIndexCompatible.contains(Version.fromString("5.2.1-SNAPSHOT"))) - - assertEquals(vc.snapshotsWireCompatible.size(), 1) - assertTrue(vc.snapshotsWireCompatible.contains(Version.fromString("5.2.1-SNAPSHOT"))) - } - - /** - * This validates the lower bound of wire compat, which is 5.0. It also validates that the span of 2.x to 5.x if it is decided to port - * this fix all the way to the maint 5.6 release. 
- */ - @Test - void testFloorOfWireCompatVersions() { - List localVersion = [formatVersion('2.0.0'), formatVersion('2.0.1'), formatVersion('2.1.0'), formatVersion('2.1.1'), - formatVersion('5.0.0'), formatVersion('5.0.1'), formatVersion('5.1.0'), formatVersion('5.1.1'), - formatVersion('5.2.0'),formatVersion('5.2.1'),formatVersion('5.3.0'),formatVersion('5.3.1'), - formatVersion('5.3.2')] - VersionCollection vc = new VersionCollection(localVersion) - assertNotNull(vc) - assertEquals(vc.maintenanceBugfixSnapshot, Version.fromString("2.1.1-SNAPSHOT")) - - // This should contain the same list sans the current version - List indexCompatList = vc.versions.subList(0, vc.versions.size() - 1) - assertTrue(indexCompatList.containsAll(vc.indexCompatible)) - assertTrue(vc.indexCompatible.containsAll(indexCompatList)) - - List wireCompatList = [Version.fromString("2.1.0"), Version.fromString("2.1.1-SNAPSHOT"), Version.fromString("5.0.0"), - Version.fromString("5.0.1"), Version.fromString("5.1.0"), - Version.fromString("5.1.1"), Version.fromString("5.2.0"), Version.fromString("5.2.1"), - Version.fromString("5.3.0"), Version.fromString("5.3.1")] - - List compatible = vc.wireCompatible - assertTrue(wireCompatList.containsAll(compatible)) - assertTrue(vc.wireCompatible.containsAll(wireCompatList)) - - assertEquals(vc.snapshotsIndexCompatible.size(), 1) - assertTrue(vc.snapshotsIndexCompatible.contains(Version.fromString("2.1.1-SNAPSHOT"))) - - // ensure none of the 2.x snapshots appear here, as this is the floor of bwc for wire compat - assertEquals(vc.snapshotsWireCompatible.size(), 0) - } -} diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/BuildExamplePluginsIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/BuildExamplePluginsIT.java index 2a2304182c80e..661dba7716a7f 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/BuildExamplePluginsIT.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/BuildExamplePluginsIT.java @@ -23,6 +23,7 @@ import org.elasticsearch.gradle.test.GradleIntegrationTestCase; import org.gradle.testkit.runner.GradleRunner; import org.junit.BeforeClass; +import org.junit.Ignore; import org.junit.Rule; import org.junit.rules.TemporaryFolder; @@ -38,6 +39,7 @@ import java.util.Objects; import java.util.stream.Collectors; +@Ignore public class BuildExamplePluginsIT extends GradleIntegrationTestCase { private static List EXAMPLE_PLUGINS = Collections.unmodifiableList( diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/VersionCollectionTests.java b/buildSrc/src/test/java/org/elasticsearch/gradle/VersionCollectionTests.java new file mode 100644 index 0000000000000..d1b4e893ec6ad --- /dev/null +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/VersionCollectionTests.java @@ -0,0 +1,406 @@ +package org.elasticsearch.gradle; + +import org.elasticsearch.gradle.test.GradleUnitTestCase; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static java.util.Arrays.asList; +import static java.util.Collections.singletonList; + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +public class VersionCollectionTests extends GradleUnitTestCase { + + private static final Map> sampleVersions = new HashMap<>(); + + @Rule + public ExpectedException expectedEx = ExpectedException.none(); + + static { + // unreleased major and two unreleased minors ( minor in feature freeze ) + sampleVersions.put("8.0.0", asList( + "7_0_0", "7_0_1", "7_1_0", "7_1_1", "7_2_0", "7_3_0", "8.0.0" + )); + sampleVersions.put("7.0.0-alpha1", asList( + "6_0_0_alpha1", "6_0_0_alpha2", "6_0_0_beta1", "6_0_0_beta2", "6_0_0_rc1", "6_0_0_rc2", + "6_0_0", "6_0_1", "6_1_0", "6_1_1", "6_1_2", "6_1_3", "6_1_4", + "6_2_0", "6_2_1", "6_2_2", "6_2_3", "6_2_4", + "6_3_0", "6_3_1", "6_3_2", + "6_4_0", "6_4_1", "6_4_2", + "6_5_0", "7_0_0_alpha1" + )); + sampleVersions.put("6.5.0", asList( + "5_0_0_alpha1", "5_0_0_alpha2", "5_0_0_alpha3", "5_0_0_alpha4", "5_0_0_alpha5", "5_0_0_beta1", "5_0_0_rc1", + "5_0_0", "5_0_1", "5_0_2", "5_1_1", "5_1_2", "5_2_0", "5_2_1", "5_2_2", "5_3_0", "5_3_1", "5_3_2", "5_3_3", + "5_4_0", "5_4_1", "5_4_2", "5_4_3", "5_5_0", "5_5_1", "5_5_2", "5_5_3", "5_6_0", "5_6_1", "5_6_2", "5_6_3", + "5_6_4", "5_6_5", "5_6_6", "5_6_7", "5_6_8", "5_6_9", "5_6_10", "5_6_11", "5_6_12", "5_6_13", + "6_0_0_alpha1", "6_0_0_alpha2", "6_0_0_beta1", "6_0_0_beta2", "6_0_0_rc1", "6_0_0_rc2", "6_0_0", "6_0_1", + "6_1_0", "6_1_1", "6_1_2", "6_1_3", "6_1_4", "6_2_0", "6_2_1", "6_2_2", "6_2_3", "6_2_4", "6_3_0", "6_3_1", + "6_3_2", "6_4_0", "6_4_1", "6_4_2", "6_5_0" + )); + sampleVersions.put("6.6.0", asList( + "5_0_0_alpha1", "5_0_0_alpha2", "5_0_0_alpha3", "5_0_0_alpha4", "5_0_0_alpha5", "5_0_0_beta1", "5_0_0_rc1", + "5_0_0", "5_0_1", "5_0_2", "5_1_1", "5_1_2", "5_2_0", "5_2_1", "5_2_2", "5_3_0", "5_3_1", "5_3_2", "5_3_3", + "5_4_0", "5_4_1", "5_4_2", "5_4_3", "5_5_0", "5_5_1", "5_5_2", "5_5_3", "5_6_0", "5_6_1", "5_6_2", "5_6_3", + "5_6_4", "5_6_5", "5_6_6", "5_6_7", "5_6_8", "5_6_9", "5_6_10", "5_6_11", "5_6_12", "5_6_13", + "6_0_0_alpha1", "6_0_0_alpha2", "6_0_0_beta1", "6_0_0_beta2", "6_0_0_rc1", "6_0_0_rc2", "6_0_0", "6_0_1", + "6_1_0", "6_1_1", "6_1_2", "6_1_3", "6_1_4", "6_2_0", "6_2_1", "6_2_2", "6_2_3", "6_2_4", "6_3_0", "6_3_1", + "6_3_2", "6_4_0", "6_4_1", "6_4_2", "6_5_0", "6_6_0" + )); + sampleVersions.put("6.4.2", asList( + "5_0_0_alpha1", "5_0_0_alpha2", "5_0_0_alpha3", "5_0_0_alpha4", "5_0_0_alpha5", "5_0_0_beta1", "5_0_0_rc1", + "5_0_0", "5_0_1", "5_0_2", "5_1_1", "5_1_2", "5_2_0", "5_2_1", "5_2_2", "5_3_0", + "5_3_1", "5_3_2", "5_3_3", "5_4_0", "5_4_1", "5_4_2", "5_4_3", "5_5_0", "5_5_1", "5_5_2", "5_5_3", + "5_6_0", "5_6_1", "5_6_2", "5_6_3", "5_6_4", "5_6_5", "5_6_6", "5_6_7", "5_6_8", "5_6_9", "5_6_10", + "5_6_11", "5_6_12", "5_6_13", + "6_0_0_alpha1", "6_0_0_alpha2", "6_0_0_beta1", "6_0_0_beta2", "6_0_0_rc1", "6_0_0_rc2", + "6_0_0", "6_0_1", "6_1_0", "6_1_1", "6_1_2", "6_1_3", "6_1_4", "6_2_0", "6_2_1", "6_2_2", "6_2_3", + "6_2_4", "6_3_0", "6_3_1", "6_3_2", "6_4_0", "6_4_1", "6_4_2" + )); + } + + 
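For orientation, a sketch of how these fixtures drive the class under test; the expected values are taken from `testGetUnreleased()` below, and the wrapper code is illustrative:

---------------------------------------------------------------------------
// Each fixture entry is rendered into a fake Version.java declaration (see
// formatVersionToLine below) and parsed back by VersionCollection.
// For the "8.0.0" fixture, the leaves of the version tree count as unreleased:
// 7.1.1 (bugfix), 7.2.0 (staged), 7.3.0 (minor) and 8.0.0 (the current version).
VersionCollection vc = new VersionCollection(
    sampleVersions.get("8.0.0").stream()
        .map(v -> "    public static final Version V_" + v.replaceAll("\\.", "_") + " ")
        .collect(Collectors.toList()),
    Version.fromString("8.0.0")
);
vc.getUnreleased(); // [7.1.1, 7.2.0, 7.3.0, 8.0.0]
---------------------------------------------------------------------------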
@Test(expected = IllegalArgumentException.class) + public void testExceptionOnEmpty() { + new VersionCollection(asList("foo", "bar"), Version.fromString("7.0.0")); + } + + @Test(expected = IllegalStateException.class) + public void testExceptionOnNonCurrent() { + new VersionCollection(singletonList(formatVersionToLine("6.5.0")), Version.fromString("7.0.0")); + } + + @Test(expected = IllegalStateException.class) + public void testExceptionOnTooManyMajors() { + new VersionCollection( + asList( + formatVersionToLine("5.6.12"), + formatVersionToLine("6.5.0"), + formatVersionToLine("7.0.0") + ), + Version.fromString("6.5.0") + ); + } + + public void testWireCompatible() { + assertVersionsEquals( + singletonList("6.5.0-SNAPSHOT"), + getVersionCollection("7.0.0-alpha1").getWireCompatible() + ); + assertVersionsEquals( + asList( + "5.6.0", "5.6.1", "5.6.2", "5.6.3", "5.6.4", "5.6.5", "5.6.6", "5.6.7", "5.6.8", "5.6.9", "5.6.10", + "5.6.11", "5.6.12", "5.6.13-SNAPSHOT", + "6.0.0", "6.0.1", "6.1.0", "6.1.1", "6.1.2", "6.1.3", "6.1.4", + "6.2.0", "6.2.1", "6.2.2", "6.2.3", "6.2.4", + "6.3.0", "6.3.1", "6.3.2", "6.4.0", "6.4.1", "6.4.2-SNAPSHOT" + ), + getVersionCollection("6.5.0").getWireCompatible() + ); + + assertVersionsEquals( + asList( + "5.6.0", "5.6.1", "5.6.2", "5.6.3", "5.6.4", "5.6.5", "5.6.6", "5.6.7", "5.6.8", "5.6.9", "5.6.10", + "5.6.11", "5.6.12", "5.6.13-SNAPSHOT", "6.0.0", "6.0.1", "6.1.0", "6.1.1", "6.1.2", "6.1.3", "6.1.4", + "6.2.0", "6.2.1", "6.2.2", "6.2.3", "6.2.4", "6.3.0", "6.3.1", "6.3.2", "6.4.0", "6.4.1" + ), + getVersionCollection("6.4.2").getWireCompatible() + ); + + assertVersionsEquals( + asList( + "5.6.0", "5.6.1", "5.6.2", "5.6.3", "5.6.4", "5.6.5", "5.6.6", "5.6.7", "5.6.8", "5.6.9", "5.6.10", + "5.6.11", "5.6.12", "5.6.13-SNAPSHOT", + "6.0.0", "6.0.1", "6.1.0", "6.1.1", "6.1.2", "6.1.3", "6.1.4", + "6.2.0", "6.2.1", "6.2.2", "6.2.3", "6.2.4", + "6.3.0", "6.3.1", "6.3.2", "6.4.0", "6.4.1", "6.4.2-SNAPSHOT", "6.5.0-SNAPSHOT" + ), + getVersionCollection("6.6.0").getWireCompatible() + ); + + assertVersionsEquals( + singletonList("7.3.0"), + getVersionCollection("8.0.0").getWireCompatible() + ); + } + + public void testWireCompatibleUnreleased() { + assertVersionsEquals( + singletonList("6.5.0-SNAPSHOT"), + getVersionCollection("7.0.0-alpha1").getUnreleasedWireCompatible() + ); + assertVersionsEquals( + asList("5.6.13-SNAPSHOT", "6.4.2-SNAPSHOT"), + getVersionCollection("6.5.0").getUnreleasedWireCompatible() + ); + + assertVersionsEquals( + singletonList("5.6.13-SNAPSHOT"), + getVersionCollection("6.4.2").getUnreleasedWireCompatible() + ); + + assertVersionsEquals( + asList("5.6.13-SNAPSHOT", "6.4.2-SNAPSHOT", "6.5.0-SNAPSHOT"), + getVersionCollection("6.6.0").getUnreleasedWireCompatible() + ); + + assertVersionsEquals( + singletonList("7.3.0"), + getVersionCollection("8.0.0").getUnreleasedWireCompatible() + ); + } + + public void testIndexCompatible() { + assertVersionsEquals( + asList( + "6.0.0", "6.0.1", "6.1.0", "6.1.1", "6.1.2", "6.1.3", "6.1.4", + "6.2.0", "6.2.1", "6.2.2", "6.2.3", "6.2.4", "6.3.0", "6.3.1", + "6.3.2", "6.4.0", "6.4.1", "6.4.2-SNAPSHOT", "6.5.0-SNAPSHOT" + ), + getVersionCollection("7.0.0-alpha1").getIndexCompatible() + ); + + assertVersionsEquals( + asList( + "5.0.0", "5.0.1", "5.0.2", "5.1.1", "5.1.2", "5.2.0", "5.2.1", "5.2.2", "5.3.0", "5.3.1", "5.3.2", "5.3.3", + "5.4.0", "5.4.1", "5.4.2", "5.4.3", "5.5.0", "5.5.1", "5.5.2", "5.5.3", "5.6.0", "5.6.1", "5.6.2", "5.6.3", + "5.6.4", "5.6.5", "5.6.6", "5.6.7", "5.6.8", "5.6.9", "5.6.10", 
"5.6.11", "5.6.12", "5.6.13-SNAPSHOT", + "6.0.0", "6.0.1", "6.1.0", "6.1.1", "6.1.2", "6.1.3", "6.1.4", "6.2.0", "6.2.1", "6.2.2", "6.2.3", "6.2.4", + "6.3.0", "6.3.1", "6.3.2", "6.4.0", "6.4.1", "6.4.2-SNAPSHOT" + ), + getVersionCollection("6.5.0").getIndexCompatible() + ); + + assertVersionsEquals( + asList( + "5.0.0", "5.0.1", "5.0.2", "5.1.1", "5.1.2", "5.2.0", "5.2.1", "5.2.2", "5.3.0", "5.3.1", "5.3.2", "5.3.3", + "5.4.0", "5.4.1", "5.4.2", "5.4.3", "5.5.0", "5.5.1", "5.5.2", "5.5.3", "5.6.0", "5.6.1", "5.6.2", "5.6.3", + "5.6.4", "5.6.5", "5.6.6", "5.6.7", "5.6.8", "5.6.9", "5.6.10", "5.6.11", "5.6.12", "5.6.13-SNAPSHOT", + "6.0.0", "6.0.1", "6.1.0", "6.1.1", "6.1.2", "6.1.3", "6.1.4", "6.2.0", "6.2.1", "6.2.2", "6.2.3", "6.2.4", + "6.3.0", "6.3.1", "6.3.2", "6.4.0", "6.4.1" + ), + getVersionCollection("6.4.2").getIndexCompatible() + ); + + assertVersionsEquals( + asList( + "5.0.0", "5.0.1", "5.0.2", "5.1.1", "5.1.2", "5.2.0", "5.2.1", "5.2.2", "5.3.0", "5.3.1", "5.3.2", "5.3.3", + "5.4.0", "5.4.1", "5.4.2", "5.4.3", "5.5.0", "5.5.1", "5.5.2", "5.5.3", "5.6.0", "5.6.1", "5.6.2", "5.6.3", + "5.6.4", "5.6.5", "5.6.6", "5.6.7", "5.6.8", "5.6.9", "5.6.10", "5.6.11", "5.6.12", "5.6.13-SNAPSHOT", + "6.0.0", "6.0.1", "6.1.0", "6.1.1", "6.1.2", "6.1.3", "6.1.4", "6.2.0", "6.2.1", "6.2.2", "6.2.3", "6.2.4", + "6.3.0", "6.3.1", "6.3.2", "6.4.0", "6.4.1", "6.4.2-SNAPSHOT", "6.5.0-SNAPSHOT" + ), + getVersionCollection("6.6.0").getIndexCompatible() + ); + + assertVersionsEquals( + asList("7.0.0", "7.0.1", "7.1.0", "7.1.1", "7.2.0", "7.3.0"), + getVersionCollection("8.0.0").getIndexCompatible() + ); + } + + public void testIndexCompatibleUnreleased() { + assertVersionsEquals( + asList("6.4.2-SNAPSHOT", "6.5.0-SNAPSHOT"), + getVersionCollection("7.0.0-alpha1").getUnreleasedIndexCompatible() + ); + + assertVersionsEquals( + asList("5.6.13-SNAPSHOT", "6.4.2-SNAPSHOT"), + getVersionCollection("6.5.0").getUnreleasedIndexCompatible() + ); + + assertVersionsEquals( + singletonList("5.6.13-SNAPSHOT"), + getVersionCollection("6.4.2").getUnreleasedIndexCompatible() + ); + + assertVersionsEquals( + asList("5.6.13-SNAPSHOT", "6.4.2-SNAPSHOT", "6.5.0-SNAPSHOT"), + getVersionCollection("6.6.0").getUnreleasedIndexCompatible() + ); + + assertVersionsEquals( + asList("7.1.1", "7.2.0", "7.3.0"), + getVersionCollection("8.0.0").getUnreleasedIndexCompatible() + ); + } + + public void testGetUnreleased() { + assertVersionsEquals( + asList("6.4.2", "6.5.0", "7.0.0-alpha1"), + getVersionCollection("7.0.0-alpha1").getUnreleased() + ); + assertVersionsEquals( + asList("5.6.13", "6.4.2", "6.5.0"), + getVersionCollection("6.5.0").getUnreleased() + ); + assertVersionsEquals( + asList("5.6.13", "6.4.2"), + getVersionCollection("6.4.2").getUnreleased() + ); + assertVersionsEquals( + asList("5.6.13", "6.4.2", "6.5.0", "6.6.0"), + getVersionCollection("6.6.0").getUnreleased() + ); + assertVersionsEquals( + asList("7.1.1", "7.2.0", "7.3.0", "8.0.0"), + getVersionCollection("8.0.0").getUnreleased() + ); + } + + public void testGetBranch() { + assertUnreleasedBranchNames( + asList("6.4", "6.x"), + getVersionCollection("7.0.0-alpha1") + ); + assertUnreleasedBranchNames( + asList("5.6", "6.4"), + getVersionCollection("6.5.0") + ); + assertUnreleasedBranchNames( + singletonList("5.6"), + getVersionCollection("6.4.2") + ); + assertUnreleasedBranchNames( + asList("5.6", "6.4", "6.5"), + getVersionCollection("6.6.0") + ); + assertUnreleasedBranchNames( + asList("7.1", "7.2", "7.x"), + getVersionCollection("8.0.0") + ); + } + + public 
void testGetGradleProjectName() { + assertUnreleasedGradleProjectNames( + asList("bugfix", "minor"), + getVersionCollection("7.0.0-alpha1") + ); + assertUnreleasedGradleProjectNames( + asList("maintenance", "bugfix"), + getVersionCollection("6.5.0") + ); + assertUnreleasedGradleProjectNames( + singletonList("maintenance"), + getVersionCollection("6.4.2") + ); + assertUnreleasedGradleProjectNames( + asList("maintenance", "bugfix", "staged"), + getVersionCollection("6.6.0") + ); + assertUnreleasedGradleProjectNames( + asList("bugfix", "staged", "minor"), + getVersionCollection("8.0.0") + ); + } + + public void testCompareToAuthoritative() { + List listOfVersions = asList("7.0.0", "7.0.1", "7.1.0", "7.1.1", "7.2.0", "7.3.0", "8.0.0"); + List authoritativeReleasedVersions = Stream.of("7.0.0", "7.0.1", "7.1.0") + .map(Version::fromString) + .collect(Collectors.toList()); + + VersionCollection vc = new VersionCollection( + listOfVersions.stream() + .map(this::formatVersionToLine) + .collect(Collectors.toList()), + Version.fromString("8.0.0") + ); + vc.compareToAuthoritative(authoritativeReleasedVersions); + } + + public void testCompareToAuthoritativeUnreleasedActuallyReleased() { + List listOfVersions = asList("7.0.0", "7.0.1", "7.1.0", "7.1.1", "7.2.0", "7.3.0", "8.0.0"); + List authoritativeReleasedVersions = Stream.of("7.0.0", "7.0.1", "7.1.0", "7.1.1", "8.0.0") + .map(Version::fromString) + .collect(Collectors.toList()); + + VersionCollection vc = new VersionCollection( + listOfVersions.stream() + .map(this::formatVersionToLine) + .collect(Collectors.toList()), + Version.fromString("8.0.0") + ); + expectedEx.expect(IllegalStateException.class); + expectedEx.expectMessage("but they are released"); + vc.compareToAuthoritative(authoritativeReleasedVersions); + } + + public void testCompareToAuthoritativeNotReallyRelesed() { + List listOfVersions = asList("7.0.0", "7.0.1", "7.1.0", "7.1.1", "7.2.0", "7.3.0", "8.0.0"); + List authoritativeReleasedVersions = Stream.of("7.0.0", "7.0.1") + .map(Version::fromString) + .collect(Collectors.toList()); + VersionCollection vc = new VersionCollection( + listOfVersions.stream() + .map(this::formatVersionToLine) + .collect(Collectors.toList()), + Version.fromString("8.0.0") + ); + expectedEx.expect(IllegalStateException.class); + expectedEx.expectMessage("not really released"); + vc.compareToAuthoritative(authoritativeReleasedVersions); + } + + private void assertUnreleasedGradleProjectNames(List expectedNAmes, VersionCollection versionCollection) { + List actualNames = new ArrayList<>(); + versionCollection.forPreviousUnreleased(unreleasedVersion -> + actualNames.add(unreleasedVersion.gradleProjectName) + ); + assertEquals(expectedNAmes, actualNames); + } + + private void assertUnreleasedBranchNames(List expectedBranches, VersionCollection versionCollection) { + List actualBranches = new ArrayList<>(); + versionCollection.forPreviousUnreleased(unreleasedVersionInfo -> + actualBranches.add(unreleasedVersionInfo.branch) + ); + assertEquals(expectedBranches, actualBranches); + } + + private String formatVersionToLine(final String version) { + return " public static final Version V_" + version.replaceAll("\\.", "_") + " "; + } + + private void assertVersionsEquals(List expected, List actual) { + assertEquals( + expected.stream() + .map(Version::fromString) + .collect(Collectors.toList()), + actual + ); + } + + private VersionCollection getVersionCollection(String currentVersion) { + return new VersionCollection( + sampleVersions.get(currentVersion).stream() + 
.map(this::formatVersionToLine) + .collect(Collectors.toList()), + Version.fromString(currentVersion) + ); + } +} diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/VersionTests.java b/buildSrc/src/test/java/org/elasticsearch/gradle/VersionTests.java index d3c3b4a43cb41..3394285157e17 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/VersionTests.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/VersionTests.java @@ -33,27 +33,23 @@ public class VersionTests extends GradleUnitTestCase { public ExpectedException expectedEx = ExpectedException.none(); public void testVersionParsing() { - assertVersionEquals("7.0.1", 7, 0, 1, "", false); - assertVersionEquals("7.0.1-alpha2", 7, 0, 1, "-alpha2", false); - assertVersionEquals("5.1.2-rc3", 5, 1, 2, "-rc3", false); - assertVersionEquals("6.1.2-SNAPSHOT", 6, 1, 2, "", true); - assertVersionEquals("6.1.2-beta1-SNAPSHOT", 6, 1, 2, "-beta1", true); + assertVersionEquals("7.0.1", 7, 0, 1); + assertVersionEquals("7.0.1-alpha2", 7, 0, 1); + assertVersionEquals("5.1.2-rc3", 5, 1, 2); + assertVersionEquals("6.1.2-SNAPSHOT", 6, 1, 2); + assertVersionEquals("6.1.2-beta1-SNAPSHOT", 6, 1, 2); } public void testCompareWithStringVersions() { assertTrue("1.10.20 is not interpreted as before 2.0.0", Version.fromString("1.10.20").before("2.0.0") ); - assertTrue("7.0.0-alpha1 is not interpreted as before 7.0.0-alpha2", - Version.fromString("7.0.0-alpha1").before("7.0.0-alpha2") - ); assertTrue("7.0.0-alpha1 should be equal to 7.0.0-alpha1", Version.fromString("7.0.0-alpha1").equals(Version.fromString("7.0.0-alpha1")) ); assertTrue("7.0.0-SNAPSHOT should be equal to 7.0.0-SNAPSHOT", Version.fromString("7.0.0-SNAPSHOT").equals(Version.fromString("7.0.0-SNAPSHOT")) ); - assertEquals(Version.fromString("5.2.1-SNAPSHOT"), Version.fromString("5.2.1-SNAPSHOT")); } public void testCollections() { @@ -78,62 +74,12 @@ public void testCollections() { } public void testToString() { - assertEquals("7.0.1", new Version(7, 0, 1, null, false).toString()); + assertEquals("7.0.1", new Version(7, 0, 1).toString()); } public void testCompareVersions() { - assertEquals(0, new Version(7, 0, 0, null, true).compareTo( - new Version(7, 0, 0, null, true) - )); - assertEquals(0, new Version(7, 0, 0, null, true).compareTo( - new Version(7, 0, 0, "", true) - )); - - // snapshot is not taken into account TODO inconsistent with equals - assertEquals( - 0, - new Version(7, 0, 0, "", false).compareTo( - new Version(7, 0, 0, null, true)) - ); - // without sufix is smaller than with TODO - assertOrder( - new Version(7, 0, 0, null, false), - new Version(7, 0, 0, "-alpha1", false) - ); - // numbered sufix - assertOrder( - new Version(7, 0, 0, "-alpha1", false), - new Version(7, 0, 0, "-alpha2", false) - ); - // ranked sufix - assertOrder( - new Version(7, 0, 0, "-alpha8", false), - new Version(7, 0, 0, "-rc1", false) - ); - // ranked sufix - assertOrder( - new Version(7, 0, 0, "-alpha8", false), - new Version(7, 0, 0, "-beta1", false) - ); - // ranked sufix - assertOrder( - new Version(7, 0, 0, "-beta8", false), - new Version(7, 0, 0, "-rc1", false) - ); - // major takes precedence - assertOrder( - new Version(6, 10, 10, "-alpha8", true), - new Version(7, 0, 0, "-alpha2", false) - ); - // then minor - assertOrder( - new Version(7, 0, 10, "-alpha8", true), - new Version(7, 1, 0, "-alpha2", false) - ); - // then revision - assertOrder( - new Version(7, 1, 0, "-alpha8", true), - new Version(7, 1, 10, "-alpha2", false) + assertEquals(0, + new Version(7, 0, 0).compareTo(new 
Version(7, 0, 0)) ); } @@ -149,33 +95,15 @@ public void testExceptionSyntax() { Version.fromString("foo.bar.baz"); } - public void testExceptionSuffixNumber() { - expectedEx.expect(IllegalArgumentException.class); - expectedEx.expectMessage("Invalid suffix"); - new Version(7, 1, 1, "-alpha", true); - } - - public void testExceptionSuffix() { - expectedEx.expect(IllegalArgumentException.class); - expectedEx.expectMessage("Suffix must contain one of:"); - new Version(7, 1, 1, "foo1", true); - } - private void assertOrder(Version smaller, Version bigger) { assertEquals(smaller + " should be smaller than " + bigger, -1, smaller.compareTo(bigger)); } - private void assertVersionEquals(String stringVersion, int major, int minor, int revision, String sufix, boolean snapshot) { + private void assertVersionEquals(String stringVersion, int major, int minor, int revision) { Version version = Version.fromString(stringVersion); assertEquals(major, version.getMajor()); assertEquals(minor, version.getMinor()); assertEquals(revision, version.getRevision()); - if (snapshot) { - assertTrue("Expected version to be a snapshot but it was not", version.isSnapshot()); - } else { - assertFalse("Expected version not to be a snapshot but it was", version.isSnapshot()); - } - assertEquals(sufix, version.getSuffix()); } } diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/test/GradleIntegrationTestCase.java b/buildSrc/src/test/java/org/elasticsearch/gradle/test/GradleIntegrationTestCase.java index f8e3cf88c4094..025c549489afa 100644 --- a/buildSrc/src/test/java/org/elasticsearch/gradle/test/GradleIntegrationTestCase.java +++ b/buildSrc/src/test/java/org/elasticsearch/gradle/test/GradleIntegrationTestCase.java @@ -78,8 +78,16 @@ protected void assertTaskFailed(BuildResult result, String taskName) { assertTaskOutcome(result, taskName, TaskOutcome.FAILED); } - protected void assertTaskSuccessful(BuildResult result, String taskName) { - assertTaskOutcome(result, taskName, TaskOutcome.SUCCESS); + protected void assertTaskSuccessful(BuildResult result, String... taskNames) { + for (String taskName : taskNames) { + assertTaskOutcome(result, taskName, TaskOutcome.SUCCESS); + } + } + + protected void assertTaskSkipped(BuildResult result, String... taskNames) { + for (String taskName : taskNames) { + assertTaskOutcome(result, taskName, TaskOutcome.SKIPPED); + } } private void assertTaskOutcome(BuildResult result, String taskName, TaskOutcome taskOutcome) { @@ -96,17 +104,19 @@ private void assertTaskOutcome(BuildResult result, String taskName, TaskOutcome ); } - protected void assertTaskUpToDate(BuildResult result, String taskName) { - BuildTask task = result.task(taskName); - if (task == null) { - fail("Expected task `" + taskName + "` to be up-to-date, but it did not run"); + protected void assertTaskUpToDate(BuildResult result, String... 
taskNames) {
+        for (String taskName : taskNames) {
+            BuildTask task = result.task(taskName);
+            if (task == null) {
+                fail("Expected task `" + taskName + "` to be up-to-date, but it did not run");
+            }
+            assertEquals(
+                "Expected task to be up to date but it was: " + task.getOutcome() +
+                    "\n\nOutput is:\n" + result.getOutput(),
+                TaskOutcome.UP_TO_DATE,
+                task.getOutcome()
+            );
+        }
-        assertEquals(
-            "Expected task to be up to date but it was: " + task.getOutcome() +
-                "\n\nOutput is:\n" + result.getOutput() ,
-            TaskOutcome.UP_TO_DATE,
-            task.getOutcome()
-        );
     }
 
     protected void assertBuildFileExists(BuildResult result, String projectName, String path) {
@@ -139,4 +149,16 @@ protected String getLocalTestRepoPath() {
             return file.getAbsolutePath();
         }
     }
+
+    public void assertOutputOnlyOnce(String output, String... text) {
+        for (String each : text) {
+            int i = output.indexOf(each);
+            if (i == -1) {
+                fail("Expected `" + each + "` to appear exactly once, but it did not appear at all.\n\nOutput is:\n" + output);
+            }
+            if (output.indexOf(each) != output.lastIndexOf(each)) {
+                fail("Expected `" + each + "` to appear exactly once, but it appeared multiple times.\n\nOutput is:\n" + output);
+            }
+        }
+    }
 }
diff --git a/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java b/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java
index f1461dbbd3d97..c6e3b2ca370ce 100644
--- a/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java
+++ b/buildSrc/src/test/java/org/elasticsearch/gradle/testclusters/TestClustersPluginIT.java
@@ -21,124 +21,130 @@
 import org.elasticsearch.gradle.test.GradleIntegrationTestCase;
 import org.gradle.testkit.runner.BuildResult;
 import org.gradle.testkit.runner.GradleRunner;
-import org.gradle.testkit.runner.TaskOutcome;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
+import java.util.Arrays;
 
 public class TestClustersPluginIT extends GradleIntegrationTestCase {
 
     public void testListClusters() {
-        BuildResult result = GradleRunner.create()
-            .withProjectDir(getProjectDir("testclusters"))
-            .withArguments("listTestClusters", "-s")
-            .withPluginClasspath()
-            .build();
+        BuildResult result = getTestClustersRunner("listTestClusters").build();
 
-        assertEquals(TaskOutcome.SUCCESS, result.task(":listTestClusters").getOutcome());
+        assertTaskSuccessful(result, ":listTestClusters");
         assertOutputContains(
             result.getOutput(),
-            " * myTestCluster:"
+            " * myTestCluster:"
         );
-
     }
 
     public void testUseClusterByOne() {
-        BuildResult result = GradleRunner.create()
-            .withProjectDir(getProjectDir("testclusters"))
-            .withArguments("user1", "-s")
-            .withPluginClasspath()
-            .build();
-
-        assertEquals(TaskOutcome.SUCCESS, result.task(":user1").getOutcome());
-        assertOutputContains(
-            result.getOutput(),
-            "Starting cluster: myTestCluster",
-            "Stopping myTestCluster, number of claims is 0"
-        );
+        BuildResult result = getTestClustersRunner("user1").build();
+        assertTaskSuccessful(result, ":user1");
+        assertStartedAndStoppedOnce(result);
     }
 
     public void testUseClusterByOneWithDryRun() {
-        BuildResult result = GradleRunner.create()
-            .withProjectDir(getProjectDir("testclusters"))
-            .withArguments("user1", "-s", "--dry-run")
-            .withPluginClasspath()
-            .build();
-
+        BuildResult result = getTestClustersRunner("--dry-run", "user1").build();
         assertNull(result.task(":user1"));
-        assertOutputDoesNotContain(
-            result.getOutput(),
-            "Starting cluster: myTestCluster",
-            "Stopping myTestCluster, number of 
claims is 0" - ); + assertNotStarted(result); } public void testUseClusterByTwo() { - BuildResult result = GradleRunner.create() - .withProjectDir(getProjectDir("testclusters")) - .withArguments("user1", "user2", "-s") - .withPluginClasspath() - .build(); + BuildResult result = getTestClustersRunner("user1", "user2").build(); + assertTaskSuccessful(result, ":user1", ":user2"); + assertStartedAndStoppedOnce(result); + } - assertEquals(TaskOutcome.SUCCESS, result.task(":user1").getOutcome()); - assertEquals(TaskOutcome.SUCCESS, result.task(":user2").getOutcome()); + public void testUseClusterByUpToDateTask() { + BuildResult result = getTestClustersRunner("upToDate1", "upToDate2").build(); + assertTaskUpToDate(result, ":upToDate1", ":upToDate2"); + assertNotStarted(result); + } + + public void testUseClusterBySkippedTask() { + BuildResult result = getTestClustersRunner("skipped1", "skipped2").build(); + assertTaskSkipped(result, ":skipped1", ":skipped2"); + assertNotStarted(result); + } + + public void testUseClusterBySkippedAndWorkingTask() { + BuildResult result = getTestClustersRunner("skipped1", "user1").build(); + assertTaskSkipped(result, ":skipped1"); + assertTaskSuccessful(result, ":user1"); assertOutputContains( result.getOutput(), - "Starting cluster: myTestCluster", - "Not stopping myTestCluster, since cluster still has 1 claim(s)", - "Stopping myTestCluster, number of claims is 0" + "> Task :user1", + "Starting `ElasticsearchNode{name='myTestCluster'}`", + "Stopping `ElasticsearchNode{name='myTestCluster'}`" ); } - public void testUseClusterByUpToDateTask() { + public void testMultiProject() { BuildResult result = GradleRunner.create() - .withProjectDir(getProjectDir("testclusters")) - .withArguments("upToDate1", "upToDate2", "-s") + .withProjectDir(getProjectDir("testclusters_multiproject")) + .withArguments("user1", "user2", "-s", "-i", "--parallel") .withPluginClasspath() .build(); + assertTaskSuccessful(result, ":user1", ":user2"); + + assertStartedAndStoppedOnce(result); + } - assertEquals(TaskOutcome.UP_TO_DATE, result.task(":upToDate1").getOutcome()); - assertEquals(TaskOutcome.UP_TO_DATE, result.task(":upToDate2").getOutcome()); + public void testUseClusterByFailingOne() { + BuildResult result = getTestClustersRunner("itAlwaysFails").buildAndFail(); + assertTaskFailed(result, ":itAlwaysFails"); + assertStartedAndStoppedOnce(result); assertOutputContains( result.getOutput(), - "Not stopping myTestCluster, since cluster still has 1 claim(s)", - "cluster was not running: myTestCluster" + "Stopping `ElasticsearchNode{name='myTestCluster'}`, tailLogs: true", + "Execution failed for task ':itAlwaysFails'." ); - assertOutputDoesNotContain(result.getOutput(), "Starting cluster: myTestCluster"); } - public void testUseClusterBySkippedTask() { - BuildResult result = GradleRunner.create() - .withProjectDir(getProjectDir("testclusters")) - .withArguments("skipped1", "skipped2", "-s") - .withPluginClasspath() - .build(); + public void testUseClusterByFailingDependency() { + BuildResult result = getTestClustersRunner("dependsOnFailed").buildAndFail(); + assertTaskFailed(result, ":itAlwaysFails"); + assertNull(result.task(":dependsOnFailed")); + assertStartedAndStoppedOnce(result); + assertOutputContains( + result.getOutput(), + "Stopping `ElasticsearchNode{name='myTestCluster'}`, tailLogs: true", + "Execution failed for task ':itAlwaysFails'." 
+ ); + } - assertEquals(TaskOutcome.SKIPPED, result.task(":skipped1").getOutcome()); - assertEquals(TaskOutcome.SKIPPED, result.task(":skipped2").getOutcome()); + public void testConfigurationLocked() { + BuildResult result = getTestClustersRunner("illegalConfigAlter").buildAndFail(); + assertTaskFailed(result, ":illegalConfigAlter"); assertOutputContains( result.getOutput(), - "Not stopping myTestCluster, since cluster still has 1 claim(s)", - "cluster was not running: myTestCluster" + "Configuration can not be altered, already locked" ); - assertOutputDoesNotContain(result.getOutput(), "Starting cluster: myTestCluster"); } - public void tetUseClusterBySkippedAndWorkingTask() { - BuildResult result = GradleRunner.create() + private void assertNotStarted(BuildResult result) { + assertOutputDoesNotContain( + result.getOutput(), + "Starting ", + "Stopping " + ); + } + + private GradleRunner getTestClustersRunner(String... tasks) { + String[] arguments = Arrays.copyOf(tasks, tasks.length + 2); + arguments[tasks.length] = "-s"; + arguments[tasks.length + 1] = "-i"; + return GradleRunner.create() .withProjectDir(getProjectDir("testclusters")) - .withArguments("skipped1", "user1", "-s") - .withPluginClasspath() - .build(); + .withArguments(arguments) + .withPluginClasspath(); + } - assertEquals(TaskOutcome.SKIPPED, result.task(":skipped1").getOutcome()); - assertEquals(TaskOutcome.SUCCESS, result.task(":user1").getOutcome()); - assertOutputContains( + + private void assertStartedAndStoppedOnce(BuildResult result) { + assertOutputOnlyOnce( result.getOutput(), - "> Task :user1", - "Starting cluster: myTestCluster", - "Stopping myTestCluster, number of claims is 0" + "Starting `ElasticsearchNode{name='myTestCluster'}`", + "Stopping `ElasticsearchNode{name='myTestCluster'}`" ); } - } diff --git a/buildSrc/src/testKit/testclusters/build.gradle b/buildSrc/src/testKit/testclusters/build.gradle index 470111f056ef9..bd1cfc143f42d 100644 --- a/buildSrc/src/testKit/testclusters/build.gradle +++ b/buildSrc/src/testKit/testclusters/build.gradle @@ -39,3 +39,23 @@ task skipped2 { enabled = false useCluster testClusters.myTestCluster } + +task itAlwaysFails { + doLast { + throw new GradleException("Task 1 failed!") + } + useCluster testClusters.myTestCluster +} + +task dependsOnFailed { + dependsOn itAlwaysFails + useCluster testClusters.myTestCluster +} + +task illegalConfigAlter { + useCluster testClusters.myTestCluster + doFirst { + println "Going to alter configuration after use" + testClusters.myTestCluster.distribution = 'ZIP_OSS' + } +} diff --git a/buildSrc/src/testKit/testclusters_multiproject/alpha/build.gradle b/buildSrc/src/testKit/testclusters_multiproject/alpha/build.gradle new file mode 100644 index 0000000000000..d9f18afd68b90 --- /dev/null +++ b/buildSrc/src/testKit/testclusters_multiproject/alpha/build.gradle @@ -0,0 +1,18 @@ +plugins { + id 'elasticsearch.testclusters' +} +testClusters { + myTestCluster +} +task user1 { + useCluster testClusters.myTestCluster + doFirst { + println "$path" + } +} +task user2 { + useCluster testClusters.myTestCluster + doFirst { + println "$path" + } +} diff --git a/buildSrc/src/testKit/testclusters_multiproject/bravo/build.gradle b/buildSrc/src/testKit/testclusters_multiproject/bravo/build.gradle new file mode 100644 index 0000000000000..2e1461f0b0f28 --- /dev/null +++ b/buildSrc/src/testKit/testclusters_multiproject/bravo/build.gradle @@ -0,0 +1,21 @@ +plugins { + id 'elasticsearch.testclusters' +} + +testClusters { + myTestCluster +} + +task user1 { + 
useCluster testClusters.myTestCluster + doFirst { + println "$path" + } +} + +task user2 { + useCluster testClusters.myTestCluster + doFirst { + println "$path" + } +} diff --git a/buildSrc/src/testKit/testclusters_multiproject/build.gradle b/buildSrc/src/testKit/testclusters_multiproject/build.gradle new file mode 100644 index 0000000000000..3527d1821d212 --- /dev/null +++ b/buildSrc/src/testKit/testclusters_multiproject/build.gradle @@ -0,0 +1,21 @@ +plugins { + id 'elasticsearch.testclusters' +} + +testClusters { + myTestCluster +} + +task user1 { + useCluster testClusters.myTestCluster + doFirst { + println "$path" + } +} + +task user2 { + useCluster testClusters.myTestCluster + doFirst { + println "$path" + } +} \ No newline at end of file diff --git a/buildSrc/src/testKit/testclusters_multiproject/charlie/build.gradle b/buildSrc/src/testKit/testclusters_multiproject/charlie/build.gradle new file mode 100644 index 0000000000000..f63a77aaea42d --- /dev/null +++ b/buildSrc/src/testKit/testclusters_multiproject/charlie/build.gradle @@ -0,0 +1,5 @@ +task hello() { + doLast { + println "This task does not use the testclusters plugin. So it will have no extension." + } +} \ No newline at end of file diff --git a/buildSrc/src/testKit/testclusters_multiproject/settings.gradle b/buildSrc/src/testKit/testclusters_multiproject/settings.gradle new file mode 100644 index 0000000000000..aa91948920148 --- /dev/null +++ b/buildSrc/src/testKit/testclusters_multiproject/settings.gradle @@ -0,0 +1,3 @@ +include ':alpha' +include ':bravo' +include ':charlie' \ No newline at end of file diff --git a/buildSrc/version.properties b/buildSrc/version.properties index d8cd559cc2def..14eec1efe1c0b 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -1,5 +1,5 @@ elasticsearch = 6.6.0 -lucene = 7.5.0 +lucene = 7.6.0-snapshot-f9598f335b # optional dependencies spatial4j = 0.7 diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/GraphClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/GraphClient.java index 3d5365fedde5a..70912b094d023 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/GraphClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/GraphClient.java @@ -44,7 +44,7 @@ public class GraphClient { public final GraphExploreResponse explore(GraphExploreRequest graphExploreRequest, RequestOptions options) throws IOException { return restHighLevelClient.performRequestAndParseEntity(graphExploreRequest, GraphRequestConverters::explore, - options, GraphExploreResponse::fromXContext, emptySet()); + options, GraphExploreResponse::fromXContent, emptySet()); } /** @@ -57,7 +57,7 @@ public final void exploreAsync(GraphExploreRequest graphExploreRequest, RequestOptions options, ActionListener listener) { restHighLevelClient.performRequestAsyncAndParseEntity(graphExploreRequest, GraphRequestConverters::explore, - options, GraphExploreResponse::fromXContext, listener, emptySet()); + options, GraphExploreResponse::fromXContent, listener, emptySet()); } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndexLifecycleClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndexLifecycleClient.java new file mode 100644 index 0000000000000..88456f8dcc095 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndexLifecycleClient.java @@ -0,0 +1,303 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license 
agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.master.AcknowledgedResponse;
+import org.elasticsearch.client.indexlifecycle.DeleteLifecyclePolicyRequest;
+import org.elasticsearch.client.indexlifecycle.ExplainLifecycleRequest;
+import org.elasticsearch.client.indexlifecycle.ExplainLifecycleResponse;
+import org.elasticsearch.client.indexlifecycle.GetLifecyclePolicyRequest;
+import org.elasticsearch.client.indexlifecycle.GetLifecyclePolicyResponse;
+import org.elasticsearch.client.indexlifecycle.LifecycleManagementStatusRequest;
+import org.elasticsearch.client.indexlifecycle.LifecycleManagementStatusResponse;
+import org.elasticsearch.client.indexlifecycle.PutLifecyclePolicyRequest;
+import org.elasticsearch.client.indexlifecycle.RetryLifecyclePolicyRequest;
+import org.elasticsearch.client.indexlifecycle.RemoveIndexLifecyclePolicyRequest;
+import org.elasticsearch.client.indexlifecycle.RemoveIndexLifecyclePolicyResponse;
+import org.elasticsearch.client.indexlifecycle.StartILMRequest;
+import org.elasticsearch.client.indexlifecycle.StopILMRequest;
+
+import java.io.IOException;
+
+import static java.util.Collections.emptySet;
+
+public class IndexLifecycleClient {
+    private final RestHighLevelClient restHighLevelClient;
+
+    IndexLifecycleClient(RestHighLevelClient restHighLevelClient) {
+        this.restHighLevelClient = restHighLevelClient;
+    }
+
+    /**
+     * Retrieve one or more lifecycle policy definitions
+     * See
+     * the docs for more.
+     * @param request the request
+     * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
+     * @return the response
+     * @throws IOException in case there is a problem sending the request or parsing back the response
+     */
+    public GetLifecyclePolicyResponse getLifecyclePolicy(GetLifecyclePolicyRequest request,
+                                                         RequestOptions options) throws IOException {
+        return restHighLevelClient.performRequestAndParseEntity(request, IndexLifecycleRequestConverters::getLifecyclePolicy, options,
+            GetLifecyclePolicyResponse::fromXContent, emptySet());
+    }
+
+    /**
+     * Asynchronously retrieve one or more lifecycle policy definitions
+     * See
+     * the docs for more.
+     * @param request the request
+     * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void getLifecyclePolicyAsync(GetLifecyclePolicyRequest request, RequestOptions options, + ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, IndexLifecycleRequestConverters::getLifecyclePolicy, options, + GetLifecyclePolicyResponse::fromXContent, listener, emptySet()); + } + + /** + * Create or modify a lifecycle definition + * See + * the docs for more. + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public AcknowledgedResponse putLifecyclePolicy(PutLifecyclePolicyRequest request, + RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, IndexLifecycleRequestConverters::putLifecyclePolicy, options, + AcknowledgedResponse::fromXContent, emptySet()); + } + + /** + * Asynchronously create or modify a lifecycle definition + * See + * the docs for more. + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void putLifecyclePolicyAsync(PutLifecyclePolicyRequest request, RequestOptions options, + ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, IndexLifecycleRequestConverters::putLifecyclePolicy, options, + AcknowledgedResponse::fromXContent, listener, emptySet()); + } + + /** + * Delete a lifecycle definition + * See + * the docs for more. + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public AcknowledgedResponse deleteLifecyclePolicy(DeleteLifecyclePolicyRequest request, + RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, IndexLifecycleRequestConverters::deleteLifecyclePolicy, options, + AcknowledgedResponse::fromXContent, emptySet()); + } + + /** + * Asynchronously delete a lifecycle definition + * See + * the docs for more. + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void deleteLifecyclePolicyAsync(DeleteLifecyclePolicyRequest request, RequestOptions options, + ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, IndexLifecycleRequestConverters::deleteLifecyclePolicy, options, + AcknowledgedResponse::fromXContent, listener, emptySet()); + } + + /** + * Remove the index lifecycle policy for an index + * See + * the docs for more. + * @param request the request + * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public RemoveIndexLifecyclePolicyResponse removeIndexLifecyclePolicy(RemoveIndexLifecyclePolicyRequest request, + RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, IndexLifecycleRequestConverters::removeIndexLifecyclePolicy, + options, RemoveIndexLifecyclePolicyResponse::fromXContent, emptySet()); + } + + /** + * Asynchronously remove the index lifecycle policy for an index + * See + * the docs for more. + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void removeIndexLifecyclePolicyAsync(RemoveIndexLifecyclePolicyRequest request, RequestOptions options, + ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, IndexLifecycleRequestConverters::removeIndexLifecyclePolicy, options, + RemoveIndexLifecyclePolicyResponse::fromXContent, listener, emptySet()); + } + + /** + * Start the Index Lifecycle Management feature. + * See + * the docs for more. + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public AcknowledgedResponse startILM(StartILMRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, IndexLifecycleRequestConverters::startILM, options, + AcknowledgedResponse::fromXContent, emptySet()); + } + + /** + * Asynchronously start the Index Lifecycle Management feature. + * See + * the docs for more. + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void startILMAsync(StartILMRequest request, RequestOptions options, ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, IndexLifecycleRequestConverters::startILM, options, + AcknowledgedResponse::fromXContent, listener, emptySet()); + } + + /** + * Stop the Index Lifecycle Management feature. + * See + * the docs for more. + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public AcknowledgedResponse stopILM(StopILMRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, IndexLifecycleRequestConverters::stopILM, options, + AcknowledgedResponse::fromXContent, emptySet()); + } + + /** + * Get the status of index lifecycle management + * See + * the docs for more. + * + * @param request the request + * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
+     * @return the response
+     * @throws IOException in case there is a problem sending the request or parsing back the response
+     */
+    public LifecycleManagementStatusResponse lifecycleManagementStatus(LifecycleManagementStatusRequest request, RequestOptions options)
+        throws IOException {
+        return restHighLevelClient.performRequestAndParseEntity(request, IndexLifecycleRequestConverters::lifecycleManagementStatus,
+            options, LifecycleManagementStatusResponse::fromXContent, emptySet());
+    }
+
+    /**
+     * Asynchronously get the status of index lifecycle management
+     * See
+     * the docs for more.
+     *
+     * @param request the request
+     * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
+     * @param listener the listener to be notified upon request completion
+     */
+    public void lifecycleManagementStatusAsync(LifecycleManagementStatusRequest request, RequestOptions options,
+                                               ActionListener listener) {
+        restHighLevelClient.performRequestAsyncAndParseEntity(request, IndexLifecycleRequestConverters::lifecycleManagementStatus, options,
+            LifecycleManagementStatusResponse::fromXContent, listener, emptySet());
+    }
+
+    /**
+     * Asynchronously stop the Index Lifecycle Management feature.
+     * See
+     * the docs for more.
+     * @param request the request
+     * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
+     * @param listener the listener to be notified upon request completion
+     */
+    public void stopILMAsync(StopILMRequest request, RequestOptions options, ActionListener listener) {
+        restHighLevelClient.performRequestAsyncAndParseEntity(request, IndexLifecycleRequestConverters::stopILM, options,
+            AcknowledgedResponse::fromXContent, listener, emptySet());
+    }
+
+    /**
+     * Explain the lifecycle state for an index
+     * See
+     * the docs for more.
+     * @param request the request
+     * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
+     * @return the response
+     * @throws IOException in case there is a problem sending the request or parsing back the response
+     */
+    public ExplainLifecycleResponse explainLifecycle(ExplainLifecycleRequest request, RequestOptions options) throws IOException {
+        return restHighLevelClient.performRequestAndParseEntity(request, IndexLifecycleRequestConverters::explainLifecycle, options,
+            ExplainLifecycleResponse::fromXContent, emptySet());
+    }
+
+    /**
+     * Asynchronously explain the lifecycle state for an index
+     * See
+     * the docs for more.
+     * @param request the request
+     * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
+     * @param listener the listener to be notified upon request completion
+     */
+    public void explainLifecycleAsync(ExplainLifecycleRequest request, RequestOptions options,
+                                      ActionListener listener) {
+        restHighLevelClient.performRequestAsyncAndParseEntity(request, IndexLifecycleRequestConverters::explainLifecycle, options,
+            ExplainLifecycleResponse::fromXContent, listener, emptySet());
+    }
+
+    /**
+     * Retry the lifecycle step for the given indices
+     * See
+     * the docs for more.
+     * @param request the request
+     * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public AcknowledgedResponse retryLifecycleStep(RetryLifecyclePolicyRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, IndexLifecycleRequestConverters::retryLifecycle, options, + AcknowledgedResponse::fromXContent, emptySet()); + } + + /** + * Asynchronously retry the lifecycle step for given indices + * See + * the docs for more. + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void retryLifecycleStepAsync(RetryLifecyclePolicyRequest request, RequestOptions options, + ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, IndexLifecycleRequestConverters::retryLifecycle, options, + AcknowledgedResponse::fromXContent, listener, emptySet()); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndexLifecycleRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndexLifecycleRequestConverters.java new file mode 100644 index 0000000000000..0ca4f22edf282 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndexLifecycleRequestConverters.java @@ -0,0 +1,154 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client; + +import org.apache.http.client.methods.HttpDelete; +import org.apache.http.client.methods.HttpGet; +import org.apache.http.client.methods.HttpPost; +import org.apache.http.client.methods.HttpPut; +import org.elasticsearch.client.indexlifecycle.DeleteLifecyclePolicyRequest; +import org.elasticsearch.client.indexlifecycle.ExplainLifecycleRequest; +import org.elasticsearch.client.indexlifecycle.GetLifecyclePolicyRequest; +import org.elasticsearch.client.indexlifecycle.LifecycleManagementStatusRequest; +import org.elasticsearch.client.indexlifecycle.PutLifecyclePolicyRequest; +import org.elasticsearch.client.indexlifecycle.RemoveIndexLifecyclePolicyRequest; +import org.elasticsearch.client.indexlifecycle.RetryLifecyclePolicyRequest; +import org.elasticsearch.client.indexlifecycle.StartILMRequest; +import org.elasticsearch.client.indexlifecycle.StopILMRequest; +import org.elasticsearch.common.Strings; + +import java.io.IOException; + +final class IndexLifecycleRequestConverters { + + private IndexLifecycleRequestConverters() {} + + static Request getLifecyclePolicy(GetLifecyclePolicyRequest getLifecyclePolicyRequest) { + String endpoint = new RequestConverters.EndpointBuilder().addPathPartAsIs("_ilm/policy") + .addCommaSeparatedPathParts(getLifecyclePolicyRequest.getPolicyNames()).build(); + Request request = new Request(HttpGet.METHOD_NAME, endpoint); + RequestConverters.Params params = new RequestConverters.Params(request); + params.withMasterTimeout(getLifecyclePolicyRequest.masterNodeTimeout()); + params.withTimeout(getLifecyclePolicyRequest.timeout()); + return request; + } + + static Request putLifecyclePolicy(PutLifecyclePolicyRequest putLifecycleRequest) throws IOException { + String endpoint = new RequestConverters.EndpointBuilder() + .addPathPartAsIs("_ilm/policy") + .addPathPartAsIs(putLifecycleRequest.getName()) + .build(); + Request request = new Request(HttpPut.METHOD_NAME, endpoint); + RequestConverters.Params params = new RequestConverters.Params(request); + params.withMasterTimeout(putLifecycleRequest.masterNodeTimeout()); + params.withTimeout(putLifecycleRequest.timeout()); + request.setEntity(RequestConverters.createEntity(putLifecycleRequest, RequestConverters.REQUEST_BODY_CONTENT_TYPE)); + return request; + } + + static Request deleteLifecyclePolicy(DeleteLifecyclePolicyRequest deleteLifecyclePolicyRequest) { + Request request = new Request(HttpDelete.METHOD_NAME, + new RequestConverters.EndpointBuilder() + .addPathPartAsIs("_ilm/policy") + .addPathPartAsIs(deleteLifecyclePolicyRequest.getLifecyclePolicy()) + .build()); + RequestConverters.Params params = new RequestConverters.Params(request); + params.withMasterTimeout(deleteLifecyclePolicyRequest.masterNodeTimeout()); + params.withTimeout(deleteLifecyclePolicyRequest.timeout()); + return request; + } + + static Request removeIndexLifecyclePolicy(RemoveIndexLifecyclePolicyRequest removePolicyRequest) { + String[] indices = removePolicyRequest.indices() == null ? 
+ Strings.EMPTY_ARRAY : removePolicyRequest.indices().toArray(new String[] {}); + Request request = new Request(HttpPost.METHOD_NAME, + new RequestConverters.EndpointBuilder() + .addCommaSeparatedPathParts(indices) + .addPathPartAsIs("_ilm", "remove") + .build()); + RequestConverters.Params params = new RequestConverters.Params(request); + params.withIndicesOptions(removePolicyRequest.indicesOptions()); + params.withMasterTimeout(removePolicyRequest.masterNodeTimeout()); + return request; + } + + static Request startILM(StartILMRequest startILMRequest) { + Request request = new Request(HttpPost.METHOD_NAME, + new RequestConverters.EndpointBuilder() + .addPathPartAsIs("_ilm") + .addPathPartAsIs("start") + .build()); + RequestConverters.Params params = new RequestConverters.Params(request); + params.withMasterTimeout(startILMRequest.masterNodeTimeout()); + params.withTimeout(startILMRequest.timeout()); + return request; + } + + static Request stopILM(StopILMRequest stopILMRequest) { + Request request = new Request(HttpPost.METHOD_NAME, + new RequestConverters.EndpointBuilder() + .addPathPartAsIs("_ilm") + .addPathPartAsIs("stop") + .build()); + RequestConverters.Params params = new RequestConverters.Params(request); + params.withMasterTimeout(stopILMRequest.masterNodeTimeout()); + params.withTimeout(stopILMRequest.timeout()); + return request; + } + + static Request lifecycleManagementStatus(LifecycleManagementStatusRequest lifecycleManagementStatusRequest){ + Request request = new Request(HttpGet.METHOD_NAME, + new RequestConverters.EndpointBuilder() + .addPathPartAsIs("_ilm") + .addPathPartAsIs("status") + .build()); + RequestConverters.Params params = new RequestConverters.Params(request); + params.withMasterTimeout(lifecycleManagementStatusRequest.masterNodeTimeout()); + params.withTimeout(lifecycleManagementStatusRequest.timeout()); + return request; + } + + static Request explainLifecycle(ExplainLifecycleRequest explainLifecycleRequest) { + String[] indices = explainLifecycleRequest.indices() == null ? 
Strings.EMPTY_ARRAY : explainLifecycleRequest.indices(); + Request request = new Request(HttpGet.METHOD_NAME, + new RequestConverters.EndpointBuilder() + .addCommaSeparatedPathParts(indices) + .addPathPartAsIs("_ilm") + .addPathPartAsIs("explain") + .build()); + RequestConverters.Params params = new RequestConverters.Params(request); + params.withIndicesOptions(explainLifecycleRequest.indicesOptions()); + params.withMasterTimeout(explainLifecycleRequest.masterNodeTimeout()); + return request; + } + + static Request retryLifecycle(RetryLifecyclePolicyRequest retryLifecyclePolicyRequest) { + Request request = new Request(HttpPost.METHOD_NAME, + new RequestConverters.EndpointBuilder() + .addCommaSeparatedPathParts(retryLifecyclePolicyRequest.getIndices()) + .addPathPartAsIs("_ilm") + .addPathPartAsIs("retry") + .build()); + RequestConverters.Params params = new RequestConverters.Params(request); + params.withMasterTimeout(retryLifecyclePolicyRequest.masterNodeTimeout()); + params.withTimeout(retryLifecyclePolicyRequest.timeout()); + return request; + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java index 69b1c4a91e087..be513b0985c55 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java @@ -49,6 +49,7 @@ import org.elasticsearch.client.ml.PreviewDatafeedRequest; import org.elasticsearch.client.ml.PutCalendarRequest; import org.elasticsearch.client.ml.PutDatafeedRequest; +import org.elasticsearch.client.ml.PutFilterRequest; import org.elasticsearch.client.ml.PutJobRequest; import org.elasticsearch.client.ml.StartDatafeedRequest; import org.elasticsearch.client.ml.StopDatafeedRequest; @@ -463,4 +464,16 @@ static Request deleteCalendar(DeleteCalendarRequest deleteCalendarRequest) { Request request = new Request(HttpDelete.METHOD_NAME, endpoint); return request; } + + static Request putFilter(PutFilterRequest putFilterRequest) throws IOException { + String endpoint = new EndpointBuilder() + .addPathPartAsIs("_xpack") + .addPathPartAsIs("ml") + .addPathPartAsIs("filters") + .addPathPart(putFilterRequest.getMlFilter().getId()) + .build(); + Request request = new Request(HttpPut.METHOD_NAME, endpoint); + request.setEntity(createEntity(putFilterRequest, REQUEST_BODY_CONTENT_TYPE)); + return request; + } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/MachineLearningClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/MachineLearningClient.java index 0b7647b7d579f..3f7a938f9ce1a 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/MachineLearningClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/MachineLearningClient.java @@ -61,6 +61,8 @@ import org.elasticsearch.client.ml.PutCalendarResponse; import org.elasticsearch.client.ml.PutDatafeedRequest; import org.elasticsearch.client.ml.PutDatafeedResponse; +import org.elasticsearch.client.ml.PutFilterRequest; +import org.elasticsearch.client.ml.PutFilterResponse; import org.elasticsearch.client.ml.PutJobRequest; import org.elasticsearch.client.ml.PutJobResponse; import org.elasticsearch.client.ml.StartDatafeedRequest; @@ -1166,4 +1168,43 @@ public void deleteCalendarAsync(DeleteCalendarRequest request, RequestOptions op listener, Collections.emptySet()); } + + /** + * Creates 
a new Machine Learning Filter + *
<p>
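+     * A minimal, illustrative call (it assumes a configured {@code RestHighLevelClient client} and an
+     * already-built {@code MlFilter filter}; the names are examples only):
+     * <pre>{@code
+     * PutFilterRequest request = new PutFilterRequest(filter);
+     * PutFilterResponse response = client.machineLearning().putFilter(request, RequestOptions.DEFAULT);
+     * }</pre>
+     * <p>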
+ * For additional info + * see ML PUT Filter documentation + * + * @param request The PutFilterRequest containing the {@link org.elasticsearch.client.ml.job.config.MlFilter} settings + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return PutFilterResponse with enclosed {@link org.elasticsearch.client.ml.job.config.MlFilter} object + * @throws IOException when there is a serialization issue sending the request or receiving the response + */ + public PutFilterResponse putFilter(PutFilterRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, + MLRequestConverters::putFilter, + options, + PutFilterResponse::fromXContent, + Collections.emptySet()); + } + + /** + * Creates a new Machine Learning Filter asynchronously and notifies listener on completion + *
<p>
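+     * A sketch of the asynchronous pattern (the listener bodies are elided; {@code request} is assumed
+     * to be an already-built {@code PutFilterRequest}):
+     * <pre>{@code
+     * client.machineLearning().putFilterAsync(request, RequestOptions.DEFAULT,
+     *     new ActionListener<PutFilterResponse>() {
+     *         public void onResponse(PutFilterResponse response) {
+     *             // handle the created filter
+     *         }
+     *         public void onFailure(Exception e) {
+     *             // handle the failure
+     *         }
+     *     });
+     * }</pre>
+     * <p>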
+ * For additional info + * see ML PUT Filter documentation + * + * @param request The request containing the {@link org.elasticsearch.client.ml.job.config.MlFilter} settings + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener Listener to be notified upon request completion + */ + public void putFilterAsync(PutFilterRequest request, RequestOptions options, ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, + MLRequestConverters::putFilter, + options, + PutFilterResponse::fromXContent, + listener, + Collections.emptySet()); + } + } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/NodesResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/NodesResponse.java new file mode 100644 index 0000000000000..24a04bd2da8d0 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/NodesResponse.java @@ -0,0 +1,60 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; + +/** + * Base class for responses that are node responses. These responses always contain the cluster + * name and the {@link NodesResponseHeader}. + */ +public abstract class NodesResponse { + + private final NodesResponseHeader header; + private final String clusterName; + + protected NodesResponse(NodesResponseHeader header, String clusterName) { + this.header = header; + this.clusterName = clusterName; + } + + /** + * Get the cluster name associated with all of the nodes. + * + * @return Never {@code null}. + */ + public String getClusterName() { + return clusterName; + } + + /** + * Gets information about the number of total, successful and failed nodes the request was run on. + * Also includes exceptions if relevant. 
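+     * <p>
+     * For example, a caller might fail fast when any node reported an error (a sketch only;
+     * {@code response} stands for an instance of a concrete subclass):
+     * <pre>{@code
+     * NodesResponseHeader header = response.getHeader();
+     * if (header.getFailed() > 0) {
+     *     throw new IllegalStateException(header.getFailed() + " nodes failed");
+     * }
+     * }</pre>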
+ */ + public NodesResponseHeader getHeader() { + return header; + } + + public static void declareCommonNodesResponseParsing(ConstructingObjectParser parser) { + parser.declareObject(ConstructingObjectParser.constructorArg(), NodesResponseHeader::fromXContent, new ParseField("_nodes")); + parser.declareString(ConstructingObjectParser.constructorArg(), new ParseField("cluster_name")); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java index aae2cfccf521a..ca5cffa31000c 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java @@ -49,6 +49,7 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.action.update.UpdateRequest; +import org.elasticsearch.client.core.CountRequest; import org.elasticsearch.client.security.RefreshPolicy; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.common.Nullable; @@ -84,6 +85,7 @@ import java.net.URI; import java.net.URISyntaxException; import java.nio.charset.Charset; +import java.util.List; import java.util.Locale; import java.util.StringJoiner; @@ -447,6 +449,16 @@ static Request multiSearchTemplate(MultiSearchTemplateRequest multiSearchTemplat return request; } + static Request count(CountRequest countRequest) throws IOException { + Request request = new Request(HttpPost.METHOD_NAME, endpoint(countRequest.indices(), countRequest.types(), "_count")); + Params params = new Params(request); + params.withRouting(countRequest.routing()); + params.withPreference(countRequest.preference()); + params.withIndicesOptions(countRequest.indicesOptions()); + request.setEntity(createEntity(countRequest.source(), REQUEST_BODY_CONTENT_TYPE)); + return request; + } + static Request explain(ExplainRequest explainRequest) throws IOException { Request request = new Request(HttpGet.METHOD_NAME, endpoint(explainRequest.index(), explainRequest.type(), explainRequest.id(), "_explain")); @@ -1022,7 +1034,12 @@ EndpointBuilder addCommaSeparatedPathParts(String[] parts) { return this; } - EndpointBuilder addPathPartAsIs(String... parts) { + EndpointBuilder addCommaSeparatedPathParts(List parts) { + addPathPart(String.join(",", parts)); + return this; + } + + EndpointBuilder addPathPartAsIs(String ... 
parts) { for (String part : parts) { if (Strings.hasLength(part)) { joiner.add(part); diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java index c89bb2bfd787a..6bdfaed1f0855 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java @@ -57,6 +57,8 @@ import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.action.update.UpdateResponse; +import org.elasticsearch.client.core.CountRequest; +import org.elasticsearch.client.core.CountResponse; import org.elasticsearch.client.core.TermVectorsResponse; import org.elasticsearch.client.core.TermVectorsRequest; import org.elasticsearch.common.CheckedConsumer; @@ -157,6 +159,8 @@ import org.elasticsearch.search.aggregations.metrics.tophits.TopHitsAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.valuecount.ParsedValueCount; import org.elasticsearch.search.aggregations.metrics.valuecount.ValueCountAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.mad.MedianAbsoluteDeviationAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.mad.ParsedMedianAbsoluteDeviation; import org.elasticsearch.search.aggregations.pipeline.InternalSimpleValue; import org.elasticsearch.search.aggregations.pipeline.ParsedSimpleValue; import org.elasticsearch.search.aggregations.pipeline.bucketmetrics.InternalBucketMetricValue; @@ -217,6 +221,7 @@ public class RestHighLevelClient implements Closeable { private final WatcherClient watcherClient = new WatcherClient(this); private final GraphClient graphClient = new GraphClient(this); private final LicenseClient licenseClient = new LicenseClient(this); + private final IndexLifecycleClient indexLifecycleClient = new IndexLifecycleClient(this); private final MigrationClient migrationClient = new MigrationClient(this); private final MachineLearningClient machineLearningClient = new MachineLearningClient(this); private final SecurityClient securityClient = new SecurityClient(this); @@ -366,6 +371,16 @@ public final XPackClient xpack() { */ public LicenseClient license() { return licenseClient; } + /** + * Provides methods for accessing the Elastic Licensed Index Lifecycle APIs that are shipped with the default distribution of + * Elasticsearch. All of these APIs will 404 if run against the OSS distribution of Elasticsearch. + *
<p>
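+     * A typical interaction, shown only as an illustrative sketch ({@code client} stands for this
+     * {@link RestHighLevelClient} and {@code putRequest} for an already-built
+     * {@code PutLifecyclePolicyRequest}):
+     * <pre>{@code
+     * AcknowledgedResponse response = client.indexLifecycle()
+     *     .putLifecyclePolicy(putRequest, RequestOptions.DEFAULT);
+     * boolean acknowledged = response.isAcknowledged();
+     * }</pre>
+     * <p>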
+ * See the X-Pack APIs on elastic.co for more information. + */ + public IndexLifecycleClient indexLifecycle() { + return indexLifecycleClient; + } + /** * Provides methods for accessing the Elastic Licensed Licensing APIs that * are shipped with the default distribution of Elasticsearch. All of @@ -910,6 +925,31 @@ public final void indexAsync(IndexRequest indexRequest, ActionListenerCount API on elastic.co + * @param countRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public final CountResponse count(CountRequest countRequest, RequestOptions options) throws IOException { + return performRequestAndParseEntity(countRequest, RequestConverters::count, options, CountResponse::fromXContent, + emptySet()); + } + + /** + * Asynchronously executes a count request using the Count API. + * See Count API on elastic.co + * @param countRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public final void countAsync(CountRequest countRequest, RequestOptions options, ActionListener listener) { + performRequestAsyncAndParseEntity(countRequest, RequestConverters::count, options,CountResponse::fromXContent, + listener, emptySet()); + } + /** * Updates a document using the Update API. * See Update API on elastic.co @@ -1886,6 +1926,7 @@ static List getDefaultNamedXContents() { map.put(InternalTDigestPercentiles.NAME, (p, c) -> ParsedTDigestPercentiles.fromXContent(p, (String) c)); map.put(InternalTDigestPercentileRanks.NAME, (p, c) -> ParsedTDigestPercentileRanks.fromXContent(p, (String) c)); map.put(PercentilesBucketPipelineAggregationBuilder.NAME, (p, c) -> ParsedPercentilesBucket.fromXContent(p, (String) c)); + map.put(MedianAbsoluteDeviationAggregationBuilder.NAME, (p, c) -> ParsedMedianAbsoluteDeviation.fromXContent(p, (String) c)); map.put(MinAggregationBuilder.NAME, (p, c) -> ParsedMin.fromXContent(p, (String) c)); map.put(MaxAggregationBuilder.NAME, (p, c) -> ParsedMax.fromXContent(p, (String) c)); map.put(SumAggregationBuilder.NAME, (p, c) -> ParsedSum.fromXContent(p, (String) c)); diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RollupClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RollupClient.java index cb33384c12b48..62c7d1e9ab92c 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RollupClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RollupClient.java @@ -20,6 +20,8 @@ package org.elasticsearch.client; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.client.rollup.DeleteRollupJobRequest; +import org.elasticsearch.client.rollup.DeleteRollupJobResponse; import org.elasticsearch.client.rollup.GetRollupCapsRequest; import org.elasticsearch.client.rollup.GetRollupCapsResponse; import org.elasticsearch.client.rollup.GetRollupJobRequest; @@ -114,6 +116,40 @@ public void startRollupJobAsync(StartRollupJobRequest request, RequestOptions op listener, Collections.emptySet()); } + /** + * Delete a rollup job from the cluster + * See + * the docs for more. + * @param request the request + * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public DeleteRollupJobResponse deleteRollupJob(DeleteRollupJobRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, + RollupRequestConverters::deleteJob, + options, + DeleteRollupJobResponse::fromXContent, + Collections.emptySet()); + } + /** + * Asynchronously delete a rollup job from the cluster + * See + * The docs for details. + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void deleteRollupJobAsync(DeleteRollupJobRequest request, + RequestOptions options, + ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, + RollupRequestConverters::deleteJob, + options, + DeleteRollupJobResponse::fromXContent, + listener, Collections.emptySet()); + } + /** * Get a rollup job from the cluster. * See diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RollupRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RollupRequestConverters.java index 3f780eca059ef..8d1f07dae4fdf 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RollupRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RollupRequestConverters.java @@ -18,10 +18,12 @@ */ package org.elasticsearch.client; +import org.apache.http.client.methods.HttpDelete; import org.apache.http.client.methods.HttpGet; import org.apache.http.client.methods.HttpPost; import org.apache.http.client.methods.HttpPut; import org.elasticsearch.client.rollup.GetRollupCapsRequest; +import org.elasticsearch.client.rollup.DeleteRollupJobRequest; import org.elasticsearch.client.rollup.GetRollupJobRequest; import org.elasticsearch.client.rollup.PutRollupJobRequest; import org.elasticsearch.client.rollup.StartRollupJobRequest; @@ -74,4 +76,16 @@ static Request getRollupCaps(final GetRollupCapsRequest getRollupCapsRequest) th request.setEntity(createEntity(getRollupCapsRequest, REQUEST_BODY_CONTENT_TYPE)); return request; } + + static Request deleteJob(final DeleteRollupJobRequest deleteRollupJobRequest) throws IOException { + String endpoint = new RequestConverters.EndpointBuilder() + .addPathPartAsIs("_xpack") + .addPathPartAsIs("rollup") + .addPathPartAsIs("job") + .addPathPart(deleteRollupJobRequest.getId()) + .build(); + Request request = new Request(HttpDelete.METHOD_NAME, endpoint); + request.setEntity(createEntity(deleteRollupJobRequest, REQUEST_BODY_CONTENT_TYPE)); + return request; + } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/SecurityClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/SecurityClient.java index e604814a3bce5..75103b7ad5602 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/SecurityClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/SecurityClient.java @@ -20,7 +20,11 @@ package org.elasticsearch.client; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.client.security.AuthenticateRequest; +import org.elasticsearch.client.security.AuthenticateResponse; import 
org.elasticsearch.client.security.ChangePasswordRequest;
+import org.elasticsearch.client.security.ClearRealmCacheRequest;
+import org.elasticsearch.client.security.ClearRealmCacheResponse;
 import org.elasticsearch.client.security.ClearRolesCacheRequest;
 import org.elasticsearch.client.security.ClearRolesCacheResponse;
 import org.elasticsearch.client.security.CreateTokenRequest;
@@ -211,13 +215,69 @@ public void disableUserAsync(DisableUserRequest request, RequestOptions options,
     }

     /**
-     * Clears the native roles cache for a set of roles.
+     * Authenticate the current user and return all the information about the authenticated user.
+     * See
+     * the docs for more.
+     *
+     * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
+     * @return the response from the authenticate user call
+     * @throws IOException in case there is a problem sending the request or parsing back the response
+     */
+    public AuthenticateResponse authenticate(RequestOptions options) throws IOException {
+        return restHighLevelClient.performRequestAndParseEntity(AuthenticateRequest.INSTANCE, AuthenticateRequest::getRequest, options,
+            AuthenticateResponse::fromXContent, emptySet());
+    }
+
+    /**
+     * Authenticate the current user asynchronously and return all the information about the authenticated user.
+     * See
+     * the docs for more.
+     *
+     * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
+     * @param listener the listener to be notified upon request completion
+     */
+    public void authenticateAsync(RequestOptions options, ActionListener listener) {
+        restHighLevelClient.performRequestAsyncAndParseEntity(AuthenticateRequest.INSTANCE, AuthenticateRequest::getRequest, options,
+            AuthenticateResponse::fromXContent, listener, emptySet());
+    }
+
+    /**
+     * Clears the cache in one or more realms.
+     * See
+     * the docs for more.
+     *
+     * @param request the request with the realm names and usernames to clear the cache for
+     * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
+     * @return the response from the clear realm cache call
+     * @throws IOException in case there is a problem sending the request or parsing back the response
+     */
+    public ClearRealmCacheResponse clearRealmCache(ClearRealmCacheRequest request, RequestOptions options) throws IOException {
+        return restHighLevelClient.performRequestAndParseEntity(request, SecurityRequestConverters::clearRealmCache, options,
+            ClearRealmCacheResponse::fromXContent, emptySet());
+    }
+
+    /**
+     * Clears the cache in one or more realms asynchronously.
+     * See
+     * the docs for more.
+     *
+     * @param request the request with the realm names and usernames to clear the cache for
+     * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
+     * @param listener the listener to be notified upon request completion
+     */
+    public void clearRealmCacheAsync(ClearRealmCacheRequest request, RequestOptions options,
+                                     ActionListener listener) {
+        restHighLevelClient.performRequestAsyncAndParseEntity(request, SecurityRequestConverters::clearRealmCache, options,
+            ClearRealmCacheResponse::fromXContent, listener, emptySet());
+    }
+
+    /**
+     * Clears the roles cache for a set of roles.
+     * See
      * the docs for more.
      *
      * @param request the request with the roles for which the cache should be cleared.
      * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
-     * @return the response from the enable user call
+     * @return the response from the clear roles cache call
      * @throws IOException in case there is a problem sending the request or parsing back the response
      */
     public ClearRolesCacheResponse clearRolesCache(ClearRolesCacheRequest request, R
@@ -226,7 +286,7 @@ public ClearRolesCacheResponse clearRolesCache(ClearRolesCacheRequest request, R
     }

     /**
-     * Clears the native roles cache for a set of roles asynchronously.
+     * Clears the roles cache for a set of roles asynchronously.
      * See
      * the docs for more.
      *
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/SecurityRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/SecurityRequestConverters.java
index 2feb15704333e..fe0a9833cb672 100644
--- a/client/rest-high-level/src/main/java/org/elasticsearch/client/SecurityRequestConverters.java
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/SecurityRequestConverters.java
@@ -23,6 +23,7 @@
 import org.apache.http.client.methods.HttpDelete;
 import org.apache.http.client.methods.HttpPost;
 import org.apache.http.client.methods.HttpPut;
+import org.elasticsearch.client.security.ClearRealmCacheRequest;
 import org.elasticsearch.client.security.ClearRolesCacheRequest;
 import org.elasticsearch.client.security.CreateTokenRequest;
 import org.elasticsearch.client.security.DeleteRoleMappingRequest;
@@ -111,6 +112,23 @@ private static Request setUserEnabled(SetUserEnabledRequest setUserEnabledReques
         return request;
     }

+    static Request clearRealmCache(ClearRealmCacheRequest clearRealmCacheRequest) {
+        RequestConverters.EndpointBuilder builder = new RequestConverters.EndpointBuilder()
+            .addPathPartAsIs("_xpack/security/realm");
+        if (clearRealmCacheRequest.getRealms().isEmpty() == false) {
+            builder.addCommaSeparatedPathParts(clearRealmCacheRequest.getRealms().toArray(Strings.EMPTY_ARRAY));
+        } else {
+            builder.addPathPart("_all");
+        }
+        final String endpoint = builder.addPathPartAsIs("_clear_cache").build();
+        Request request = new Request(HttpPost.METHOD_NAME, endpoint);
+        if (clearRealmCacheRequest.getUsernames().isEmpty() == false) {
+            RequestConverters.Params params = new RequestConverters.Params(request);
+            params.putParam("usernames", Strings.collectionToCommaDelimitedString(clearRealmCacheRequest.getUsernames()));
+        }
+        return request;
+    }
+
     static Request clearRolesCache(ClearRolesCacheRequest disableCacheRequest) {
         String endpoint = new RequestConverters.EndpointBuilder()
             .addPathPartAsIs("_xpack/security/role")
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/TimedRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/TimedRequest.java
index c26a7ba48ca17..60ecea39ae093 100644
--- a/client/rest-high-level/src/main/java/org/elasticsearch/client/TimedRequest.java
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/TimedRequest.java
@@ -28,7 +28,7 @@
  * Please note, any requests that use an ackTimeout should set timeout as they
  * represent the same backing field on the server.
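+ * <p>
+ * For example, both timeouts can be tightened on any concrete subclass (a sketch;
+ * {@code request} stands for an instance of such a subclass):
+ * <pre>{@code
+ * request.setTimeout(TimeValue.timeValueSeconds(10));
+ * request.setMasterTimeout(TimeValue.timeValueSeconds(20));
+ * }</pre>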
 */
-public class TimedRequest implements Validatable {
+public abstract class TimedRequest implements Validatable {

     public static final TimeValue DEFAULT_ACK_TIMEOUT = timeValueSeconds(30);
     public static final TimeValue DEFAULT_MASTER_NODE_TIMEOUT = TimeValue.timeValueSeconds(30);
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/XPackClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/XPackClient.java
index a20dfd1ba328a..1e8a5328355a0 100644
--- a/client/rest-high-level/src/main/java/org/elasticsearch/client/XPackClient.java
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/XPackClient.java
@@ -94,4 +94,5 @@ public void usageAsync(XPackUsageRequest request, RequestOptions options, Action
         restHighLevelClient.performRequestAsyncAndParseEntity(request, XPackRequestConverters::usage, options,
             XPackUsageResponse::fromXContent, listener, emptySet());
     }
+
 }
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/core/CountRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/core/CountRequest.java
new file mode 100644
index 0000000000000..6d4589c7861f6
--- /dev/null
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/core/CountRequest.java
@@ -0,0 +1,206 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client.core;
+
+import org.elasticsearch.action.ActionRequest;
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.IndicesRequest;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.search.builder.SearchSourceBuilder;
+
+import java.util.Arrays;
+import java.util.Objects;
+
+import static org.elasticsearch.action.search.SearchRequest.DEFAULT_INDICES_OPTIONS;
+
+/**
+ * Encapsulates a request to the _count API against one, several or all indices.
+ */
+public final class CountRequest extends ActionRequest implements IndicesRequest.Replaceable {
+
+    private String[] indices = Strings.EMPTY_ARRAY;
+    private String[] types = Strings.EMPTY_ARRAY;
+    private String routing;
+    private String preference;
+    private SearchSourceBuilder searchSourceBuilder;
+    private IndicesOptions indicesOptions = DEFAULT_INDICES_OPTIONS;
+
+    public CountRequest() {
+        this.searchSourceBuilder = new SearchSourceBuilder();
+    }
+
+    /**
+     * Constructs a new count request against the indices. No indices provided here means that count will execute on all indices.
+     */
+    public CountRequest(String... indices) {
+        this(indices, new SearchSourceBuilder());
+    }
+
+    /**
+     * Constructs a new count request against the provided indices with the given search source.
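+     * <p>
+     * For example (an illustrative sketch only; the index names and query are arbitrary):
+     * <pre>{@code
+     * CountRequest request = new CountRequest(new String[] {"index1", "index2"},
+     *     new SearchSourceBuilder().query(QueryBuilders.termQuery("user", "kimchy")));
+     * }</pre>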
+ */ + public CountRequest(String[] indices, SearchSourceBuilder searchSourceBuilder) { + indices(indices); + this.searchSourceBuilder = searchSourceBuilder; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + /** + * Sets the indices the count will be executed on. + */ + public CountRequest indices(String... indices) { + Objects.requireNonNull(indices, "indices must not be null"); + for (String index : indices) { + Objects.requireNonNull(index, "index must not be null"); + } + this.indices = indices; + return this; + } + + /** + * The source of the count request. + */ + public CountRequest source(SearchSourceBuilder searchSourceBuilder) { + this.searchSourceBuilder = Objects.requireNonNull(searchSourceBuilder, "source must not be null"); + return this; + } + + /** + * The document types to execute the count against. Defaults to be executed against all types. + * + * @deprecated Types are going away, prefer filtering on a type. + */ + @Deprecated + public CountRequest types(String... types) { + Objects.requireNonNull(types, "types must not be null"); + for (String type : types) { + Objects.requireNonNull(type, "type must not be null"); + } + this.types = types; + return this; + } + + /** + * The routing values to control the shards that the search will be executed on. + */ + public CountRequest routing(String routing) { + this.routing = routing; + return this; + } + + /** + * A comma separated list of routing values to control the shards the count will be executed on. + */ + public CountRequest routing(String... routings) { + this.routing = Strings.arrayToCommaDelimitedString(routings); + return this; + } + + /** + * Returns the indices options used to resolve indices. They tell for instance whether a single index is accepted, whether an empty + * array will be converted to _all, and how wildcards will be expanded if needed. + * + * @see org.elasticsearch.action.support.IndicesOptions + */ + public CountRequest indicesOptions(IndicesOptions indicesOptions) { + this.indicesOptions = Objects.requireNonNull(indicesOptions, "indicesOptions must not be null"); + return this; + } + + /** + * Sets the preference to execute the count. Defaults to randomize across shards. Can be set to {@code _local} to prefer local shards + * or a custom value, which guarantees that the same order will be used across different requests. 
+ */ + public CountRequest preference(String preference) { + this.preference = preference; + return this; + } + + public IndicesOptions indicesOptions() { + return this.indicesOptions; + } + + public String routing() { + return this.routing; + } + + public String preference() { + return this.preference; + } + + public String[] indices() { + return Arrays.copyOf(this.indices, this.indices.length); + } + + public Float minScore() { + return this.searchSourceBuilder.minScore(); + } + + public CountRequest minScore(Float minScore) { + this.searchSourceBuilder.minScore(minScore); + return this; + } + + public int terminateAfter() { + return this.searchSourceBuilder.terminateAfter(); + } + + public CountRequest terminateAfter(int terminateAfter) { + this.searchSourceBuilder.terminateAfter(terminateAfter); + return this; + } + + public String[] types() { + return Arrays.copyOf(this.types, this.types.length); + } + + public SearchSourceBuilder source() { + return this.searchSourceBuilder; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + CountRequest that = (CountRequest) o; + return Objects.equals(indicesOptions, that.indicesOptions) && + Arrays.equals(indices, that.indices) && + Arrays.equals(types, that.types) && + Objects.equals(routing, that.routing) && + Objects.equals(preference, that.preference); + } + + @Override + public int hashCode() { + int result = Objects.hash(indicesOptions, routing, preference); + result = 31 * result + Arrays.hashCode(indices); + result = 31 * result + Arrays.hashCode(types); + return result; + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/core/CountResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/core/CountResponse.java new file mode 100644 index 0000000000000..f97f79127e690 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/core/CountResponse.java @@ -0,0 +1,236 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.core; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.search.ShardSearchFailure; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.rest.RestStatus; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; + +/** + * A response to _count API request. 
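As a sketch of how the `fromXContent` method defined below consumes a typical `_count` body; the parser setup mirrors what the high-level client does internally, and the `org.elasticsearch.common.xcontent` imports are assumed:

---------------------------------------------------
String json = "{\"count\":7,\"terminated_early\":false,"
    + "\"_shards\":{\"total\":5,\"successful\":5,\"skipped\":0,\"failed\":0}}";
try (XContentParser parser = JsonXContent.jsonXContent.createParser(
        NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json)) {
    CountResponse countResponse = CountResponse.fromXContent(parser);
    // countResponse.getCount() == 7
}
---------------------------------------------------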
+ */ +public final class CountResponse extends ActionResponse { + + static final ParseField COUNT = new ParseField("count"); + static final ParseField TERMINATED_EARLY = new ParseField("terminated_early"); + static final ParseField SHARDS = new ParseField("_shards"); + + private final long count; + private final Boolean terminatedEarly; + private final ShardStats shardStats; + + public CountResponse(long count, Boolean terminatedEarly, ShardStats shardStats) { + this.count = count; + this.terminatedEarly = terminatedEarly; + this.shardStats = shardStats; + } + + public ShardStats getShardStats() { + return shardStats; + } + + /** + * Number of documents matching request. + */ + public long getCount() { + return count; + } + + /** + * The total number of shards the search was executed on. + */ + public int getTotalShards() { + return shardStats.totalShards; + } + + /** + * The successful number of shards the search was executed on. + */ + public int getSuccessfulShards() { + return shardStats.successfulShards; + } + + /** + * The number of shards skipped due to pre-filtering + */ + public int getSkippedShards() { + return shardStats.skippedShards; + } + + /** + * The failed number of shards the search was executed on. + */ + public int getFailedShards() { + return shardStats.shardFailures.length; + } + + /** + * The failures that occurred during the search. + */ + public ShardSearchFailure[] getShardFailures() { + return shardStats.shardFailures; + } + + public RestStatus status() { + return RestStatus.status(shardStats.successfulShards, shardStats.totalShards, shardStats.shardFailures); + } + + public static CountResponse fromXContent(XContentParser parser) throws IOException { + ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser::getTokenLocation); + parser.nextToken(); + ensureExpectedToken(XContentParser.Token.FIELD_NAME, parser.currentToken(), parser::getTokenLocation); + String currentName = parser.currentName(); + Boolean terminatedEarly = null; + long count = 0; + ShardStats shardStats = new ShardStats(-1, -1,0, ShardSearchFailure.EMPTY_ARRAY); + + for (XContentParser.Token token = parser.nextToken(); token != XContentParser.Token.END_OBJECT; token = parser.nextToken()) { + if (token == XContentParser.Token.FIELD_NAME) { + currentName = parser.currentName(); + } else if (token.isValue()) { + if (COUNT.match(currentName, parser.getDeprecationHandler())) { + count = parser.longValue(); + } else if (TERMINATED_EARLY.match(currentName, parser.getDeprecationHandler())) { + terminatedEarly = parser.booleanValue(); + } else { + parser.skipChildren(); + } + } else if (token == XContentParser.Token.START_OBJECT) { + if (SHARDS.match(currentName, parser.getDeprecationHandler())) { + shardStats = ShardStats.fromXContent(parser); + } else { + parser.skipChildren(); + } + } + } + return new CountResponse(count, terminatedEarly, shardStats); + } + + @Override + public String toString() { + String s = "{" + + "count=" + count + + (isTerminatedEarly() != null ? ", terminatedEarly=" + terminatedEarly : "") + + ", " + shardStats + + '}'; + return s; + } + + public Boolean isTerminatedEarly() { + return terminatedEarly; + } + + /** + * Encapsulates _shards section of count api response. 
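Once parsed, a response can be inspected through the getters above; a rough sketch, reusing the `countResponse` from the previous snippet:

---------------------------------------------------
long count = countResponse.getCount();
RestStatus status = countResponse.status();   // aggregates shard successes/failures
if (countResponse.getFailedShards() > 0) {
    for (ShardSearchFailure failure : countResponse.getShardFailures()) {
        // inspect failure.reason(), failure.shardId(), ...
    }
}
---------------------------------------------------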
+ */ + public static final class ShardStats { + + static final ParseField FAILED = new ParseField("failed"); + static final ParseField SKIPPED = new ParseField("skipped"); + static final ParseField TOTAL = new ParseField("total"); + static final ParseField SUCCESSFUL = new ParseField("successful"); + static final ParseField FAILURES = new ParseField("failures"); + + private final int successfulShards; + private final int totalShards; + private final int skippedShards; + private final ShardSearchFailure[] shardFailures; + + public ShardStats(int successfulShards, int totalShards, int skippedShards, ShardSearchFailure[] shardFailures) { + this.successfulShards = successfulShards; + this.totalShards = totalShards; + this.skippedShards = skippedShards; + this.shardFailures = Arrays.copyOf(shardFailures, shardFailures.length); + } + + public int getSuccessfulShards() { + return successfulShards; + } + + public int getTotalShards() { + return totalShards; + } + + public int getSkippedShards() { + return skippedShards; + } + + public ShardSearchFailure[] getShardFailures() { + return Arrays.copyOf(shardFailures, shardFailures.length, ShardSearchFailure[].class); + } + + static ShardStats fromXContent(XContentParser parser) throws IOException { + int successfulShards = -1; + int totalShards = -1; + int skippedShards = 0; //BWC @see org.elasticsearch.action.search.SearchResponse + List failures = new ArrayList<>(); + XContentParser.Token token; + String currentName = parser.currentName(); + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentName = parser.currentName(); + } else if (token.isValue()) { + if (FAILED.match(currentName, parser.getDeprecationHandler())) { + parser.intValue(); + } else if (SKIPPED.match(currentName, parser.getDeprecationHandler())) { + skippedShards = parser.intValue(); + } else if (TOTAL.match(currentName, parser.getDeprecationHandler())) { + totalShards = parser.intValue(); + } else if (SUCCESSFUL.match(currentName, parser.getDeprecationHandler())) { + successfulShards = parser.intValue(); + } else { + parser.skipChildren(); + } + } else if (token == XContentParser.Token.START_ARRAY) { + if (FAILURES.match(currentName, parser.getDeprecationHandler())) { + while ((parser.nextToken()) != XContentParser.Token.END_ARRAY) { + failures.add(ShardSearchFailure.fromXContent(parser)); + } + } else { + parser.skipChildren(); + } + } else { + parser.skipChildren(); + } + } + return new ShardStats(successfulShards, totalShards, skippedShards, failures.toArray(new ShardSearchFailure[failures.size()])); + } + + @Override + public String toString() { + return "_shards : {" + + "total=" + totalShards + + ", successful=" + successfulShards + + ", skipped=" + skippedShards + + ", failed=" + (shardFailures != null && shardFailures.length > 0 ? shardFailures.length : 0 ) + + (shardFailures != null && shardFailures.length > 0 ? 
", failures: " + Arrays.asList(shardFailures): "") + + '}'; + } + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/graph/GraphExploreResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/graph/GraphExploreResponse.java index 2b5d1c7ecf4be..dddc4bedfe466 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/graph/GraphExploreResponse.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/graph/GraphExploreResponse.java @@ -47,7 +47,7 @@ * * @see GraphExploreRequest */ -public class GraphExploreResponse implements ToXContentObject { +public class GraphExploreResponse implements ToXContentObject { private long tookInMillis; private boolean timedOut = false; @@ -94,14 +94,30 @@ public Collection getConnections() { return connections.values(); } + public Collection getConnectionIds() { + return connections.keySet(); + } + + public Connection getConnection(ConnectionId connectionId) { + return connections.get(connectionId); + } + public Collection getVertices() { return vertices.values(); } - + + public Collection getVertexIds() { + return vertices.keySet(); + } + public Vertex getVertex(VertexId id) { return vertices.get(id); } + public boolean isReturnDetailedInfo() { + return returnDetailedInfo; + } + private static final ParseField TOOK = new ParseField("took"); private static final ParseField TIMED_OUT = new ParseField("timed_out"); private static final ParseField VERTICES = new ParseField("vertices"); @@ -190,7 +206,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws PARSER.declareObjectArray(optionalConstructorArg(), (p, c) -> ShardSearchFailure.fromXContent(p), FAILURES); } - public static GraphExploreResponse fromXContext(XContentParser parser) throws IOException { + public static GraphExploreResponse fromXContent(XContentParser parser) throws IOException { return PARSER.apply(parser, null); } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/graph/Vertex.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/graph/Vertex.java index 852372209da9c..54b0b5223277d 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/graph/Vertex.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/graph/Vertex.java @@ -220,6 +220,14 @@ public VertexId(String field, String term) { this.term = term; } + public String getField() { + return field; + } + + public String getTerm() { + return term; + } + @Override public boolean equals(Object o) { if (this == o) diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/AllocateAction.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/AllocateAction.java new file mode 100644 index 0000000000000..702db15b965c7 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/AllocateAction.java @@ -0,0 +1,147 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.client.indexlifecycle;
+
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.xcontent.ConstructingObjectParser;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.ToXContentObject;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.Map;
+import java.util.Objects;
+
+public class AllocateAction implements LifecycleAction, ToXContentObject {
+
+    public static final String NAME = "allocate";
+    static final ParseField NUMBER_OF_REPLICAS_FIELD = new ParseField("number_of_replicas");
+    static final ParseField INCLUDE_FIELD = new ParseField("include");
+    static final ParseField EXCLUDE_FIELD = new ParseField("exclude");
+    static final ParseField REQUIRE_FIELD = new ParseField("require");
+
+    @SuppressWarnings("unchecked")
+    private static final ConstructingObjectParser<AllocateAction, Void> PARSER = new ConstructingObjectParser<>(NAME,
+        a -> new AllocateAction((Integer) a[0], (Map<String, String>) a[1], (Map<String, String>) a[2], (Map<String, String>) a[3]));
+
+    static {
+        PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), NUMBER_OF_REPLICAS_FIELD);
+        PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), (p, c) -> p.mapStrings(), INCLUDE_FIELD);
+        PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), (p, c) -> p.mapStrings(), EXCLUDE_FIELD);
+        PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), (p, c) -> p.mapStrings(), REQUIRE_FIELD);
+    }
+
+    private final Integer numberOfReplicas;
+    private final Map<String, String> include;
+    private final Map<String, String> exclude;
+    private final Map<String, String> require;
+
+    public static AllocateAction parse(XContentParser parser) {
+        return PARSER.apply(parser, null);
+    }
+
+    public AllocateAction(Integer numberOfReplicas, Map<String, String> include, Map<String, String> exclude,
+                          Map<String, String> require) {
+        if (include == null) {
+            this.include = Collections.emptyMap();
+        } else {
+            this.include = include;
+        }
+        if (exclude == null) {
+            this.exclude = Collections.emptyMap();
+        } else {
+            this.exclude = exclude;
+        }
+        if (require == null) {
+            this.require = Collections.emptyMap();
+        } else {
+            this.require = require;
+        }
+        if (this.include.isEmpty() && this.exclude.isEmpty() && this.require.isEmpty() && numberOfReplicas == null) {
+            throw new IllegalArgumentException(
+                "At least one of " + INCLUDE_FIELD.getPreferredName() + ", " + EXCLUDE_FIELD.getPreferredName() + " or " +
+                    REQUIRE_FIELD.getPreferredName() + " must contain attributes for action " + NAME);
+        }
+        if (numberOfReplicas != null && numberOfReplicas < 0) {
+            throw new IllegalArgumentException("[" + NUMBER_OF_REPLICAS_FIELD.getPreferredName() + "] must be >= 0");
+        }
+        this.numberOfReplicas = numberOfReplicas;
+    }
+
+    public Integer getNumberOfReplicas() {
+        return numberOfReplicas;
+    }
+
+    public Map<String, String> getInclude() {
+        return include;
+    }
+
+    public Map<String, String> getExclude() {
+        return exclude;
+    }
+
+    public Map<String, String> getRequire() {
+        return require;
+    }
+
+    @Override
+    public 
String getName() { + return NAME; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { + builder.startObject(); + if (numberOfReplicas != null) { + builder.field(NUMBER_OF_REPLICAS_FIELD.getPreferredName(), numberOfReplicas); + } + builder.field(INCLUDE_FIELD.getPreferredName(), include); + builder.field(EXCLUDE_FIELD.getPreferredName(), exclude); + builder.field(REQUIRE_FIELD.getPreferredName(), require); + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(numberOfReplicas, include, exclude, require); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + AllocateAction other = (AllocateAction) obj; + return Objects.equals(numberOfReplicas, other.numberOfReplicas) && + Objects.equals(include, other.include) && + Objects.equals(exclude, other.exclude) && + Objects.equals(require, other.require); + } + + @Override + public String toString() { + return Strings.toString(this); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/DeleteAction.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/DeleteAction.java new file mode 100644 index 0000000000000..299b0ac582771 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/DeleteAction.java @@ -0,0 +1,74 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
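A minimal construction sketch for the `AllocateAction` above; the attribute maps are plain `Map<String, String>` node-attribute filters:

---------------------------------------------------
// Keep one replica and require shards to sit on nodes tagged box_type: cold.
Map<String, String> require = Collections.singletonMap("box_type", "cold");
AllocateAction allocate = new AllocateAction(1, null, null, require);
// new AllocateAction(null, null, null, null) would throw: the action needs
// either a replica count or at least one attribute map.
---------------------------------------------------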
+ */ +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; + +public class DeleteAction implements LifecycleAction, ToXContentObject { + public static final String NAME = "delete"; + + private static final ObjectParser PARSER = new ObjectParser<>(NAME, DeleteAction::new); + + public static DeleteAction parse(XContentParser parser) { + return PARSER.apply(parser, null); + } + + public DeleteAction() { + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { + builder.startObject(); + builder.endObject(); + return builder; + } + + @Override + public String getName() { + return NAME; + } + + @Override + public int hashCode() { + return 1; + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + return true; + } + + @Override + public String toString() { + return Strings.toString(this); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/DeleteLifecyclePolicyRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/DeleteLifecyclePolicyRequest.java new file mode 100644 index 0000000000000..fc029f37ac928 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/DeleteLifecyclePolicyRequest.java @@ -0,0 +1,59 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.client.TimedRequest; +import org.elasticsearch.common.Strings; + +import java.util.Objects; + +public class DeleteLifecyclePolicyRequest extends TimedRequest { + + private final String lifecyclePolicy; + + public DeleteLifecyclePolicyRequest(String lifecyclePolicy) { + if (Strings.isNullOrEmpty(lifecyclePolicy)) { + throw new IllegalArgumentException("lifecycle name must be present"); + } + this.lifecyclePolicy = lifecyclePolicy; + } + + public String getLifecyclePolicy() { + return lifecyclePolicy; + } + + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + DeleteLifecyclePolicyRequest that = (DeleteLifecyclePolicyRequest) o; + return Objects.equals(getLifecyclePolicy(), that.getLifecyclePolicy()); + } + + @Override + public int hashCode() { + return Objects.hash(getLifecyclePolicy()); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/ExplainLifecycleRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/ExplainLifecycleRequest.java new file mode 100644 index 0000000000000..9d9e80bf1eeee --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/ExplainLifecycleRequest.java @@ -0,0 +1,92 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.client.TimedRequest; +import org.elasticsearch.client.ValidationException; +import org.elasticsearch.common.Strings; + +import java.util.Arrays; +import java.util.Objects; +import java.util.Optional; + +/** + * The request object used by the Explain Lifecycle API. + * + * Multiple indices may be queried in the same request using the + * {@link #indices(String...)} method + */ +public class ExplainLifecycleRequest extends TimedRequest { + + private String[] indices = Strings.EMPTY_ARRAY; + private IndicesOptions indicesOptions = IndicesOptions.strictExpandOpen(); + + public ExplainLifecycleRequest() { + super(); + } + + public ExplainLifecycleRequest indices(String... 
indices) { + this.indices = indices; + return this; + } + + public String[] indices() { + return indices; + } + + public ExplainLifecycleRequest indicesOptions(IndicesOptions indicesOptions) { + this.indicesOptions = indicesOptions; + return this; + } + + public IndicesOptions indicesOptions() { + return indicesOptions; + } + + @Override + public Optional validate() { + return Optional.empty(); + } + + @Override + public int hashCode() { + return Objects.hash(Arrays.hashCode(indices()), indicesOptions()); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + ExplainLifecycleRequest other = (ExplainLifecycleRequest) obj; + return Objects.deepEquals(indices(), other.indices()) && + Objects.equals(indicesOptions(), other.indicesOptions()); + } + + @Override + public String toString() { + return "ExplainLifecycleRequest [indices()=" + Arrays.toString(indices()) + ", indicesOptions()=" + indicesOptions() + "]"; + } + +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/ExplainLifecycleResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/ExplainLifecycleResponse.java new file mode 100644 index 0000000000000..de2803afe5415 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/ExplainLifecycleResponse.java @@ -0,0 +1,109 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.function.Function; +import java.util.stream.Collectors; + +/** + * The response object returned by the Explain Lifecycle API. + * + * Since the API can be run over multiple indices the response provides a map of + * index to the explanation of the lifecycle status for that index. 
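Usage of the `ExplainLifecycleRequest` above is a small fluent chain; a hypothetical request over two indices:

---------------------------------------------------
ExplainLifecycleRequest explainRequest = new ExplainLifecycleRequest()
    .indices("logs-000001", "logs-000002")
    .indicesOptions(IndicesOptions.strictExpandOpen());
---------------------------------------------------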
+ */ +public class ExplainLifecycleResponse implements ToXContentObject { + + private static final ParseField INDICES_FIELD = new ParseField("indices"); + + private Map indexResponses; + + @SuppressWarnings("unchecked") + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "explain_lifecycle_response", a -> new ExplainLifecycleResponse(((List) a[0]).stream() + .collect(Collectors.toMap(IndexLifecycleExplainResponse::getIndex, Function.identity())))); + static { + PARSER.declareNamedObjects(ConstructingObjectParser.constructorArg(), (p, c, n) -> IndexLifecycleExplainResponse.PARSER.apply(p, c), + INDICES_FIELD); + } + + public static ExplainLifecycleResponse fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } + + public ExplainLifecycleResponse(Map indexResponses) { + this.indexResponses = indexResponses; + } + + /** + * @return a map of the responses from each requested index. The maps key is + * the index name and the value is the + * {@link IndexLifecycleExplainResponse} describing the current + * lifecycle status of that index + */ + public Map getIndexResponses() { + return indexResponses; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.startObject(INDICES_FIELD.getPreferredName()); + for (IndexLifecycleExplainResponse indexResponse : indexResponses.values()) { + builder.field(indexResponse.getIndex(), indexResponse); + } + builder.endObject(); + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(indexResponses); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + ExplainLifecycleResponse other = (ExplainLifecycleResponse) obj; + return Objects.equals(indexResponses, other.indexResponses); + } + + @Override + public String toString() { + return Strings.toString(this, true, true); + } + +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/ForceMergeAction.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/ForceMergeAction.java new file mode 100644 index 0000000000000..eb564b7cd27b6 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/ForceMergeAction.java @@ -0,0 +1,97 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
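Navigating the response, sketched under the assumption that `explainResponse` came back from an actual explain call:

---------------------------------------------------
Map<String, IndexLifecycleExplainResponse> perIndex = explainResponse.getIndexResponses();
IndexLifecycleExplainResponse explain = perIndex.get("logs-000001");
if (explain != null && explain.managedByILM()) {
    String phase = explain.getPhase();   // e.g. "hot", "warm", "cold" or "delete"
}
---------------------------------------------------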
+ */ +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Objects; + +public class ForceMergeAction implements LifecycleAction, ToXContentObject { + public static final String NAME = "forcemerge"; + private static final ParseField MAX_NUM_SEGMENTS_FIELD = new ParseField("max_num_segments"); + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(NAME, + false, a -> { + int maxNumSegments = (int) a[0]; + return new ForceMergeAction(maxNumSegments); + }); + + static { + PARSER.declareInt(ConstructingObjectParser.constructorArg(), MAX_NUM_SEGMENTS_FIELD); + } + + private final int maxNumSegments; + + public static ForceMergeAction parse(XContentParser parser) { + return PARSER.apply(parser, null); + } + + public ForceMergeAction(int maxNumSegments) { + if (maxNumSegments <= 0) { + throw new IllegalArgumentException("[" + MAX_NUM_SEGMENTS_FIELD.getPreferredName() + + "] must be a positive integer"); + } + this.maxNumSegments = maxNumSegments; + } + + public int getMaxNumSegments() { + return maxNumSegments; + } + + @Override + public String getName() { + return NAME; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(MAX_NUM_SEGMENTS_FIELD.getPreferredName(), maxNumSegments); + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(maxNumSegments); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + ForceMergeAction other = (ForceMergeAction) obj; + return Objects.equals(maxNumSegments, other.maxNumSegments); + } + + @Override + public String toString() { + return Strings.toString(this); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/GetLifecyclePolicyRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/GetLifecyclePolicyRequest.java new file mode 100644 index 0000000000000..af17a3ea48cf9 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/GetLifecyclePolicyRequest.java @@ -0,0 +1,61 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
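The force-merge action above is constructed with its target segment count, for example:

---------------------------------------------------
ForceMergeAction forceMerge = new ForceMergeAction(1);   // merge each shard down to one segment
// new ForceMergeAction(0) throws IllegalArgumentException: max_num_segments must be positive
---------------------------------------------------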
+ */ + + +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.client.TimedRequest; +import org.elasticsearch.common.Strings; + +import java.util.Arrays; + +public class GetLifecyclePolicyRequest extends TimedRequest { + + private final String[] policyNames; + + public GetLifecyclePolicyRequest(String... policyNames) { + if (policyNames == null) { + this.policyNames = Strings.EMPTY_ARRAY; + } else { + for (String name : policyNames) { + if (name == null) { + throw new IllegalArgumentException("cannot include null policy name"); + } + } + this.policyNames = policyNames; + } + } + + public String[] getPolicyNames() { + return policyNames; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + GetLifecyclePolicyRequest request = (GetLifecyclePolicyRequest) o; + return Arrays.equals(getPolicyNames(), request.getPolicyNames()); + } + + @Override + public int hashCode() { + return Arrays.hashCode(getPolicyNames()); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/GetLifecyclePolicyResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/GetLifecyclePolicyResponse.java new file mode 100644 index 0000000000000..fc007cb5aebd4 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/GetLifecyclePolicyResponse.java @@ -0,0 +1,91 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
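The request above accepts zero or more policy names:

---------------------------------------------------
GetLifecyclePolicyRequest allPolicies = new GetLifecyclePolicyRequest();            // every policy
GetLifecyclePolicyRequest twoPolicies = new GetLifecyclePolicyRequest("p1", "p2");  // named policies
---------------------------------------------------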
+ */ + + +package org.elasticsearch.client.indexlifecycle; + +import com.carrotsearch.hppc.cursors.ObjectObjectCursor; +import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Objects; + +import static org.elasticsearch.common.xcontent.XContentParserUtils.ensureExpectedToken; + +public class GetLifecyclePolicyResponse implements ToXContentObject { + + private final ImmutableOpenMap policies; + + public GetLifecyclePolicyResponse(ImmutableOpenMap policies) { + this.policies = policies; + } + + public ImmutableOpenMap getPolicies() { + return policies; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { + builder.startObject(); + for (ObjectObjectCursor stringLifecyclePolicyObjectObjectCursor : policies) { + builder.field(stringLifecyclePolicyObjectObjectCursor.key, stringLifecyclePolicyObjectObjectCursor.value); + } + builder.endObject(); + return builder; + } + + public static GetLifecyclePolicyResponse fromXContent(XContentParser parser) throws IOException { + ImmutableOpenMap.Builder policies = ImmutableOpenMap.builder(); + + if (parser.currentToken() == null) { + parser.nextToken(); + } + ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.currentToken(), parser::getTokenLocation); + parser.nextToken(); + + while (!parser.isClosed()) { + if (parser.currentToken() == XContentParser.Token.START_OBJECT) { + String policyName = parser.currentName(); + LifecyclePolicyMetadata policyDefinion = LifecyclePolicyMetadata.parse(parser, policyName); + policies.put(policyName, policyDefinion); + } else { + parser.nextToken(); + } + } + + return new GetLifecyclePolicyResponse(policies.build()); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + GetLifecyclePolicyResponse that = (GetLifecyclePolicyResponse) o; + return Objects.equals(getPolicies(), that.getPolicies()); + } + + @Override + public int hashCode() { + return Objects.hash(getPolicies()); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/IndexLifecycleExplainResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/IndexLifecycleExplainResponse.java new file mode 100644 index 0000000000000..58ba7e63c03a7 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/IndexLifecycleExplainResponse.java @@ -0,0 +1,263 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.xcontent.json.JsonXContent; + +import java.io.IOException; +import java.util.Objects; + +public class IndexLifecycleExplainResponse implements ToXContentObject { + + private static final ParseField INDEX_FIELD = new ParseField("index"); + private static final ParseField MANAGED_BY_ILM_FIELD = new ParseField("managed"); + private static final ParseField POLICY_NAME_FIELD = new ParseField("policy"); + private static final ParseField LIFECYCLE_DATE_MILLIS_FIELD = new ParseField("lifecycle_date_millis"); + private static final ParseField LIFECYCLE_DATE_FIELD = new ParseField("lifecycle_date"); + private static final ParseField PHASE_FIELD = new ParseField("phase"); + private static final ParseField ACTION_FIELD = new ParseField("action"); + private static final ParseField STEP_FIELD = new ParseField("step"); + private static final ParseField FAILED_STEP_FIELD = new ParseField("failed_step"); + private static final ParseField PHASE_TIME_MILLIS_FIELD = new ParseField("phase_time_millis"); + private static final ParseField PHASE_TIME_FIELD = new ParseField("phase_time"); + private static final ParseField ACTION_TIME_MILLIS_FIELD = new ParseField("action_time_millis"); + private static final ParseField ACTION_TIME_FIELD = new ParseField("action_time"); + private static final ParseField STEP_TIME_MILLIS_FIELD = new ParseField("step_time_millis"); + private static final ParseField STEP_TIME_FIELD = new ParseField("step_time"); + private static final ParseField STEP_INFO_FIELD = new ParseField("step_info"); + private static final ParseField PHASE_EXECUTION_INFO = new ParseField("phase_execution"); + + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "index_lifecycle_explain_response", + a -> new IndexLifecycleExplainResponse( + (String) a[0], + (boolean) a[1], + (String) a[2], + (long) (a[3] == null ? -1L: a[3]), + (String) a[4], + (String) a[5], + (String) a[6], + (String) a[7], + (long) (a[8] == null ? -1L: a[8]), + (long) (a[9] == null ? -1L: a[9]), + (long) (a[10] == null ? 
-1L: a[10]), + (BytesReference) a[11], + (PhaseExecutionInfo) a[12])); + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), INDEX_FIELD); + PARSER.declareBoolean(ConstructingObjectParser.constructorArg(), MANAGED_BY_ILM_FIELD); + PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), POLICY_NAME_FIELD); + PARSER.declareLong(ConstructingObjectParser.optionalConstructorArg(), LIFECYCLE_DATE_MILLIS_FIELD); + PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), PHASE_FIELD); + PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), ACTION_FIELD); + PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), STEP_FIELD); + PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), FAILED_STEP_FIELD); + PARSER.declareLong(ConstructingObjectParser.optionalConstructorArg(), PHASE_TIME_MILLIS_FIELD); + PARSER.declareLong(ConstructingObjectParser.optionalConstructorArg(), ACTION_TIME_MILLIS_FIELD); + PARSER.declareLong(ConstructingObjectParser.optionalConstructorArg(), STEP_TIME_MILLIS_FIELD); + PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), (p, c) -> { + XContentBuilder builder = JsonXContent.contentBuilder(); + builder.copyCurrentStructure(p); + return BytesArray.bytes(builder); + }, STEP_INFO_FIELD); + PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), (p, c) -> PhaseExecutionInfo.parse(p, ""), + PHASE_EXECUTION_INFO); + } + + private final String index; + private final String policyName; + private final String phase; + private final String action; + private final String step; + private final String failedStep; + private final long lifecycleDate; + private final long phaseTime; + private final long actionTime; + private final long stepTime; + private final boolean managedByILM; + private final BytesReference stepInfo; + private final PhaseExecutionInfo phaseExecutionInfo; + + public static IndexLifecycleExplainResponse newManagedIndexResponse(String index, String policyName, long lifecycleDate, + String phase, String action, String step, String failedStep, + long phaseTime, long actionTime, long stepTime, + BytesReference stepInfo, PhaseExecutionInfo phaseExecutionInfo) { + return new IndexLifecycleExplainResponse(index, true, policyName, lifecycleDate, phase, action, step, failedStep, phaseTime, + actionTime, stepTime, stepInfo, phaseExecutionInfo); + } + + public static IndexLifecycleExplainResponse newUnmanagedIndexResponse(String index) { + return new IndexLifecycleExplainResponse(index, false, null, -1L, null, null, null, null, -1L, -1L, -1L, null, null); + } + + private IndexLifecycleExplainResponse(String index, boolean managedByILM, String policyName, long lifecycleDate, + String phase, String action, String step, String failedStep, long phaseTime, long actionTime, + long stepTime, BytesReference stepInfo, PhaseExecutionInfo phaseExecutionInfo) { + if (managedByILM) { + if (policyName == null) { + throw new IllegalArgumentException("[" + POLICY_NAME_FIELD.getPreferredName() + "] cannot be null for managed index"); + } + } else { + if (policyName != null || lifecycleDate >= 0 || phase != null || action != null || step != null || failedStep != null + || phaseTime >= 0 || actionTime >= 0 || stepTime >= 0 || stepInfo != null || phaseExecutionInfo != null) { + throw new IllegalArgumentException( + "Unmanaged index response must only contain fields: [" + MANAGED_BY_ILM_FIELD + ", " + INDEX_FIELD + "]"); + } + } + this.index = index; + this.policyName 
= policyName; + this.managedByILM = managedByILM; + this.lifecycleDate = lifecycleDate; + this.phase = phase; + this.action = action; + this.step = step; + this.phaseTime = phaseTime; + this.actionTime = actionTime; + this.stepTime = stepTime; + this.failedStep = failedStep; + this.stepInfo = stepInfo; + this.phaseExecutionInfo = phaseExecutionInfo; + } + + public String getIndex() { + return index; + } + + public boolean managedByILM() { + return managedByILM; + } + + public String getPolicyName() { + return policyName; + } + + public long getLifecycleDate() { + return lifecycleDate; + } + + public String getPhase() { + return phase; + } + + public long getPhaseTime() { + return phaseTime; + } + + public String getAction() { + return action; + } + + public long getActionTime() { + return actionTime; + } + + public String getStep() { + return step; + } + + public long getStepTime() { + return stepTime; + } + + public String getFailedStep() { + return failedStep; + } + + public BytesReference getStepInfo() { + return stepInfo; + } + + public PhaseExecutionInfo getPhaseExecutionInfo() { + return phaseExecutionInfo; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(INDEX_FIELD.getPreferredName(), index); + builder.field(MANAGED_BY_ILM_FIELD.getPreferredName(), managedByILM); + if (managedByILM) { + builder.field(POLICY_NAME_FIELD.getPreferredName(), policyName); + builder.timeField(LIFECYCLE_DATE_MILLIS_FIELD.getPreferredName(), LIFECYCLE_DATE_FIELD.getPreferredName(), lifecycleDate); + builder.field(PHASE_FIELD.getPreferredName(), phase); + builder.timeField(PHASE_TIME_MILLIS_FIELD.getPreferredName(), PHASE_TIME_FIELD.getPreferredName(), phaseTime); + builder.field(ACTION_FIELD.getPreferredName(), action); + builder.timeField(ACTION_TIME_MILLIS_FIELD.getPreferredName(), ACTION_TIME_FIELD.getPreferredName(), actionTime); + builder.field(STEP_FIELD.getPreferredName(), step); + builder.timeField(STEP_TIME_MILLIS_FIELD.getPreferredName(), STEP_TIME_FIELD.getPreferredName(), stepTime); + if (Strings.hasLength(failedStep)) { + builder.field(FAILED_STEP_FIELD.getPreferredName(), failedStep); + } + if (stepInfo != null && stepInfo.length() > 0) { + builder.rawField(STEP_INFO_FIELD.getPreferredName(), stepInfo.streamInput(), XContentType.JSON); + } + if (phaseExecutionInfo != null) { + builder.field(PHASE_EXECUTION_INFO.getPreferredName(), phaseExecutionInfo); + } + } + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(index, managedByILM, policyName, lifecycleDate, phase, action, step, failedStep, phaseTime, actionTime, + stepTime, stepInfo, phaseExecutionInfo); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + IndexLifecycleExplainResponse other = (IndexLifecycleExplainResponse) obj; + return Objects.equals(index, other.index) && + Objects.equals(managedByILM, other.managedByILM) && + Objects.equals(policyName, other.policyName) && + Objects.equals(lifecycleDate, other.lifecycleDate) && + Objects.equals(phase, other.phase) && + Objects.equals(action, other.action) && + Objects.equals(step, other.step) && + Objects.equals(failedStep, other.failedStep) && + Objects.equals(phaseTime, other.phaseTime) && + Objects.equals(actionTime, other.actionTime) && + Objects.equals(stepTime, other.stepTime) && + Objects.equals(stepInfo, 
other.stepInfo) && + Objects.equals(phaseExecutionInfo, other.phaseExecutionInfo); + } + + @Override + public String toString() { + return Strings.toString(this, true, true); + } + +} + diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/IndexLifecycleNamedXContentProvider.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/IndexLifecycleNamedXContentProvider.java new file mode 100644 index 0000000000000..22935f197731c --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/IndexLifecycleNamedXContentProvider.java @@ -0,0 +1,56 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.plugins.spi.NamedXContentProvider; + +import java.util.Arrays; +import java.util.List; + +public class IndexLifecycleNamedXContentProvider implements NamedXContentProvider { + + + @Override + public List getNamedXContentParsers() { + return Arrays.asList( + // ILM + new NamedXContentRegistry.Entry(LifecycleAction.class, + new ParseField(AllocateAction.NAME), + AllocateAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, + new ParseField(DeleteAction.NAME), + DeleteAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, + new ParseField(ForceMergeAction.NAME), + ForceMergeAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, + new ParseField(ReadOnlyAction.NAME), + ReadOnlyAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, + new ParseField(RolloverAction.NAME), + RolloverAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, + new ParseField(ShrinkAction.NAME), + ShrinkAction::parse) + ); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/migration/IndexUpgradeInfoResponseTests.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/LifecycleAction.java similarity index 78% rename from client/rest-high-level/src/test/java/org/elasticsearch/client/migration/IndexUpgradeInfoResponseTests.java rename to client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/LifecycleAction.java index 8106043c08b3a..3787d26f5f889 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/migration/IndexUpgradeInfoResponseTests.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/LifecycleAction.java @@ -16,11 +16,15 @@ * specific language governing permissions and limitations * under the License. 
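The entries returned by `IndexLifecycleNamedXContentProvider` above are meant to be handed to a `NamedXContentRegistry`, so that parsers for lifecycle policies and phases can resolve action names back to the client-side classes; a minimal sketch:

---------------------------------------------------
List<NamedXContentRegistry.Entry> entries =
    new IndexLifecycleNamedXContentProvider().getNamedXContentParsers();
NamedXContentRegistry registry = new NamedXContentRegistry(entries);
// Parsers created against this registry can turn "allocate", "delete",
// "forcemerge", etc. into the corresponding LifecycleAction implementations.
---------------------------------------------------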
*/ +package org.elasticsearch.client.indexlifecycle; -package org.elasticsearch.client.migration; - -import org.elasticsearch.test.ESTestCase; +/** + * interface for index lifecycle management actions + */ +public interface LifecycleAction { -public class IndexUpgradeInfoResponseTests extends ESTestCase { - // TODO: add to cross XPack-HLRC serialization test + /** + * @return the name of this action + */ + String getName(); } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/LifecycleManagementStatusRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/LifecycleManagementStatusRequest.java new file mode 100644 index 0000000000000..5db3d2d8c4e11 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/LifecycleManagementStatusRequest.java @@ -0,0 +1,28 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.client.TimedRequest; + +/** + * A {@link TimedRequest} to get the current status of index lifecycle management. + */ +public class LifecycleManagementStatusRequest extends TimedRequest { +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/LifecycleManagementStatusResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/LifecycleManagementStatusResponse.java new file mode 100644 index 0000000000000..c1586d7e1c738 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/LifecycleManagementStatusResponse.java @@ -0,0 +1,68 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.util.Objects; + +/** + * The current status of index lifecycle management. 
See {@link OperationMode} for available statuses. + */ +public class LifecycleManagementStatusResponse { + + private final OperationMode operationMode; + private static final String OPERATION_MODE = "operation_mode"; + @SuppressWarnings("unchecked") + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + OPERATION_MODE, a -> new LifecycleManagementStatusResponse((String) a[0])); + + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), new ParseField(OPERATION_MODE)); + } + + //package private for testing + LifecycleManagementStatusResponse(String operationMode) { + this.operationMode = OperationMode.fromString(operationMode); + } + + public OperationMode getOperationMode() { + return operationMode; + } + + public static LifecycleManagementStatusResponse fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + LifecycleManagementStatusResponse that = (LifecycleManagementStatusResponse) o; + return operationMode == that.operationMode; + } + + @Override + public int hashCode() { + return Objects.hash(operationMode); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/LifecyclePolicy.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/LifecyclePolicy.java new file mode 100644 index 0000000000000..2dc4e3644d1e4 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/LifecyclePolicy.java @@ -0,0 +1,145 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.function.Function; +import java.util.stream.Collectors; + +/** + * Represents the lifecycle of an index from creation to deletion. A + * {@link LifecyclePolicy} is made up of a set of {@link Phase}s which it will + * move through. 
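A minimal sketch of composing a policy from phases, assuming the no-arg `DeleteAction` constructor this change adds in a hunk not shown here (the policy name `logs_policy` is illustrative):

---------------------------------------------------------------------------
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

Map<String, Phase> phases = new HashMap<>();
// "hot" only permits the rollover action.
phases.put("hot", new Phase("hot", TimeValue.ZERO, Collections.singletonMap(
    RolloverAction.NAME, new RolloverAction(new ByteSizeValue(50, ByteSizeUnit.GB), null, null))));
// "delete" only permits the delete action.
phases.put("delete", new Phase("delete", TimeValue.timeValueDays(90), Collections.singletonMap(
    DeleteAction.NAME, new DeleteAction())));
LifecyclePolicy policy = new LifecyclePolicy("logs_policy", phases);
---------------------------------------------------------------------------

The constructor below rejects unknown phase names and actions not allowed in a given phase, so an invalid combination fails fast with an `IllegalArgumentException`.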
+ */
+public class LifecyclePolicy implements ToXContentObject {
+    static final ParseField PHASES_FIELD = new ParseField("phases");
+
+    @SuppressWarnings("unchecked")
+    public static ConstructingObjectParser<LifecyclePolicy, String> PARSER = new ConstructingObjectParser<>("lifecycle_policy", false,
+        (a, name) -> {
+            List<Phase> phases = (List<Phase>) a[0];
+            Map<String, Phase> phaseMap = phases.stream().collect(Collectors.toMap(Phase::getName, Function.identity()));
+            return new LifecyclePolicy(name, phaseMap);
+        });
+    private static Map<String, Set<String>> ALLOWED_ACTIONS = new HashMap<>();
+
+    static {
+        PARSER.declareNamedObjects(ConstructingObjectParser.constructorArg(), (p, c, n) -> Phase.parse(p, n), v -> {
+            throw new IllegalArgumentException("ordered " + PHASES_FIELD.getPreferredName() + " are not supported");
+        }, PHASES_FIELD);
+
+        ALLOWED_ACTIONS.put("hot", Sets.newHashSet(RolloverAction.NAME));
+        ALLOWED_ACTIONS.put("warm", Sets.newHashSet(AllocateAction.NAME, ForceMergeAction.NAME, ReadOnlyAction.NAME, ShrinkAction.NAME));
+        ALLOWED_ACTIONS.put("cold", Sets.newHashSet(AllocateAction.NAME));
+        ALLOWED_ACTIONS.put("delete", Sets.newHashSet(DeleteAction.NAME));
+    }
+
+    private final String name;
+    private final Map<String, Phase> phases;
+
+    /**
+     * @param name
+     *            the name of this {@link LifecyclePolicy}
+     * @param phases
+     *            a {@link Map} of {@link Phase}s which make up this
+     *            {@link LifecyclePolicy}.
+     */
+    public LifecyclePolicy(String name, Map<String, Phase> phases) {
+        phases.values().forEach(phase -> {
+            if (ALLOWED_ACTIONS.containsKey(phase.getName()) == false) {
+                throw new IllegalArgumentException("Lifecycle does not support phase [" + phase.getName() + "]");
+            }
+            phase.getActions().forEach((actionName, action) -> {
+                if (ALLOWED_ACTIONS.get(phase.getName()).contains(actionName) == false) {
+                    throw new IllegalArgumentException("invalid action [" + actionName + "] " +
+                        "defined in phase [" + phase.getName() + "]");
+                }
+            });
+        });
+        this.name = name;
+        this.phases = phases;
+    }
+
+    public static LifecyclePolicy parse(XContentParser parser, String name) {
+        return PARSER.apply(parser, name);
+    }
+
+    /**
+     * @return the name of this {@link LifecyclePolicy}
+     */
+    public String getName() {
+        return name;
+    }
+
+    /**
+     * @return the {@link Phase}s for this {@link LifecyclePolicy} in the order
+     * in which they will be executed.
+ */ + public Map getPhases() { + return phases; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.startObject(PHASES_FIELD.getPreferredName()); + for (Phase phase : phases.values()) { + builder.field(phase.getName(), phase); + } + builder.endObject(); + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(name, phases); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + LifecyclePolicy other = (LifecyclePolicy) obj; + return Objects.equals(name, other.name) && + Objects.equals(phases, other.phases); + } + + @Override + public String toString() { + return Strings.toString(this, true, true); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/LifecyclePolicyMetadata.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/LifecyclePolicyMetadata.java new file mode 100644 index 0000000000000..84de81437065d --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/LifecyclePolicyMetadata.java @@ -0,0 +1,117 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.time.Instant; +import java.time.ZoneOffset; +import java.time.ZonedDateTime; +import java.util.Objects; + +public class LifecyclePolicyMetadata implements ToXContentObject { + + static final ParseField POLICY = new ParseField("policy"); + static final ParseField VERSION = new ParseField("version"); + static final ParseField MODIFIED_DATE = new ParseField("modified_date"); + + @SuppressWarnings("unchecked") + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("policy_metadata", + a -> { + LifecyclePolicy policy = (LifecyclePolicy) a[0]; + return new LifecyclePolicyMetadata(policy, (long) a[1], ZonedDateTime.parse((String) a[2]).toInstant().toEpochMilli()); + }); + static { + PARSER.declareObject(ConstructingObjectParser.constructorArg(), LifecyclePolicy::parse, POLICY); + PARSER.declareLong(ConstructingObjectParser.constructorArg(), VERSION); + PARSER.declareString(ConstructingObjectParser.constructorArg(), MODIFIED_DATE); + } + + public static LifecyclePolicyMetadata parse(XContentParser parser, String name) { + return PARSER.apply(parser, name); + } + + private final LifecyclePolicy policy; + private final long version; + private final long modifiedDate; + + public LifecyclePolicyMetadata(LifecyclePolicy policy, long version, long modifiedDate) { + this.policy = policy; + this.version = version; + this.modifiedDate = modifiedDate; + } + + public LifecyclePolicy getPolicy() { + return policy; + } + + public String getName() { + return policy.getName(); + } + + public long getVersion() { + return version; + } + + public long getModifiedDate() { + return modifiedDate; + } + + public String getModifiedDateString() { + ZonedDateTime modifiedDateTime = ZonedDateTime.ofInstant(Instant.ofEpochMilli(modifiedDate), ZoneOffset.UTC); + return modifiedDateTime.toString(); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(POLICY.getPreferredName(), policy); + builder.field(VERSION.getPreferredName(), version); + builder.field(MODIFIED_DATE.getPreferredName(), + ZonedDateTime.ofInstant(Instant.ofEpochMilli(modifiedDate), ZoneOffset.UTC).toString()); + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(policy, version, modifiedDate); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + LifecyclePolicyMetadata other = (LifecyclePolicyMetadata) obj; + return Objects.equals(policy, other.policy) && + Objects.equals(version, other.version) && + Objects.equals(modifiedDate, other.modifiedDate); + } + +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/OperationMode.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/OperationMode.java new file mode 100644 index 0000000000000..81634e5824ec8 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/OperationMode.java @@ -0,0 +1,68 @@ +/* + * Licensed to Elasticsearch under 
one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.action.admin.indices.shrink.ShrinkAction; + +import java.util.EnumSet; +import java.util.Locale; + +/** + * Enum representing the different modes that Index Lifecycle Service can operate in. + */ +public enum OperationMode { + /** + * This represents a state where no policies are executed + */ + STOPPED { + @Override + public boolean isValidChange(OperationMode nextMode) { + return nextMode == RUNNING; + } + }, + + /** + * this represents a state where only sensitive actions (like {@link ShrinkAction}) will be executed + * until they finish, at which point the operation mode will move to STOPPED. + */ + STOPPING { + @Override + public boolean isValidChange(OperationMode nextMode) { + return nextMode == RUNNING || nextMode == STOPPED; + } + }, + + /** + * Normal operation where all policies are executed as normal. + */ + RUNNING { + @Override + public boolean isValidChange(OperationMode nextMode) { + return nextMode == STOPPING; + } + }; + + public abstract boolean isValidChange(OperationMode nextMode); + + static OperationMode fromString(String string) { + return EnumSet.allOf(OperationMode.class).stream() + .filter(e -> string.equalsIgnoreCase(e.name())).findFirst() + .orElseThrow(() -> new IllegalArgumentException(String.format(Locale.ROOT, "%s is not a valid operation_mode", string))); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/Phase.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/Phase.java new file mode 100644 index 0000000000000..0c19d39c85964 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/Phase.java @@ -0,0 +1,143 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
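The transition rules encoded in `isValidChange` form a small cycle; a sketch of what they permit:

---------------------------------------------------------------------------
assert OperationMode.RUNNING.isValidChange(OperationMode.STOPPING);
assert OperationMode.STOPPING.isValidChange(OperationMode.STOPPED);
assert OperationMode.STOPPED.isValidChange(OperationMode.RUNNING);
// Skipping the STOPPING step is not allowed:
assert OperationMode.RUNNING.isValidChange(OperationMode.STOPPED) == false;
OperationMode mode = OperationMode.fromString("running"); // matching is case-insensitive
---------------------------------------------------------------------------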
+ */
+package org.elasticsearch.client.indexlifecycle;
+
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.ConstructingObjectParser;
+import org.elasticsearch.common.xcontent.ObjectParser.ValueType;
+import org.elasticsearch.common.xcontent.ToXContentObject;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+
+/**
+ * Represents a set of {@link LifecycleAction}s which should be executed at a
+ * particular point in the lifecycle of an index.
+ */
+public class Phase implements ToXContentObject {
+
+    static final ParseField MIN_AGE = new ParseField("min_age");
+    static final ParseField ACTIONS_FIELD = new ParseField("actions");
+
+    @SuppressWarnings("unchecked")
+    private static final ConstructingObjectParser<Phase, String> PARSER = new ConstructingObjectParser<>("phase", false,
+        (a, name) -> new Phase(name, (TimeValue) a[0], ((List<LifecycleAction>) a[1]).stream()
+            .collect(Collectors.toMap(LifecycleAction::getName, Function.identity()))));
+    static {
+        PARSER.declareField(ConstructingObjectParser.optionalConstructorArg(),
+            (p, c) -> TimeValue.parseTimeValue(p.text(), MIN_AGE.getPreferredName()), MIN_AGE, ValueType.VALUE);
+        PARSER.declareNamedObjects(ConstructingObjectParser.constructorArg(),
+            (p, c, n) -> p.namedObject(LifecycleAction.class, n, null), v -> {
+                throw new IllegalArgumentException("ordered " + ACTIONS_FIELD.getPreferredName() + " are not supported");
+            }, ACTIONS_FIELD);
+    }
+
+    public static Phase parse(XContentParser parser, String name) {
+        return PARSER.apply(parser, name);
+    }
+
+    private final String name;
+    private final Map<String, LifecycleAction> actions;
+    private final TimeValue minimumAge;
+
+    /**
+     * @param name
+     *            the name of this {@link Phase}.
+     * @param minimumAge
+     *            the age of the index when the index should move to this
+     *            {@link Phase}.
+     * @param actions
+     *            a {@link Map} of the {@link LifecycleAction}s to run during
+     *            this {@link Phase}. The keys in this map are the associated
+     *            action names.
+     */
+    public Phase(String name, TimeValue minimumAge, Map<String, LifecycleAction> actions) {
+        this.name = name;
+        if (minimumAge == null) {
+            this.minimumAge = TimeValue.ZERO;
+        } else {
+            this.minimumAge = minimumAge;
+        }
+        this.actions = actions;
+    }
+
+    /**
+     * @return the age of the index when the index should move to this
+     *         {@link Phase}.
+     */
+    public TimeValue getMinimumAge() {
+        return minimumAge;
+    }
+
+    /**
+     * @return the name of this {@link Phase}
+     */
+    public String getName() {
+        return name;
+    }
+
+    /**
+     * @return a {@link Map} of the {@link LifecycleAction}s to run during
+     * this {@link Phase}.
+ */ + public Map getActions() { + return actions; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(MIN_AGE.getPreferredName(), minimumAge.getStringRep()); + builder.field(ACTIONS_FIELD.getPreferredName(), actions); + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(name, minimumAge, actions); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + Phase other = (Phase) obj; + return Objects.equals(name, other.name) && + Objects.equals(minimumAge, other.minimumAge) && + Objects.equals(actions, other.actions); + } + + @Override + public String toString() { + return Strings.toString(this, true, true); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/PhaseExecutionInfo.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/PhaseExecutionInfo.java new file mode 100644 index 0000000000000..802ca8834cdd3 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/PhaseExecutionInfo.java @@ -0,0 +1,130 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Objects; + +/** + * This class contains information about the current phase being executed by Index + * Lifecycle Management on the specific index. 
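A short sketch of building a phase from the constructors above; both actions shown are declared elsewhere in this change:

---------------------------------------------------------------------------
import org.elasticsearch.common.unit.TimeValue;

import java.util.HashMap;
import java.util.Map;

Map<String, LifecycleAction> actions = new HashMap<>();
actions.put(ShrinkAction.NAME, new ShrinkAction(1));    // shrink to one shard
actions.put(ReadOnlyAction.NAME, new ReadOnlyAction()); // mark the index read-only
// Passing null for min_age defaults to TimeValue.ZERO in the constructor.
Phase warm = new Phase("warm", TimeValue.timeValueDays(30), actions);
---------------------------------------------------------------------------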
+ */ +public class PhaseExecutionInfo implements ToXContentObject { + private static final ParseField POLICY_NAME_FIELD = new ParseField("policy"); + private static final ParseField PHASE_DEFINITION_FIELD = new ParseField("phase_definition"); + private static final ParseField VERSION_FIELD = new ParseField("version"); + private static final ParseField MODIFIED_DATE_IN_MILLIS_FIELD = new ParseField("modified_date_in_millis"); + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "phase_execution_info", false, + (a, name) -> new PhaseExecutionInfo((String) a[0], (Phase) a[1], (long) a[2], (long) a[3])); + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), POLICY_NAME_FIELD); + PARSER.declareObject(ConstructingObjectParser.optionalConstructorArg(), Phase::parse, PHASE_DEFINITION_FIELD); + PARSER.declareLong(ConstructingObjectParser.constructorArg(), VERSION_FIELD); + PARSER.declareLong(ConstructingObjectParser.constructorArg(), MODIFIED_DATE_IN_MILLIS_FIELD); + } + + public static PhaseExecutionInfo parse(XContentParser parser, String name) { + return PARSER.apply(parser, name); + } + + private final String policyName; + private final Phase phase; + private final long version; + private final long modifiedDate; + + /** + * This class holds information about the current phase that is being executed + * + * @param policyName the name of the policy being executed, this may not be the current policy assigned to an index + * @param phase the current phase definition executed + * @param version the version of the policyName being executed + * @param modifiedDate the time the executing version of the phase was modified + */ + public PhaseExecutionInfo(String policyName, Phase phase, long version, long modifiedDate) { + this.policyName = policyName; + this.phase = phase; + this.version = version; + this.modifiedDate = modifiedDate; + } + + public String getPolicyName() { + return policyName; + } + + public Phase getPhase() { + return phase; + } + + public long getVersion() { + return version; + } + + public long getModifiedDate() { + return modifiedDate; + } + + @Override + public int hashCode() { + return Objects.hash(policyName, phase, version, modifiedDate); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + PhaseExecutionInfo other = (PhaseExecutionInfo) obj; + return Objects.equals(policyName, other.policyName) && + Objects.equals(phase, other.phase) && + Objects.equals(version, other.version) && + Objects.equals(modifiedDate, other.modifiedDate); + } + + @Override + public String toString() { + return Strings.toString(this, false, true); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(POLICY_NAME_FIELD.getPreferredName(), policyName); + if (phase != null) { + builder.field(PHASE_DEFINITION_FIELD.getPreferredName(), phase); + } + builder.field(VERSION_FIELD.getPreferredName(), version); + builder.timeField(MODIFIED_DATE_IN_MILLIS_FIELD.getPreferredName(), "modified_date", modifiedDate); + builder.endObject(); + return builder; + } +} + diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/PutLifecyclePolicyRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/PutLifecyclePolicyRequest.java new file mode 100644 index 0000000000000..ddfcc6bf6e65a --- 
/dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/PutLifecyclePolicyRequest.java @@ -0,0 +1,72 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.client.TimedRequest; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Objects; + +public class PutLifecyclePolicyRequest extends TimedRequest implements ToXContentObject { + + private final LifecyclePolicy policy; + + public PutLifecyclePolicyRequest(LifecyclePolicy policy) { + if (policy == null) { + throw new IllegalArgumentException("policy definition cannot be null"); + } + if (Strings.isNullOrEmpty(policy.getName())) { + throw new IllegalArgumentException("policy name must be present"); + } + this.policy = policy; + } + + public String getName() { + return policy.getName(); + } + + public LifecyclePolicy getLifecyclePolicy() { + return policy; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field("policy", policy); + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + PutLifecyclePolicyRequest that = (PutLifecyclePolicyRequest) o; + return Objects.equals(getLifecyclePolicy(), that.getLifecyclePolicy()); + } + + @Override + public int hashCode() { + return Objects.hash(getLifecyclePolicy()); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/ReadOnlyAction.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/ReadOnlyAction.java new file mode 100644 index 0000000000000..7734e792bbc5b --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/ReadOnlyAction.java @@ -0,0 +1,73 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
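A sketch of creating the put-policy request, reusing a `LifecyclePolicy` such as the one composed earlier. The client method that sends it is added elsewhere in this change, so the call below is indicative only:

---------------------------------------------------------------------------
PutLifecyclePolicyRequest request = new PutLifecyclePolicyRequest(policy);
// A null policy, or a policy without a name, is rejected with IllegalArgumentException.
// Indicative only:
// client.indexLifecycle().putLifecyclePolicy(request, RequestOptions.DEFAULT);
---------------------------------------------------------------------------

`toXContent` nests the policy under a top-level `policy` field, matching the body shape the ILM API expects.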
See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; + +public class ReadOnlyAction implements LifecycleAction, ToXContentObject { + public static final String NAME = "readonly"; + + private static final ObjectParser PARSER = new ObjectParser<>(NAME, false, ReadOnlyAction::new); + + public static ReadOnlyAction parse(XContentParser parser) { + return PARSER.apply(parser, null); + } + + public ReadOnlyAction() { + } + + @Override + public String getName() { + return NAME; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return ReadOnlyAction.class.hashCode(); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + return true; + } + + @Override + public String toString() { + return Strings.toString(this); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/RemoveIndexLifecyclePolicyRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/RemoveIndexLifecyclePolicyRequest.java new file mode 100644 index 0000000000000..88bdf4dd6868d --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/RemoveIndexLifecyclePolicyRequest.java @@ -0,0 +1,68 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.client.TimedRequest; + +import java.util.Collections; +import java.util.List; +import java.util.Objects; + +public class RemoveIndexLifecyclePolicyRequest extends TimedRequest { + + private final List indices; + private final IndicesOptions indicesOptions; + + public RemoveIndexLifecyclePolicyRequest(List indices) { + this(indices, IndicesOptions.strictExpandOpen()); + } + + public RemoveIndexLifecyclePolicyRequest(List indices, IndicesOptions indicesOptions) { + this.indices = Collections.unmodifiableList(Objects.requireNonNull(indices)); + this.indicesOptions = Objects.requireNonNull(indicesOptions); + } + + public List indices() { + return indices; + } + + public IndicesOptions indicesOptions() { + return indicesOptions; + } + + @Override + public int hashCode() { + return Objects.hash(indices, indicesOptions); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + RemoveIndexLifecyclePolicyRequest other = (RemoveIndexLifecyclePolicyRequest) obj; + return Objects.deepEquals(indices, other.indices) && + Objects.equals(indicesOptions, other.indicesOptions); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/RemoveIndexLifecyclePolicyResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/RemoveIndexLifecyclePolicyResponse.java new file mode 100644 index 0000000000000..3aae1537faa29 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/RemoveIndexLifecyclePolicyResponse.java @@ -0,0 +1,80 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
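A sketch of targeting several indices; the two-argument constructor overrides the default `IndicesOptions.strictExpandOpen()`:

---------------------------------------------------------------------------
import org.elasticsearch.action.support.IndicesOptions;

import java.util.Arrays;

RemoveIndexLifecyclePolicyRequest request = new RemoveIndexLifecyclePolicyRequest(
    Arrays.asList("logs-000001", "logs-000002"),
    IndicesOptions.lenientExpandOpen());
---------------------------------------------------------------------------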
+ */ + +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.util.Collections; +import java.util.List; +import java.util.Objects; + +public class RemoveIndexLifecyclePolicyResponse { + + public static final ParseField HAS_FAILURES_FIELD = new ParseField("has_failures"); + public static final ParseField FAILED_INDEXES_FIELD = new ParseField("failed_indexes"); + @SuppressWarnings("unchecked") + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "change_policy_for_index_response", true, args -> new RemoveIndexLifecyclePolicyResponse((List)args[0])); + static { + PARSER.declareStringArray(ConstructingObjectParser.constructorArg(), FAILED_INDEXES_FIELD); + // Needs to be declared but not used in constructing the response object + PARSER.declareBoolean(ConstructingObjectParser.constructorArg(), HAS_FAILURES_FIELD); + } + + private final List failedIndexes; + + public RemoveIndexLifecyclePolicyResponse(List failedIndexes) { + if (failedIndexes == null) { + throw new IllegalArgumentException(FAILED_INDEXES_FIELD.getPreferredName() + " cannot be null"); + } + this.failedIndexes = Collections.unmodifiableList(failedIndexes); + } + + public List getFailedIndexes() { + return failedIndexes; + } + + public boolean hasFailures() { + return failedIndexes.isEmpty() == false; + } + + public static RemoveIndexLifecyclePolicyResponse fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } + + @Override + public int hashCode() { + return Objects.hash(failedIndexes); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + RemoveIndexLifecyclePolicyResponse other = (RemoveIndexLifecyclePolicyResponse) obj; + return Objects.equals(failedIndexes, other.failedIndexes); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/RetryLifecyclePolicyRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/RetryLifecyclePolicyRequest.java new file mode 100644 index 0000000000000..6f3acaf19aaea --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/RetryLifecyclePolicyRequest.java @@ -0,0 +1,58 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client.indexlifecycle; + +import java.util.Arrays; +import java.util.List; +import java.util.Objects; +import org.elasticsearch.client.TimedRequest; + +public class RetryLifecyclePolicyRequest extends TimedRequest { + + private final List indices; + + public RetryLifecyclePolicyRequest(String... indices) { + if (indices.length == 0) { + throw new IllegalArgumentException("Must at least specify one index to retry"); + } + this.indices = Arrays.asList(indices); + } + + public List getIndices() { + return indices; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + RetryLifecyclePolicyRequest that = (RetryLifecyclePolicyRequest) o; + return indices.size() == that.indices.size() && indices.containsAll(that.indices); + } + + @Override + public int hashCode() { + return Objects.hash(indices); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/RolloverAction.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/RolloverAction.java new file mode 100644 index 0000000000000..0cc9dcf234969 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/RolloverAction.java @@ -0,0 +1,123 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
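A sketch of retrying policy execution for a failed index; note that `equals` deliberately compares the index lists without regard to order:

---------------------------------------------------------------------------
RetryLifecyclePolicyRequest retry = new RetryLifecyclePolicyRequest("logs-000001");
// new RetryLifecyclePolicyRequest() with no indices throws
// IllegalArgumentException: at least one index must be specified.
---------------------------------------------------------------------------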
+ */ +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser.ValueType; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Objects; + + +public class RolloverAction implements LifecycleAction, ToXContentObject { + public static final String NAME = "rollover"; + private static final ParseField MAX_SIZE_FIELD = new ParseField("max_size"); + private static final ParseField MAX_DOCS_FIELD = new ParseField("max_docs"); + private static final ParseField MAX_AGE_FIELD = new ParseField("max_age"); + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(NAME, + a -> new RolloverAction((ByteSizeValue) a[0], (TimeValue) a[1], (Long) a[2])); + static { + PARSER.declareField(ConstructingObjectParser.optionalConstructorArg(), + (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), MAX_SIZE_FIELD.getPreferredName()), MAX_SIZE_FIELD, ValueType.VALUE); + PARSER.declareField(ConstructingObjectParser.optionalConstructorArg(), + (p, c) -> TimeValue.parseTimeValue(p.text(), MAX_AGE_FIELD.getPreferredName()), MAX_AGE_FIELD, ValueType.VALUE); + PARSER.declareLong(ConstructingObjectParser.optionalConstructorArg(), MAX_DOCS_FIELD); + } + + private final ByteSizeValue maxSize; + private final Long maxDocs; + private final TimeValue maxAge; + + public static RolloverAction parse(XContentParser parser) { + return PARSER.apply(parser, null); + } + + public RolloverAction(ByteSizeValue maxSize, TimeValue maxAge, Long maxDocs) { + if (maxSize == null && maxAge == null && maxDocs == null) { + throw new IllegalArgumentException("At least one rollover condition must be set."); + } + this.maxSize = maxSize; + this.maxAge = maxAge; + this.maxDocs = maxDocs; + } + public ByteSizeValue getMaxSize() { + return maxSize; + } + + public TimeValue getMaxAge() { + return maxAge; + } + + public Long getMaxDocs() { + return maxDocs; + } + + @Override + public String getName() { + return NAME; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + if (maxSize != null) { + builder.field(MAX_SIZE_FIELD.getPreferredName(), maxSize.getStringRep()); + } + if (maxAge != null) { + builder.field(MAX_AGE_FIELD.getPreferredName(), maxAge.getStringRep()); + } + if (maxDocs != null) { + builder.field(MAX_DOCS_FIELD.getPreferredName(), maxDocs); + } + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(maxSize, maxAge, maxDocs); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + RolloverAction other = (RolloverAction) obj; + return Objects.equals(maxSize, other.maxSize) && + Objects.equals(maxAge, other.maxAge) && + Objects.equals(maxDocs, other.maxDocs); + } + + @Override + public String toString() { + return Strings.toString(this); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/ShrinkAction.java 
b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/ShrinkAction.java new file mode 100644 index 0000000000000..345356380145e --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/ShrinkAction.java @@ -0,0 +1,89 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Objects; + +public class ShrinkAction implements LifecycleAction, ToXContentObject { + public static final String NAME = "shrink"; + private static final ParseField NUMBER_OF_SHARDS_FIELD = new ParseField("number_of_shards"); + + private static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>(NAME, a -> new ShrinkAction((Integer) a[0])); + + static { + PARSER.declareInt(ConstructingObjectParser.constructorArg(), NUMBER_OF_SHARDS_FIELD); + } + + private int numberOfShards; + + public static ShrinkAction parse(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + public ShrinkAction(int numberOfShards) { + if (numberOfShards <= 0) { + throw new IllegalArgumentException("[" + NUMBER_OF_SHARDS_FIELD.getPreferredName() + "] must be greater than 0"); + } + this.numberOfShards = numberOfShards; + } + + int getNumberOfShards() { + return numberOfShards; + } + + @Override + public String getName() { + return NAME; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(NUMBER_OF_SHARDS_FIELD.getPreferredName(), numberOfShards); + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + ShrinkAction that = (ShrinkAction) o; + return Objects.equals(numberOfShards, that.numberOfShards); + } + + @Override + public int hashCode() { + return Objects.hash(numberOfShards); + } + + @Override + public String toString() { + return Strings.toString(this); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/StartILMRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/StartILMRequest.java new file mode 100644 index 0000000000000..84cc844a92a98 --- /dev/null +++ 
b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/StartILMRequest.java @@ -0,0 +1,44 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.client.TimedRequest; + +public class StartILMRequest extends TimedRequest { + + public StartILMRequest() { + } + + @Override + public int hashCode() { + return 64; + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + return true; + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/StopILMRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/StopILMRequest.java new file mode 100644 index 0000000000000..1695fc0dd7aea --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/indexlifecycle/StopILMRequest.java @@ -0,0 +1,44 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
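`StartILMRequest` carries no state of its own (hence the constant `hashCode`); only the timeouts inherited from `TimedRequest` matter. A sketch of combining it with the status request added earlier; the client method names are indicative only, since their wiring lands elsewhere in this change:

---------------------------------------------------------------------------
StartILMRequest start = new StartILMRequest();
// Indicative only:
// client.indexLifecycle().startILM(start, RequestOptions.DEFAULT);
// LifecycleManagementStatusResponse status = client.indexLifecycle()
//     .lifecycleManagementStatus(new LifecycleManagementStatusRequest(), RequestOptions.DEFAULT);
// assert status.getOperationMode() == OperationMode.RUNNING;
---------------------------------------------------------------------------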
+ */ + +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.client.TimedRequest; + +public class StopILMRequest extends TimedRequest { + + public StopILMRequest() { + } + + @Override + public int hashCode() { + return 75; + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (obj.getClass() != getClass()) { + return false; + } + return true; + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/license/StartBasicResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/license/StartBasicResponse.java index f6ab026402462..c2596f3e38a4e 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/license/StartBasicResponse.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/license/StartBasicResponse.java @@ -84,14 +84,13 @@ public class StartBasicResponse { } } return new Tuple<>(message, acknowledgeMessages); - }, - new ParseField("acknowledge")); + }, new ParseField("acknowledge")); } private Map acknowledgeMessages; private String acknowledgeMessage; - enum Status { + public enum Status { GENERATED_BASIC(true, null, RestStatus.OK), ALREADY_USING_BASIC(false, "Operation failed: Current license is basic.", RestStatus.FORBIDDEN), NEED_ACKNOWLEDGEMENT(false, "Operation failed: Needs acknowledgement.", RestStatus.OK); @@ -141,6 +140,10 @@ public StartBasicResponse() { this.acknowledgeMessage = acknowledgeMessage; } + public Status getStatus() { + return status; + } + public boolean isAcknowledged() { return status != StartBasicResponse.Status.NEED_ACKNOWLEDGEMENT; } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/migration/UpgradeActionRequired.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/migration/UpgradeActionRequired.java index e743d10529e1a..26b7b1e815d7f 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/migration/UpgradeActionRequired.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/migration/UpgradeActionRequired.java @@ -18,17 +18,12 @@ */ package org.elasticsearch.client.migration; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; - -import java.io.IOException; import java.util.Locale; /** * Indicates the type of the upgrade required for the index */ -public enum UpgradeActionRequired implements Writeable { +public enum UpgradeActionRequired { NOT_APPLICABLE, // Indicates that the check is not applicable to this index type, the next check will be performed UP_TO_DATE, // Indicates that the check finds this index to be up to date - no additional checks are required REINDEX, // The index should be reindex @@ -38,15 +33,6 @@ public static UpgradeActionRequired fromString(String value) { return UpgradeActionRequired.valueOf(value.toUpperCase(Locale.ROOT)); } - public static UpgradeActionRequired readFromStream(StreamInput in) throws IOException { - return in.readEnum(UpgradeActionRequired.class); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeEnum(this); - } - @Override public String toString() { return name().toLowerCase(Locale.ROOT); diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/PutFilterRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/PutFilterRequest.java new file mode 100644 index 0000000000000..5414c86258111 --- 
/dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/PutFilterRequest.java @@ -0,0 +1,84 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.client.ml.job.config.MlFilter; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Objects; + +/** + * Request to create a new Machine Learning MlFilter given a {@link MlFilter} configuration + */ +public class PutFilterRequest extends ActionRequest implements ToXContentObject { + + private final MlFilter filter; + + /** + * Construct a new PutMlFilterRequest + * + * @param filter a {@link MlFilter} configuration to create + */ + public PutFilterRequest(MlFilter filter) { + this.filter = filter; + } + + public MlFilter getMlFilter() { + return filter; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return filter.toXContent(builder, params); + } + + @Override + public boolean equals(Object object) { + if (this == object) { + return true; + } + + if (object == null || getClass() != object.getClass()) { + return false; + } + + PutFilterRequest request = (PutFilterRequest) object; + return Objects.equals(filter, request.filter); + } + + @Override + public int hashCode() { + return Objects.hash(filter); + } + + @Override + public final String toString() { + return Strings.toString(this); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/PutFilterResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/PutFilterResponse.java new file mode 100644 index 0000000000000..56164bd5be08e --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/PutFilterResponse.java @@ -0,0 +1,70 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
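A sketch of building a filter and its request from the `MlFilter` builder shown further down in this change; `build()` is the same method the response parser uses:

---------------------------------------------------------------------------
MlFilter filter = MlFilter.builder("safe_domains")        // filter ID
    .setDescription("Domains allowed by the detectors")
    .setItems("*.google.com", "wikipedia.org")            // wildcard at start or end of an item
    .build();
PutFilterRequest request = new PutFilterRequest(filter);
---------------------------------------------------------------------------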
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.client.ml.job.config.MlFilter; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Objects; + +/** + * Response containing the newly created {@link MlFilter} + */ +public class PutFilterResponse implements ToXContentObject { + + private MlFilter filter; + + public static PutFilterResponse fromXContent(XContentParser parser) throws IOException { + return new PutFilterResponse(MlFilter.PARSER.parse(parser, null).build()); + } + + PutFilterResponse(MlFilter filter) { + this.filter = filter; + } + + public MlFilter getResponse() { + return filter; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + filter.toXContent(builder, params); + return builder; + } + + @Override + public boolean equals(Object object) { + if (this == object) { + return true; + } + if (object == null || getClass() != object.getClass()) { + return false; + } + PutFilterResponse response = (PutFilterResponse) object; + return Objects.equals(filter, response.filter); + } + + @Override + public int hashCode() { + return Objects.hash(filter); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/MlFilter.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/MlFilter.java index e0d1bd0849b3b..1b67fc4459b50 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/MlFilter.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/MlFilter.java @@ -32,6 +32,14 @@ import java.util.SortedSet; import java.util.TreeSet; +/** + * An MlFilter Object + * + * A filter contains a list of strings. + * It can be used by one or more jobs. + * + * Specifically, filters are referenced in the custom_rules property of detector configuration objects. + */ public class MlFilter implements ToXContentObject { public static final ParseField TYPE = new ParseField("type"); @@ -105,6 +113,10 @@ public int hashCode() { return Objects.hash(id, description, items); } + /** + * Creates a new Builder object for creating an MlFilter object + * @param filterId The ID of the filter to create + */ public static Builder builder(String filterId) { return new Builder().setId(filterId); } @@ -118,6 +130,10 @@ public static class Builder { private Builder() { } + /** + * Set the ID of the filter + * @param id The id desired + */ public Builder setId(String id) { this.id = Objects.requireNonNull(id); return this; @@ -128,6 +144,10 @@ public String getId() { return id; } + /** + * Set the description of the filter + * @param description The description desired + */ public Builder setDescription(String description) { this.description = description; return this; @@ -143,6 +163,13 @@ public Builder setItems(List items) { return this; } + /** + * The items of the filter. 
+ * + * A wildcard * can be used at the beginning or the end of an item. Up to 10000 items are allowed in each filter. + * + * @param items String list of items to be applied in the filter + */ public Builder setItems(String... items) { setItems(Arrays.asList(items)); return this; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/AcknowledgedResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/AcknowledgedResponse.java index 4e279844afc59..8a588b24d536c 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/AcknowledgedResponse.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/AcknowledgedResponse.java @@ -32,9 +32,9 @@ import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; public abstract class AcknowledgedResponse implements ToXContentObject { + private final boolean acknowledged; protected static final String PARSE_FIELD_NAME = "acknowledged"; - private final boolean acknowledged; public AcknowledgedResponse(final boolean acknowledged) { this.acknowledged = acknowledged; @@ -83,4 +83,5 @@ public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params par protected String getFieldName() { return PARSE_FIELD_NAME; } + } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/DeleteRollupJobRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/DeleteRollupJobRequest.java new file mode 100644 index 0000000000000..9b7a322b23827 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/DeleteRollupJobRequest.java @@ -0,0 +1,79 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.client.rollup; + +import org.elasticsearch.client.Validatable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Objects; + + +public class DeleteRollupJobRequest implements Validatable, ToXContentObject { + + private static final ParseField ID_FIELD = new ParseField("id"); + private final String id; + + + public DeleteRollupJobRequest(String id) { + this.id = Objects.requireNonNull(id, "id parameter must not be null"); + } + + public String getId() { + return id; + } + + private static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>("request", a -> { + return new DeleteRollupJobRequest((String) a[0]); + }); + + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), ID_FIELD); + } + + public static DeleteRollupJobRequest fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(ID_FIELD.getPreferredName(), this.id); + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + DeleteRollupJobRequest that = (DeleteRollupJobRequest) o; + return Objects.equals(id, that.id); + } + + @Override + public int hashCode() { + return Objects.hash(id); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/DeleteRollupJobResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/DeleteRollupJobResponse.java new file mode 100644 index 0000000000000..adba15aaca552 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/DeleteRollupJobResponse.java @@ -0,0 +1,39 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client.rollup; + +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; + +public class DeleteRollupJobResponse extends AcknowledgedResponse { + + public DeleteRollupJobResponse(boolean acknowledged) { + super(acknowledged); + } + + private static final ConstructingObjectParser PARSER = AcknowledgedResponse + .generateParser("delete_rollup_job_response", DeleteRollupJobResponse::new, AcknowledgedResponse.PARSE_FIELD_NAME); + + public static DeleteRollupJobResponse fromXContent(final XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/PutRollupJobResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/PutRollupJobResponse.java index 60fc2842100cf..ea26343c76635 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/PutRollupJobResponse.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/rollup/PutRollupJobResponse.java @@ -23,17 +23,18 @@ import java.io.IOException; - public class PutRollupJobResponse extends AcknowledgedResponse { + public PutRollupJobResponse(boolean acknowledged) { super(acknowledged); } private static final ConstructingObjectParser PARSER = AcknowledgedResponse - .generateParser("delete_rollup_job_response", PutRollupJobResponse::new, AcknowledgedResponse.PARSE_FIELD_NAME); + .generateParser("put_rollup_job_response", PutRollupJobResponse::new, AcknowledgedResponse.PARSE_FIELD_NAME); public static PutRollupJobResponse fromXContent(final XContentParser parser) throws IOException { return PARSER.parse(parser, null); } + } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/AuthenticateRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/AuthenticateRequest.java new file mode 100644 index 0000000000000..2aefa97cb8bf1 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/AuthenticateRequest.java @@ -0,0 +1,41 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.security; + +import org.apache.http.client.methods.HttpGet; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Validatable; + +/** + * Empty request object required to make the authenticate call. The authenticate call + * retrieves metadata about the authenticated user.
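+ * + * A minimal usage sketch (assuming a {@code RestHighLevelClient} named {@code client}; the {@code security().authenticate} entry point is an assumption, not defined in this class): + * <pre> + * AuthenticateResponse response = client.security().authenticate(RequestOptions.DEFAULT); + * User user = response.getUser();        // the effective user + * boolean enabled = response.enabled();  // whether the account is enabled + * </pre>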
+ */ +public final class AuthenticateRequest implements Validatable { + + public static final AuthenticateRequest INSTANCE = new AuthenticateRequest(); + + private AuthenticateRequest() { + } + + public Request getRequest() { + return new Request(HttpGet.METHOD_NAME, "/_xpack/security/_authenticate"); + } + +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/AuthenticateResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/AuthenticateResponse.java new file mode 100644 index 0000000000000..62f1cc0955bd1 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/AuthenticateResponse.java @@ -0,0 +1,109 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.security; + +import org.elasticsearch.client.security.user.User; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; + +/** + * The response for the authenticate call. The response contains two fields: a + * user field and a boolean flag signaling if the user is enabled or not. The + * user object contains all user metadata which Elasticsearch uses to map roles, + * etc. 
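+ * + * The parsed body looks roughly like the following (an illustrative sketch; the field names match the {@code ParseField} declarations below, the values are made up): + * <pre> + * { + *   "username"  : "jdoe", + *   "roles"     : [ "monitoring_user" ], + *   "metadata"  : { }, + *   "full_name" : "John Doe", + *   "email"     : "jdoe@example.com", + *   "enabled"   : true + * } + * </pre>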
+ */ +public final class AuthenticateResponse { + + static final ParseField USERNAME = new ParseField("username"); + static final ParseField ROLES = new ParseField("roles"); + static final ParseField METADATA = new ParseField("metadata"); + static final ParseField FULL_NAME = new ParseField("full_name"); + static final ParseField EMAIL = new ParseField("email"); + static final ParseField ENABLED = new ParseField("enabled"); + + @SuppressWarnings("unchecked") + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "client_security_authenticate_response", + a -> new AuthenticateResponse(new User((String) a[0], ((List) a[1]), (Map) a[2], + (String) a[3], (String) a[4]), (Boolean) a[5])); + static { + PARSER.declareString(constructorArg(), USERNAME); + PARSER.declareStringArray(constructorArg(), ROLES); + PARSER.>declareObject(constructorArg(), (parser, c) -> parser.map(), METADATA); + PARSER.declareStringOrNull(optionalConstructorArg(), FULL_NAME); + PARSER.declareStringOrNull(optionalConstructorArg(), EMAIL); + PARSER.declareBoolean(constructorArg(), ENABLED); + } + + private final User user; + private final boolean enabled; + + public AuthenticateResponse(User user, boolean enabled) { + this.user = user; + this.enabled = enabled; + } + + /** + * @return The effective user. This is the authenticated user, or, when + * submitting requests on behalf of other users, it is the + * impersonated user. + */ + public User getUser() { + return user; + } + + /** + * @return whether the user is enabled or not + */ + public boolean enabled() { + return enabled; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + final AuthenticateResponse that = (AuthenticateResponse) o; + return user.equals(that.user) && enabled == that.enabled; + } + + @Override + public int hashCode() { + return Objects.hash(user, enabled); + } + + public static AuthenticateResponse fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/ClearRealmCacheRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/ClearRealmCacheRequest.java new file mode 100644 index 0000000000000..268fc4a1de6e0 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/ClearRealmCacheRequest.java @@ -0,0 +1,56 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client.security; + +import org.elasticsearch.client.Validatable; + +import java.util.Collections; +import java.util.List; +import java.util.Objects; + +/** + * Request for clearing the cache of one or more realms + */ +public final class ClearRealmCacheRequest implements Validatable { + + private final List realms; + private final List usernames; + + /** + * Create a new request to clear the cache of one or more realms + * @param realms the realms to clear the cache of. Must not be {@code null}. An empty list + * indicates that all realms should have their caches cleared. + * @param usernames the usernames to clear the cache of. Must not be {@code null}. An empty + * list indicates that every user in the listed realms should have their cache + * cleared. + */ + public ClearRealmCacheRequest(List realms, List usernames) { + this.realms = Collections.unmodifiableList(Objects.requireNonNull(realms, "the realms list must not be null")); + this.usernames = Collections.unmodifiableList(Objects.requireNonNull(usernames, "the usernames list must not be null")); + } + + public List getRealms() { + return realms; + } + + public List getUsernames() { + return usernames; + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/ClearRealmCacheResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/ClearRealmCacheResponse.java new file mode 100644 index 0000000000000..ce1495f9ef2b8 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/ClearRealmCacheResponse.java @@ -0,0 +1,51 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.security; + +import org.elasticsearch.client.NodesResponseHeader; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.List; + +/** + * Response for a clear realm cache request. The response includes a header that contains the + * number of successful and failed nodes.
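+ * + * A usage sketch (assuming a {@code RestHighLevelClient} named {@code client}; the {@code security().clearRealmCache} entry point is an assumption, and the realm name is illustrative): + * <pre> + * ClearRealmCacheRequest request = new ClearRealmCacheRequest( + *     Collections.singletonList("default_file"), Collections.emptyList()); + * ClearRealmCacheResponse response = client.security().clearRealmCache(request, RequestOptions.DEFAULT); + * </pre>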
+ */ +public final class ClearRealmCacheResponse extends SecurityNodesResponse { + + @SuppressWarnings("unchecked") + public static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>("clear_realm_cache_response_parser", + args -> new ClearRealmCacheResponse((List) args[0], (NodesResponseHeader) args[1], (String) args[2])); + + static { + SecurityNodesResponse.declareCommonNodesResponseParsing(PARSER); + } + + public ClearRealmCacheResponse(List nodes, NodesResponseHeader header, String clusterName) { + super(nodes, header, clusterName); + } + + public static ClearRealmCacheResponse fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/ClearRolesCacheResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/ClearRolesCacheResponse.java index b6b864a37e226..c7df7e0f492e4 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/ClearRolesCacheResponse.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/ClearRolesCacheResponse.java @@ -20,18 +20,16 @@ package org.elasticsearch.client.security; import org.elasticsearch.client.NodesResponseHeader; -import org.elasticsearch.common.ParseField; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; import java.util.List; -import java.util.Objects; /** - * The response object that will be returned when clearing the cache of native roles + * The response object that will be returned when clearing the roles cache */ -public final class ClearRolesCacheResponse { +public final class ClearRolesCacheResponse extends SecurityNodesResponse { @SuppressWarnings("unchecked") private static final ConstructingObjectParser PARSER = @@ -39,68 +37,11 @@ public final class ClearRolesCacheResponse { args -> new ClearRolesCacheResponse((List)args[0], (NodesResponseHeader) args[1], (String) args[2])); static { - PARSER.declareNamedObjects(ConstructingObjectParser.constructorArg(), (p, c, n) -> Node.PARSER.apply(p, n), - new ParseField("nodes")); - PARSER.declareObject(ConstructingObjectParser.constructorArg(), NodesResponseHeader::fromXContent, new ParseField("_nodes")); - PARSER.declareString(ConstructingObjectParser.constructorArg(), new ParseField("cluster_name")); + SecurityNodesResponse.declareCommonNodesResponseParsing(PARSER); } - private final List nodes; - private final NodesResponseHeader header; - private final String clusterName; - public ClearRolesCacheResponse(List nodes, NodesResponseHeader header, String clusterName) { - this.nodes = nodes; - this.header = header; - this.clusterName = Objects.requireNonNull(clusterName, "cluster name must be provided"); - } - - /** returns a list of nodes in which the cache was cleared */ - public List getNodes() { - return nodes; - } - - /** - * Get the cluster name associated with all of the nodes. - * - * @return Never {@code null}. - */ - public String getClusterName() { - return clusterName; - } - - /** - * Gets information about the number of total, successful and failed nodes the request was run on. - * Also includes exceptions if relevant. 
- */ - public NodesResponseHeader getHeader() { - return header; - } - - public static class Node { - - private static final ConstructingObjectParser PARSER = - new ConstructingObjectParser<>("clear_roles_cache_response_node", false, (args, id) -> new Node(id, (String) args[0])); - - static { - PARSER.declareString(ConstructingObjectParser.constructorArg(), new ParseField("name")); - } - - private final String id; - private final String name; - - public Node(String id, String name) { - this.id = id; - this.name = name; - } - - public String getId() { - return id; - } - - public String getName() { - return name; - } + super(nodes, header, clusterName); } public static ClearRolesCacheResponse fromXContent(XContentParser parser) throws IOException { diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/SecurityNodesResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/SecurityNodesResponse.java new file mode 100644 index 0000000000000..22b9e8220e743 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/SecurityNodesResponse.java @@ -0,0 +1,79 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.security; + +import org.elasticsearch.client.NodesResponse; +import org.elasticsearch.client.NodesResponseHeader; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; + +import java.util.List; + +/** + * Base class for security responses that are node responses. Security uses a common pattern in the + * response so this class is present to avoid duplication. 
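+ * + * Concrete responses such as {@link ClearRealmCacheResponse} and {@link ClearRolesCacheResponse} only declare their own parser and delegate parsing of the shared {@code nodes}, {@code _nodes} and {@code cluster_name} fields to {@link #declareCommonNodesResponseParsing}.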
+ */ +public abstract class SecurityNodesResponse extends NodesResponse { + + private final List nodes; + + SecurityNodesResponse(List nodes, NodesResponseHeader header, String clusterName) { + super(header, clusterName); + this.nodes = nodes; + } + + /** returns a list of nodes in which the cache was cleared */ + public List getNodes() { + return nodes; + } + + public static class Node { + + private static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>("clear_roles_cache_response_node", false, + (args, id) -> new ClearRolesCacheResponse.Node(id, (String) args[0])); + + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), new ParseField("name")); + } + + private final String id; + private final String name; + + public Node(String id, String name) { + this.id = id; + this.name = name; + } + + public String getId() { + return id; + } + + public String getName() { + return name; + } + } + + public static void declareCommonNodesResponseParsing(ConstructingObjectParser parser) { + parser.declareNamedObjects(ConstructingObjectParser.constructorArg(), (p, c, n) -> Node.PARSER.apply(p, n), + new ParseField("nodes")); + NodesResponse.declareCommonNodesResponseParsing(parser); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/user/User.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/user/User.java new file mode 100644 index 0000000000000..977780b46b79b --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/user/User.java @@ -0,0 +1,135 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.security.user; + +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Strings; + +import java.util.Collection; +import java.util.Collections; +import java.util.Map; +import java.util.Objects; + + +/** + * An authenticated user + */ +public final class User { + + private final String username; + private final Collection roles; + private final Map metadata; + @Nullable private final String fullName; + @Nullable private final String email; + + public User(String username, Collection roles, Map metadata, @Nullable String fullName, + @Nullable String email) { + Objects.requireNonNull(username, "`username` cannot be null"); + Objects.requireNonNull(roles, "`roles` cannot be null. Pass an empty collection instead."); + Objects.requireNonNull(metadata, "`metadata` cannot be null. Pass an empty map instead."); + this.username = username; + this.roles = roles; + this.metadata = Collections.unmodifiableMap(metadata); + this.fullName = fullName; + this.email = email; + } + + /** + * @return The principal of this user - effectively serving as the + * unique identity of the user.
Can never be {@code null}. + */ + public String username() { + return this.username; + } + + /** + * @return The roles this user is associated with. The roles are + * identified by their unique names and each represents as + * set of permissions. Can never be {@code null}. + */ + public Collection roles() { + return this.roles; + } + + /** + * @return The metadata that is associated with this user. Can never be {@code null}. + */ + public Map metadata() { + return metadata; + } + + /** + * @return The full name of this user. May be {@code null}. + */ + public @Nullable String fullName() { + return fullName; + } + + /** + * @return The email of this user. May be {@code null}. + */ + public @Nullable String email() { + return email; + } + + @Override + public String toString() { + final StringBuilder sb = new StringBuilder(); + sb.append("User[username=").append(username); + sb.append(",roles=[").append(Strings.collectionToCommaDelimitedString(roles)).append("]"); + sb.append(",metadata=").append(metadata); + sb.append(",fullName=").append(fullName); + sb.append(",email=").append(email); + sb.append("]"); + return sb.toString(); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o instanceof User == false) { + return false; + } + + final User user = (User) o; + + if (!username.equals(user.username)) { + return false; + } + if (!roles.equals(user.roles)) { + return false; + } + if (!metadata.equals(user.metadata)) { + return false; + } + if (fullName != null ? !fullName.equals(user.fullName) : user.fullName != null) { + return false; + } + return !(email != null ? !email.equals(user.email) : user.email != null); + } + + @Override + public int hashCode() { + return Objects.hash(username, roles, metadata, fullName, email); + } + +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/xpack/XPackInfoResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/xpack/XPackInfoResponse.java index fadffab95f378..f9a92d2fbbe02 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/xpack/XPackInfoResponse.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/xpack/XPackInfoResponse.java @@ -18,13 +18,9 @@ */ package org.elasticsearch.client.xpack; -import org.elasticsearch.Version; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ObjectParser.ValueType; import org.elasticsearch.common.xcontent.ToXContentObject; @@ -253,7 +249,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } } - public static class BuildInfo implements ToXContentObject, Writeable { + public static class BuildInfo implements ToXContentObject { private final String hash; private final String timestamp; @@ -262,16 +258,6 @@ public BuildInfo(String hash, String timestamp) { this.timestamp = timestamp; } - public BuildInfo(StreamInput input) throws IOException { - this(input.readString(), input.readString()); - } - - @Override - public void writeTo(StreamOutput output) throws IOException { - output.writeString(hash); - output.writeString(timestamp); - } - public String getHash() { return hash; } @@ -310,7 +296,7 @@ public 
XContentBuilder toXContent(XContentBuilder builder, Params params) throws } } - public static class FeatureSetsInfo implements ToXContentObject, Writeable { + public static class FeatureSetsInfo implements ToXContentObject { private final Map featureSets; public FeatureSetsInfo(Set featureSets) { @@ -321,24 +307,6 @@ public FeatureSetsInfo(Set featureSets) { this.featureSets = Collections.unmodifiableMap(map); } - public FeatureSetsInfo(StreamInput in) throws IOException { - int size = in.readVInt(); - Map featureSets = new HashMap<>(size); - for (int i = 0; i < size; i++) { - FeatureSet featureSet = new FeatureSet(in); - featureSets.put(featureSet.name, featureSet); - } - this.featureSets = Collections.unmodifiableMap(featureSets); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeVInt(featureSets.size()); - for (FeatureSet featureSet : featureSets.values()) { - featureSet.writeTo(out); - } - } - public Map getFeatureSets() { return featureSets; } @@ -366,7 +334,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder.endObject(); } - public static class FeatureSet implements ToXContentObject, Writeable { + public static class FeatureSet implements ToXContentObject { private final String name; @Nullable private final String description; private final boolean available; @@ -382,22 +350,6 @@ public FeatureSet(String name, @Nullable String description, boolean available, this.nativeCodeInfo = nativeCodeInfo; } - public FeatureSet(StreamInput in) throws IOException { - this(in.readString(), in.readOptionalString(), in.readBoolean(), in.readBoolean(), - in.getVersion().onOrAfter(Version.V_5_4_0) ? in.readMap() : null); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeString(name); - out.writeOptionalString(description); - out.writeBoolean(available); - out.writeBoolean(enabled); - if (out.getVersion().onOrAfter(Version.V_5_4_0)) { - out.writeMap(nativeCodeInfo); - } - } - public String name() { return name; } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/xpack/XPackUsageResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/xpack/XPackUsageResponse.java index b51a2d7de9fbd..2f9c99cc65e09 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/xpack/XPackUsageResponse.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/xpack/XPackUsageResponse.java @@ -34,7 +34,7 @@ public class XPackUsageResponse { private final Map> usages; - private XPackUsageResponse(Map> usages) throws IOException { + private XPackUsageResponse(Map> usages) { this.usages = usages; } diff --git a/client/rest-high-level/src/main/resources/META-INF/services/org.elasticsearch.plugins.spi.NamedXContentProvider b/client/rest-high-level/src/main/resources/META-INF/services/org.elasticsearch.plugins.spi.NamedXContentProvider new file mode 100644 index 0000000000000..4204a868246a5 --- /dev/null +++ b/client/rest-high-level/src/main/resources/META-INF/services/org.elasticsearch.plugins.spi.NamedXContentProvider @@ -0,0 +1 @@ +org.elasticsearch.client.indexlifecycle.IndexLifecycleNamedXContentProvider \ No newline at end of file diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ESRestHighLevelClientTestCase.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ESRestHighLevelClientTestCase.java index 8dba3cafd0b44..083529ae214ef 100644 --- 
a/client/rest-high-level/src/test/java/org/elasticsearch/client/ESRestHighLevelClientTestCase.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ESRestHighLevelClientTestCase.java @@ -77,16 +77,42 @@ protected static Resp execute(Req request, SyncMethod syn } } + /** + * Executes the provided request using either the sync method or its async + * variant, both provided as functions. This variant is used when the call does + * not have a request object (only headers and the request path). + */ + protected static Resp execute(SyncMethodNoRequest syncMethodNoRequest, AsyncMethodNoRequest asyncMethodNoRequest, + RequestOptions requestOptions) throws IOException { + if (randomBoolean()) { + return syncMethodNoRequest.execute(requestOptions); + } else { + PlainActionFuture future = PlainActionFuture.newFuture(); + asyncMethodNoRequest.execute(requestOptions, future); + return future.actionGet(); + } + } + @FunctionalInterface protected interface SyncMethod { Response execute(Request request, RequestOptions options) throws IOException; } + @FunctionalInterface + protected interface SyncMethodNoRequest { + Response execute(RequestOptions options) throws IOException; + } + @FunctionalInterface protected interface AsyncMethod { void execute(Request request, RequestOptions options, ActionListener listener); } + @FunctionalInterface + protected interface AsyncMethodNoRequest { + void execute(RequestOptions options, ActionListener listener); + } + /** * Executes the provided request using either the sync method or its async variant, both provided as functions */ diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndexLifecycleIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndexLifecycleIT.java new file mode 100644 index 0000000000000..f2040bc88da34 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndexLifecycleIT.java @@ -0,0 +1,286 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client; + +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.action.admin.indices.settings.get.GetSettingsRequest; +import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.client.indexlifecycle.AllocateAction; +import org.elasticsearch.client.indexlifecycle.DeleteAction; +import org.elasticsearch.client.indexlifecycle.DeleteLifecyclePolicyRequest; +import org.elasticsearch.client.indexlifecycle.ExplainLifecycleRequest; +import org.elasticsearch.client.indexlifecycle.ExplainLifecycleResponse; +import org.elasticsearch.client.indexlifecycle.ForceMergeAction; +import org.elasticsearch.client.indexlifecycle.GetLifecyclePolicyRequest; +import org.elasticsearch.client.indexlifecycle.GetLifecyclePolicyResponse; +import org.elasticsearch.client.indexlifecycle.IndexLifecycleExplainResponse; +import org.elasticsearch.client.indexlifecycle.LifecycleAction; +import org.elasticsearch.client.indexlifecycle.LifecycleManagementStatusRequest; +import org.elasticsearch.client.indexlifecycle.LifecycleManagementStatusResponse; +import org.elasticsearch.client.indexlifecycle.LifecyclePolicy; +import org.elasticsearch.client.indexlifecycle.LifecyclePolicyMetadata; +import org.elasticsearch.client.indexlifecycle.OperationMode; +import org.elasticsearch.client.indexlifecycle.Phase; +import org.elasticsearch.client.indexlifecycle.PhaseExecutionInfo; +import org.elasticsearch.client.indexlifecycle.PutLifecyclePolicyRequest; +import org.elasticsearch.client.indexlifecycle.RetryLifecyclePolicyRequest; +import org.elasticsearch.client.indexlifecycle.RemoveIndexLifecyclePolicyRequest; +import org.elasticsearch.client.indexlifecycle.RemoveIndexLifecyclePolicyResponse; +import org.elasticsearch.client.indexlifecycle.RolloverAction; +import org.elasticsearch.client.indexlifecycle.ShrinkAction; +import org.elasticsearch.client.indexlifecycle.StartILMRequest; +import org.elasticsearch.client.indexlifecycle.StopILMRequest; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.hamcrest.Matchers; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +import static org.elasticsearch.client.indexlifecycle.LifecyclePolicyTests.createRandomPolicy; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasItems; +import static org.hamcrest.Matchers.is; + +public class IndexLifecycleIT extends ESRestHighLevelClientTestCase { + + public void testRemoveIndexLifecyclePolicy() throws Exception { + String policyName = randomAlphaOfLength(10); + LifecyclePolicy policy = createRandomPolicy(policyName); + PutLifecyclePolicyRequest putRequest = new PutLifecyclePolicyRequest(policy); + assertAcked(execute(putRequest, highLevelClient().indexLifecycle()::putLifecyclePolicy, + highLevelClient().indexLifecycle()::putLifecyclePolicyAsync)); + + createIndex("foo", Settings.builder().put("index.lifecycle.name", policyName).build()); + createIndex("baz", Settings.builder().put("index.lifecycle.name", policyName).build()); + createIndex("rbh", Settings.builder().put("index.lifecycle.name", policyName).build()); + + 
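// Sanity check: before any index is removed from the policy, all three indices should report the policy name in their "index.lifecycle.name" setting. +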
GetSettingsRequest getSettingsRequest = new GetSettingsRequest().indices("foo", "baz", "rbh"); + GetSettingsResponse settingsResponse = highLevelClient().indices().getSettings(getSettingsRequest, RequestOptions.DEFAULT); + assertThat(settingsResponse.getSetting("foo", "index.lifecycle.name"), equalTo(policyName)); + assertThat(settingsResponse.getSetting("baz", "index.lifecycle.name"), equalTo(policyName)); + assertThat(settingsResponse.getSetting("rbh", "index.lifecycle.name"), equalTo(policyName)); + + List indices = new ArrayList<>(); + indices.add("foo"); + indices.add("rbh"); + RemoveIndexLifecyclePolicyRequest removeReq = new RemoveIndexLifecyclePolicyRequest(indices); + RemoveIndexLifecyclePolicyResponse removeResp = execute(removeReq, highLevelClient().indexLifecycle()::removeIndexLifecyclePolicy, + highLevelClient().indexLifecycle()::removeIndexLifecyclePolicyAsync); + assertThat(removeResp.hasFailures(), is(false)); + assertThat(removeResp.getFailedIndexes().isEmpty(), is(true)); + + getSettingsRequest = new GetSettingsRequest().indices("foo", "baz", "rbh"); + settingsResponse = highLevelClient().indices().getSettings(getSettingsRequest, RequestOptions.DEFAULT); + assertNull(settingsResponse.getSetting("foo", "index.lifecycle.name")); + assertThat(settingsResponse.getSetting("baz", "index.lifecycle.name"), equalTo(policyName)); + assertNull(settingsResponse.getSetting("rbh", "index.lifecycle.name")); + } + + public void testStartStopILM() throws Exception { + String policyName = randomAlphaOfLength(10); + LifecyclePolicy policy = createRandomPolicy(policyName); + PutLifecyclePolicyRequest putRequest = new PutLifecyclePolicyRequest(policy); + assertAcked(execute(putRequest, highLevelClient().indexLifecycle()::putLifecyclePolicy, + highLevelClient().indexLifecycle()::putLifecyclePolicyAsync)); + + createIndex("foo", Settings.builder().put("index.lifecycle.name", "bar").build()); + createIndex("baz", Settings.builder().put("index.lifecycle.name", "eggplant").build()); + createIndex("squash", Settings.EMPTY); + + LifecycleManagementStatusRequest statusRequest = new LifecycleManagementStatusRequest(); + LifecycleManagementStatusResponse statusResponse = execute( + statusRequest, + highLevelClient().indexLifecycle()::lifecycleManagementStatus, + highLevelClient().indexLifecycle()::lifecycleManagementStatusAsync); + assertEquals(statusResponse.getOperationMode(), OperationMode.RUNNING); + + StopILMRequest stopReq = new StopILMRequest(); + AcknowledgedResponse stopResponse = execute(stopReq, highLevelClient().indexLifecycle()::stopILM, + highLevelClient().indexLifecycle()::stopILMAsync); + assertTrue(stopResponse.isAcknowledged()); + + + statusResponse = execute(statusRequest, highLevelClient().indexLifecycle()::lifecycleManagementStatus, + highLevelClient().indexLifecycle()::lifecycleManagementStatusAsync); + assertThat(statusResponse.getOperationMode(), + Matchers.anyOf(equalTo(OperationMode.STOPPING), + equalTo(OperationMode.STOPPED))); + + StartILMRequest startReq = new StartILMRequest(); + AcknowledgedResponse startResponse = execute(startReq, highLevelClient().indexLifecycle()::startILM, + highLevelClient().indexLifecycle()::startILMAsync); + assertTrue(startResponse.isAcknowledged()); + + statusResponse = execute(statusRequest, highLevelClient().indexLifecycle()::lifecycleManagementStatus, + highLevelClient().indexLifecycle()::lifecycleManagementStatusAsync); + assertEquals(statusResponse.getOperationMode(), OperationMode.RUNNING); + } + + public void testExplainLifecycle() throws 
Exception { + Map lifecyclePhases = new HashMap<>(); + Map hotActions = Collections.singletonMap( + RolloverAction.NAME, + new RolloverAction(null, TimeValue.timeValueHours(50 * 24), null)); + Phase hotPhase = new Phase("hot", randomFrom(TimeValue.ZERO, null), hotActions); + lifecyclePhases.put("hot", hotPhase); + + Map warmActions = new HashMap<>(); + warmActions.put(AllocateAction.NAME, new AllocateAction(null, null, null, Collections.singletonMap("_name", "node-1"))); + warmActions.put(ShrinkAction.NAME, new ShrinkAction(1)); + warmActions.put(ForceMergeAction.NAME, new ForceMergeAction(1000)); + lifecyclePhases.put("warm", new Phase("warm", TimeValue.timeValueSeconds(1000), warmActions)); + + Map coldActions = new HashMap<>(); + coldActions.put(AllocateAction.NAME, new AllocateAction(0, null, null, null)); + lifecyclePhases.put("cold", new Phase("cold", TimeValue.timeValueSeconds(2000), coldActions)); + + Map deleteActions = Collections.singletonMap(DeleteAction.NAME, new DeleteAction()); + lifecyclePhases.put("delete", new Phase("delete", TimeValue.timeValueSeconds(3000), deleteActions)); + + LifecyclePolicy policy = new LifecyclePolicy(randomAlphaOfLength(10), lifecyclePhases); + PutLifecyclePolicyRequest putRequest = new PutLifecyclePolicyRequest(policy); + AcknowledgedResponse putResponse = execute(putRequest, highLevelClient().indexLifecycle()::putLifecyclePolicy, + highLevelClient().indexLifecycle()::putLifecyclePolicyAsync); + assertTrue(putResponse.isAcknowledged()); + GetLifecyclePolicyRequest getRequest = new GetLifecyclePolicyRequest(policy.getName()); + GetLifecyclePolicyResponse getResponse = execute(getRequest, highLevelClient().indexLifecycle()::getLifecyclePolicy, + highLevelClient().indexLifecycle()::getLifecyclePolicyAsync); + long expectedPolicyModifiedDate = getResponse.getPolicies().get(policy.getName()).getModifiedDate(); + + + createIndex("foo-01", Settings.builder().put("index.lifecycle.name", policy.getName()) + .put("index.lifecycle.rollover_alias", "foo-alias").build(), "", "\"foo-alias\" : {}"); + + createIndex("baz-01", Settings.builder().put("index.lifecycle.name", policy.getName()) + .put("index.lifecycle.rollover_alias", "baz-alias").build(), "", "\"baz-alias\" : {}"); + + createIndex("squash", Settings.EMPTY); + + ExplainLifecycleRequest req = new ExplainLifecycleRequest(); + req.indices("foo-01", "baz-01", "squash"); + ExplainLifecycleResponse response = execute(req, highLevelClient().indexLifecycle()::explainLifecycle, + highLevelClient().indexLifecycle()::explainLifecycleAsync); + Map indexResponses = response.getIndexResponses(); + assertEquals(3, indexResponses.size()); + IndexLifecycleExplainResponse fooResponse = indexResponses.get("foo-01"); + assertNotNull(fooResponse); + assertTrue(fooResponse.managedByILM()); + assertEquals("foo-01", fooResponse.getIndex()); + assertEquals("hot", fooResponse.getPhase()); + assertEquals("rollover", fooResponse.getAction()); + assertEquals("attempt_rollover", fooResponse.getStep()); + assertEquals(new PhaseExecutionInfo(policy.getName(), new Phase("", hotPhase.getMinimumAge(), hotPhase.getActions()), + 1L, expectedPolicyModifiedDate), fooResponse.getPhaseExecutionInfo()); + IndexLifecycleExplainResponse bazResponse = indexResponses.get("baz-01"); + assertNotNull(bazResponse); + assertTrue(bazResponse.managedByILM()); + assertEquals("baz-01", bazResponse.getIndex()); + assertEquals("hot", bazResponse.getPhase()); + assertEquals("rollover", bazResponse.getAction()); + assertEquals("attempt_rollover", 
bazResponse.getStep()); + IndexLifecycleExplainResponse squashResponse = indexResponses.get("squash"); + assertNotNull(squashResponse); + assertFalse(squashResponse.managedByILM()); + assertEquals("squash", squashResponse.getIndex()); + } + + public void testDeleteLifecycle() throws IOException { + String policyName = randomAlphaOfLength(10); + LifecyclePolicy policy = createRandomPolicy(policyName); + PutLifecyclePolicyRequest putRequest = new PutLifecyclePolicyRequest(policy); + assertAcked(execute(putRequest, highLevelClient().indexLifecycle()::putLifecyclePolicy, + highLevelClient().indexLifecycle()::putLifecyclePolicyAsync)); + + DeleteLifecyclePolicyRequest deleteRequest = new DeleteLifecyclePolicyRequest(policy.getName()); + assertAcked(execute(deleteRequest, highLevelClient().indexLifecycle()::deleteLifecyclePolicy, + highLevelClient().indexLifecycle()::deleteLifecyclePolicyAsync)); + + GetLifecyclePolicyRequest getRequest = new GetLifecyclePolicyRequest(policyName); + ElasticsearchStatusException ex = expectThrows(ElasticsearchStatusException.class, + () -> execute(getRequest, highLevelClient().indexLifecycle()::getLifecyclePolicy, + highLevelClient().indexLifecycle()::getLifecyclePolicyAsync)); + assertEquals(404, ex.status().getStatus()); + } + + public void testPutLifecycle() throws IOException { + String name = randomAlphaOfLengthBetween(5, 20); + LifecyclePolicy policy = createRandomPolicy(name); + PutLifecyclePolicyRequest putRequest = new PutLifecyclePolicyRequest(policy); + + assertAcked(execute(putRequest, highLevelClient().indexLifecycle()::putLifecyclePolicy, + highLevelClient().indexLifecycle()::putLifecyclePolicyAsync)); + + GetLifecyclePolicyRequest getRequest = new GetLifecyclePolicyRequest(name); + GetLifecyclePolicyResponse response = execute(getRequest, highLevelClient().indexLifecycle()::getLifecyclePolicy, + highLevelClient().indexLifecycle()::getLifecyclePolicyAsync); + assertEquals(policy, response.getPolicies().get(name).getPolicy()); + } + + public void testGetMultipleLifecyclePolicies() throws IOException { + int numPolicies = randomIntBetween(1, 10); + String[] policyNames = new String[numPolicies]; + LifecyclePolicy[] policies = new LifecyclePolicy[numPolicies]; + for (int i = 0; i < numPolicies; i++) { + policyNames[i] = "policy-" + randomAlphaOfLengthBetween(5, 10); + policies[i] = createRandomPolicy(policyNames[i]); + PutLifecyclePolicyRequest putRequest = new PutLifecyclePolicyRequest(policies[i]); + assertAcked(execute(putRequest, highLevelClient().indexLifecycle()::putLifecyclePolicy, + highLevelClient().indexLifecycle()::putLifecyclePolicyAsync)); + } + + GetLifecyclePolicyRequest getRequest = new GetLifecyclePolicyRequest(randomFrom(policyNames, null)); + GetLifecyclePolicyResponse response = execute(getRequest, highLevelClient().indexLifecycle()::getLifecyclePolicy, + highLevelClient().indexLifecycle()::getLifecyclePolicyAsync); + List retrievedPolicies = Arrays.stream(response.getPolicies().values().toArray()) + .map(p -> ((LifecyclePolicyMetadata) p).getPolicy()).collect(Collectors.toList()); + assertThat(retrievedPolicies, hasItems(policies)); + } + + public void testRetryLifecycleStep() throws IOException { + String policyName = randomAlphaOfLength(10); + LifecyclePolicy policy = createRandomPolicy(policyName); + PutLifecyclePolicyRequest putRequest = new PutLifecyclePolicyRequest(policy); + assertAcked(execute(putRequest, highLevelClient().indexLifecycle()::putLifecyclePolicy, + highLevelClient().indexLifecycle()::putLifecyclePolicyAsync)); 
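+ // The "retry" index created below has not encountered an ILM error step, so the retry call is expected to fail with a 400, which the assertions below verify.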
+ createIndex("retry", Settings.builder().put("index.lifecycle.name", policy.getName()).build()); + RetryLifecyclePolicyRequest retryRequest = new RetryLifecyclePolicyRequest("retry"); + ElasticsearchStatusException ex = expectThrows(ElasticsearchStatusException.class, + () -> execute( + retryRequest, highLevelClient().indexLifecycle()::retryLifecycleStep, + highLevelClient().indexLifecycle()::retryLifecycleStepAsync + ) + ); + assertEquals(400, ex.status().getStatus()); + assertEquals( + "Elasticsearch exception [type=illegal_argument_exception, reason=cannot retry an action for an index [retry]" + + " that has not encountered an error when running a Lifecycle Policy]", + ex.getRootCause().getMessage() + ); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndexLifecycleRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndexLifecycleRequestConvertersTests.java new file mode 100644 index 0000000000000..1af29701bc7c8 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndexLifecycleRequestConvertersTests.java @@ -0,0 +1,171 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */
+
+package org.elasticsearch.client;
+
+import org.apache.http.client.methods.HttpDelete;
+import org.apache.http.client.methods.HttpGet;
+import org.apache.http.client.methods.HttpPost;
+import org.apache.http.client.methods.HttpPut;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.client.indexlifecycle.DeleteLifecyclePolicyRequest;
+import org.elasticsearch.client.indexlifecycle.ExplainLifecycleRequest;
+import org.elasticsearch.client.indexlifecycle.GetLifecyclePolicyRequest;
+import org.elasticsearch.client.indexlifecycle.LifecycleManagementStatusRequest;
+import org.elasticsearch.client.indexlifecycle.LifecyclePolicy;
+import org.elasticsearch.client.indexlifecycle.PutLifecyclePolicyRequest;
+import org.elasticsearch.client.indexlifecycle.RemoveIndexLifecyclePolicyRequest;
+import org.elasticsearch.client.indexlifecycle.RetryLifecyclePolicyRequest;
+import org.elasticsearch.client.indexlifecycle.StartILMRequest;
+import org.elasticsearch.client.indexlifecycle.StopILMRequest;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.test.ESTestCase;
+
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.elasticsearch.client.RequestConvertersTests.randomIndicesNames;
+import static org.elasticsearch.client.RequestConvertersTests.setRandomIndicesOptions;
+import static org.elasticsearch.client.RequestConvertersTests.setRandomMasterTimeout;
+import static org.elasticsearch.client.RequestConvertersTests.setRandomTimeoutTimeValue;
+import static org.elasticsearch.client.indexlifecycle.LifecyclePolicyTests.createRandomPolicy;
+import static org.hamcrest.CoreMatchers.equalTo;
+
+public class IndexLifecycleRequestConvertersTests extends ESTestCase {
+
+    public void testGetLifecyclePolicy() {
+        String[] policies = rarely() ? null : randomIndicesNames(0, 10);
+        GetLifecyclePolicyRequest req = new GetLifecyclePolicyRequest(policies);
+        Map<String, String> expectedParams = new HashMap<>();
+        setRandomMasterTimeout(req::setMasterTimeout, TimedRequest.DEFAULT_MASTER_NODE_TIMEOUT, expectedParams);
+        setRandomTimeoutTimeValue(req::setTimeout, TimedRequest.DEFAULT_ACK_TIMEOUT, expectedParams);
+
+        Request request = IndexLifecycleRequestConverters.getLifecyclePolicy(req);
+        assertEquals(request.getMethod(), HttpGet.METHOD_NAME);
+        String policiesStr = Strings.arrayToCommaDelimitedString(policies);
+        assertEquals(request.getEndpoint(), "/_ilm/policy" + (policiesStr.isEmpty() ? "" : ("/" + policiesStr)));
+        assertEquals(request.getParameters(), expectedParams);
+    }
+
+    public void testPutLifecyclePolicy() throws Exception {
+        String name = randomAlphaOfLengthBetween(2, 20);
+        LifecyclePolicy policy = createRandomPolicy(name);
+        PutLifecyclePolicyRequest req = new PutLifecyclePolicyRequest(policy);
+        Map<String, String> expectedParams = new HashMap<>();
+        setRandomMasterTimeout(req::setMasterTimeout, TimedRequest.DEFAULT_MASTER_NODE_TIMEOUT, expectedParams);
+        setRandomTimeoutTimeValue(req::setTimeout, TimedRequest.DEFAULT_ACK_TIMEOUT, expectedParams);
+
+        Request request = IndexLifecycleRequestConverters.putLifecyclePolicy(req);
+        assertEquals(HttpPut.METHOD_NAME, request.getMethod());
+        assertEquals("/_ilm/policy/" + name, request.getEndpoint());
+        assertEquals(expectedParams, request.getParameters());
+    }
+
+    public void testDeleteLifecycle() {
+        String lifecycleName = randomAlphaOfLengthBetween(2, 20);
+        DeleteLifecyclePolicyRequest req = new DeleteLifecyclePolicyRequest(lifecycleName);
+        Map<String, String> expectedParams = new HashMap<>();
+        setRandomMasterTimeout(req::setMasterTimeout, TimedRequest.DEFAULT_MASTER_NODE_TIMEOUT, expectedParams);
+        setRandomTimeoutTimeValue(req::setTimeout, TimedRequest.DEFAULT_ACK_TIMEOUT, expectedParams);
+
+        Request request = IndexLifecycleRequestConverters.deleteLifecyclePolicy(req);
+        assertEquals(request.getMethod(), HttpDelete.METHOD_NAME);
+        assertEquals(request.getEndpoint(), "/_ilm/policy/" + lifecycleName);
+        assertEquals(request.getParameters(), expectedParams);
+    }
+
+    public void testRemoveIndexLifecyclePolicy() {
+        Map<String, String> expectedParams = new HashMap<>();
+        String[] indices = randomIndicesNames(0, 10);
+        IndicesOptions indicesOptions = setRandomIndicesOptions(IndicesOptions.strictExpandOpen(), expectedParams);
+        RemoveIndexLifecyclePolicyRequest req = new RemoveIndexLifecyclePolicyRequest(Arrays.asList(indices), indicesOptions);
+        setRandomMasterTimeout(req::setMasterTimeout, TimedRequest.DEFAULT_MASTER_NODE_TIMEOUT, expectedParams);
+
+        Request request = IndexLifecycleRequestConverters.removeIndexLifecyclePolicy(req);
+        assertThat(request.getMethod(), equalTo(HttpPost.METHOD_NAME));
+        String idxString = Strings.arrayToCommaDelimitedString(indices);
+        assertThat(request.getEndpoint(), equalTo("/" + (idxString.isEmpty() ? "" : (idxString + "/")) + "_ilm/remove"));
+        assertThat(request.getParameters(), equalTo(expectedParams));
+    }
+
+    public void testStartILM() throws Exception {
+        StartILMRequest req = new StartILMRequest();
+        Map<String, String> expectedParams = new HashMap<>();
+        setRandomMasterTimeout(req::setMasterTimeout, TimedRequest.DEFAULT_MASTER_NODE_TIMEOUT, expectedParams);
+        setRandomTimeoutTimeValue(req::setTimeout, TimedRequest.DEFAULT_ACK_TIMEOUT, expectedParams);
+
+        Request request = IndexLifecycleRequestConverters.startILM(req);
+        assertThat(request.getMethod(), equalTo(HttpPost.METHOD_NAME));
+        assertThat(request.getEndpoint(), equalTo("/_ilm/start"));
+        assertThat(request.getParameters(), equalTo(expectedParams));
+    }
+
+    public void testStopILM() throws Exception {
+        StopILMRequest req = new StopILMRequest();
+        Map<String, String> expectedParams = new HashMap<>();
+        setRandomMasterTimeout(req::setMasterTimeout, TimedRequest.DEFAULT_MASTER_NODE_TIMEOUT, expectedParams);
+        setRandomTimeoutTimeValue(req::setTimeout, TimedRequest.DEFAULT_ACK_TIMEOUT, expectedParams);
+
+        Request request = IndexLifecycleRequestConverters.stopILM(req);
+        assertThat(request.getMethod(), equalTo(HttpPost.METHOD_NAME));
+        assertThat(request.getEndpoint(), equalTo("/_ilm/stop"));
+        assertThat(request.getParameters(), equalTo(expectedParams));
+    }
+
+    public void testLifecycleManagementStatus() throws Exception {
+        LifecycleManagementStatusRequest req = new LifecycleManagementStatusRequest();
+        Map<String, String> expectedParams = new HashMap<>();
+        setRandomMasterTimeout(req::setMasterTimeout, TimedRequest.DEFAULT_MASTER_NODE_TIMEOUT, expectedParams);
+        setRandomTimeoutTimeValue(req::setTimeout, TimedRequest.DEFAULT_ACK_TIMEOUT, expectedParams);
+
+        Request request = IndexLifecycleRequestConverters.lifecycleManagementStatus(req);
+        assertThat(request.getMethod(), equalTo(HttpGet.METHOD_NAME));
+        assertThat(request.getEndpoint(), equalTo("/_ilm/status"));
+        assertThat(request.getParameters(), equalTo(expectedParams));
+    }
+
+    public void testExplainLifecycle() throws Exception {
+        ExplainLifecycleRequest req = new ExplainLifecycleRequest();
+        String[] indices = rarely() ? null : randomIndicesNames(0, 10);
+        req.indices(indices);
+        Map<String, String> expectedParams = new HashMap<>();
+        setRandomMasterTimeout(req, expectedParams);
+        setRandomIndicesOptions(req::indicesOptions, req::indicesOptions, expectedParams);
+
+        Request request = IndexLifecycleRequestConverters.explainLifecycle(req);
+        assertThat(request.getMethod(), equalTo(HttpGet.METHOD_NAME));
+        String idxString = Strings.arrayToCommaDelimitedString(indices);
+        assertThat(request.getEndpoint(), equalTo("/" + (idxString.isEmpty() ? "" : (idxString + "/")) + "_ilm/explain"));
+        assertThat(request.getParameters(), equalTo(expectedParams));
+    }
+
+    public void testRetryLifecycle() throws Exception {
+        String[] indices = randomIndicesNames(1, 10);
+        RetryLifecyclePolicyRequest req = new RetryLifecyclePolicyRequest(indices);
+        Map<String, String> expectedParams = new HashMap<>();
+        setRandomMasterTimeout(req::setMasterTimeout, TimedRequest.DEFAULT_MASTER_NODE_TIMEOUT, expectedParams);
+        setRandomTimeoutTimeValue(req::setTimeout, TimedRequest.DEFAULT_ACK_TIMEOUT, expectedParams);
+        Request request = IndexLifecycleRequestConverters.retryLifecycle(req);
+        assertThat(request.getMethod(), equalTo(HttpPost.METHOD_NAME));
+        String idxString = Strings.arrayToCommaDelimitedString(indices);
+        assertThat(request.getEndpoint(), equalTo("/" + (idxString.isEmpty() ?
"" : (idxString + "/")) + "_ilm/retry")); + assertThat(request.getParameters(), equalTo(expectedParams)); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java index 2c421597c68d0..927183ba7715f 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java @@ -534,6 +534,9 @@ public void testUpdateAliases() throws IOException { IndicesAliasesRequest aliasesAddRequest = new IndicesAliasesRequest(); AliasActions addAction = new AliasActions(AliasActions.Type.ADD).index(index).aliases(alias); + if (randomBoolean()) { + addAction.writeIndex(randomBoolean()); + } addAction.routing("routing").searchRouting("search_routing").filter("{\"term\":{\"year\":2016}}"); aliasesAddRequest.addAliasAction(addAction); AcknowledgedResponse aliasesAddResponse = execute(aliasesAddRequest, highLevelClient().indices()::updateAliases, @@ -548,6 +551,8 @@ public void testUpdateAliases() throws IOException { Map filter = (Map) getAlias.get("filter"); Map term = (Map) filter.get("term"); assertEquals(2016, term.get("year")); + Boolean isWriteIndex = (Boolean) getAlias.get("is_write_index"); + assertThat(isWriteIndex, equalTo(addAction.writeIndex())); String alias2 = "alias2"; IndicesAliasesRequest aliasesAddRemoveRequest = new IndicesAliasesRequest(); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java index d4fdfb2c995df..bb0dbf6680127 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java @@ -45,6 +45,7 @@ import org.elasticsearch.client.ml.PreviewDatafeedRequest; import org.elasticsearch.client.ml.PutCalendarRequest; import org.elasticsearch.client.ml.PutDatafeedRequest; +import org.elasticsearch.client.ml.PutFilterRequest; import org.elasticsearch.client.ml.PutJobRequest; import org.elasticsearch.client.ml.StartDatafeedRequest; import org.elasticsearch.client.ml.StartDatafeedRequestTests; @@ -59,6 +60,8 @@ import org.elasticsearch.client.ml.job.config.Job; import org.elasticsearch.client.ml.job.config.JobUpdate; import org.elasticsearch.client.ml.job.config.JobUpdateTests; +import org.elasticsearch.client.ml.job.config.MlFilter; +import org.elasticsearch.client.ml.job.config.MlFilterTests; import org.elasticsearch.client.ml.job.util.PageParams; import org.elasticsearch.common.Strings; import org.elasticsearch.common.unit.TimeValue; @@ -511,6 +514,20 @@ public void testDeleteCalendar() { assertEquals("/_xpack/ml/calendars/" + deleteCalendarRequest.getCalendarId(), request.getEndpoint()); } + public void testPutFilter() throws IOException { + MlFilter filter = MlFilterTests.createRandom("foo"); + PutFilterRequest putFilterRequest = new PutFilterRequest(filter); + + Request request = MLRequestConverters.putFilter(putFilterRequest); + + assertEquals(HttpPut.METHOD_NAME, request.getMethod()); + assertThat(request.getEndpoint(), equalTo("/_xpack/ml/filters/foo")); + try (XContentParser parser = createParser(JsonXContent.jsonXContent, request.getEntity().getContent())) { + MlFilter parsedFilter = MlFilter.PARSER.apply(parser, null).build(); + assertThat(parsedFilter, 
equalTo(filter)); + } + } + private static Job createValidJob(String jobId) { AnalysisConfig.Builder analysisConfig = AnalysisConfig.builder(Collections.singletonList( Detector.builder().setFunction("count").build())); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java index 951e6209d6b8c..ff3218795e435 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java @@ -58,6 +58,8 @@ import org.elasticsearch.client.ml.PutCalendarResponse; import org.elasticsearch.client.ml.PutDatafeedRequest; import org.elasticsearch.client.ml.PutDatafeedResponse; +import org.elasticsearch.client.ml.PutFilterRequest; +import org.elasticsearch.client.ml.PutFilterResponse; import org.elasticsearch.client.ml.PutJobRequest; import org.elasticsearch.client.ml.PutJobResponse; import org.elasticsearch.client.ml.StartDatafeedRequest; @@ -78,6 +80,7 @@ import org.elasticsearch.client.ml.job.config.Job; import org.elasticsearch.client.ml.job.config.JobState; import org.elasticsearch.client.ml.job.config.JobUpdate; +import org.elasticsearch.client.ml.job.config.MlFilter; import org.elasticsearch.client.ml.job.stats.JobStats; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentType; @@ -859,6 +862,22 @@ public void testDeleteCalendar() throws IOException { assertThat(exception.status().getStatus(), equalTo(404)); } + public void testFilterJob() throws Exception { + String filterId = "filter-job-test"; + MlFilter mlFilter = MlFilter.builder(filterId) + .setDescription(randomAlphaOfLength(10)) + .setItems(generateRandomStringArray(10, 10, false, false)) + .build(); + MachineLearningClient machineLearningClient = highLevelClient().machineLearning(); + + PutFilterResponse putFilterResponse = execute(new PutFilterRequest(mlFilter), + machineLearningClient::putFilter, + machineLearningClient::putFilterAsync); + MlFilter createdFilter = putFilterResponse.getResponse(); + + assertThat(createdFilter, equalTo(mlFilter)); + } + public static String randomValidJobId() { CodepointSetGenerator generator = new CodepointSetGenerator("abcdefghijklmnopqrstuvwxyz0123456789".toCharArray()); return generator.ofCodePointsLength(random(), 10, 10); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RankEvalIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RankEvalIT.java index e6fdeffa142e9..06ae5e2090c65 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RankEvalIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RankEvalIT.java @@ -109,9 +109,8 @@ public void testRankEvalRequest() throws IOException { // now try this when test2 is closed client().performRequest(new Request("POST", "index2/_close")); - rankEvalRequest.indicesOptions(IndicesOptions.fromParameters(null, "true", null, SearchRequest.DEFAULT_INDICES_OPTIONS)); - response = execute(rankEvalRequest, highLevelClient()::rankEval, highLevelClient()::rankEvalAsync, - highLevelClient()::rankEval, highLevelClient()::rankEvalAsync); + rankEvalRequest.indicesOptions(IndicesOptions.fromParameters(null, "true", null, "false", SearchRequest.DEFAULT_INDICES_OPTIONS)); + response = execute(rankEvalRequest, highLevelClient()::rankEval, highLevelClient()::rankEvalAsync); } private static List 
createTestEvaluationSpec() { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java index 32b07316f6a9e..84aabb97ae798 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java @@ -56,6 +56,7 @@ import org.elasticsearch.client.core.TermVectorsRequest; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.client.RequestConverters.EndpointBuilder; +import org.elasticsearch.client.core.CountRequest; import org.elasticsearch.common.CheckedBiConsumer; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; @@ -980,6 +981,72 @@ public void testSearchNullIndicesAndTypes() { expectThrows(NullPointerException.class, () -> new SearchRequest().types((String[]) null)); } + public void testCountNotNullSource() throws IOException { + //as we create SearchSourceBuilder in CountRequest constructor + CountRequest countRequest = new CountRequest(); + Request request = RequestConverters.count(countRequest); + assertEquals(HttpPost.METHOD_NAME, request.getMethod()); + assertEquals("/_count", request.getEndpoint()); + assertNotNull(request.getEntity()); + } + + public void testCount() throws Exception { + String[] indices = randomIndicesNames(0, 5); + CountRequest countRequest = new CountRequest(indices); + + int numTypes = randomIntBetween(0, 5); + String[] types = new String[numTypes]; + for (int i = 0; i < numTypes; i++) { + types[i] = "type-" + randomAlphaOfLengthBetween(2, 5); + } + countRequest.types(types); + + Map expectedParams = new HashMap<>(); + setRandomCountParams(countRequest, expectedParams); + setRandomIndicesOptions(countRequest::indicesOptions, countRequest::indicesOptions, expectedParams); + + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); + if (frequently()) { + if (randomBoolean()) { + searchSourceBuilder.minScore(randomFloat()); + } + } + countRequest.source(searchSourceBuilder); + Request request = RequestConverters.count(countRequest); + StringJoiner endpoint = new StringJoiner("/", "/", ""); + String index = String.join(",", indices); + if (Strings.hasLength(index)) { + endpoint.add(index); + } + String type = String.join(",", types); + if (Strings.hasLength(type)) { + endpoint.add(type); + } + endpoint.add("_count"); + assertEquals(HttpPost.METHOD_NAME, request.getMethod()); + assertEquals(endpoint.toString(), request.getEndpoint()); + assertEquals(expectedParams, request.getParameters()); + assertToXContentBody(searchSourceBuilder, request.getEntity()); + } + + public void testCountNullIndicesAndTypes() { + expectThrows(NullPointerException.class, () -> new CountRequest((String[]) null)); + expectThrows(NullPointerException.class, () -> new CountRequest().indices((String[]) null)); + expectThrows(NullPointerException.class, () -> new CountRequest().types((String[]) null)); + } + + private static void setRandomCountParams(CountRequest countRequest, + Map expectedParams) { + if (randomBoolean()) { + countRequest.routing(randomAlphaOfLengthBetween(3, 10)); + expectedParams.put("routing", countRequest.routing()); + } + if (randomBoolean()) { + countRequest.preference(randomAlphaOfLengthBetween(3, 10)); + expectedParams.put("preference", countRequest.preference()); + } + } + public void testMultiSearch() throws IOException { int 
numberOfSearchRequests = randomIntBetween(0, 32);
         MultiSearchRequest multiSearchRequest = new MultiSearchRequest();
@@ -1000,7 +1067,8 @@ public void testMultiSearch() throws IOException {
             IndicesOptions msearchDefault = new MultiSearchRequest().indicesOptions();
             searchRequest.indicesOptions(IndicesOptions.fromOptions(randomlyGenerated.ignoreUnavailable(),
                     randomlyGenerated.allowNoIndices(), randomlyGenerated.expandWildcardsOpen(), randomlyGenerated.expandWildcardsClosed(),
-                    msearchDefault.allowAliasesToMultipleIndices(), msearchDefault.forbidClosedIndices(), msearchDefault.ignoreAliases()));
+                    msearchDefault.allowAliasesToMultipleIndices(), msearchDefault.forbidClosedIndices(), msearchDefault.ignoreAliases(),
+                    msearchDefault.ignoreThrottled()));
             multiSearchRequest.add(searchRequest);
         }
@@ -1610,6 +1678,24 @@ static void setRandomIndicesOptions(Consumer<IndicesOptions> setter, Supplier<IndicesOptions> getter,
+    static IndicesOptions setRandomIndicesOptions(IndicesOptions indicesOptions, Map<String, String> expectedParams) {
+        if (randomBoolean()) {
+            indicesOptions = IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean());
+        }
+        expectedParams.put("ignore_unavailable", Boolean.toString(indicesOptions.ignoreUnavailable()));
+        expectedParams.put("allow_no_indices", Boolean.toString(indicesOptions.allowNoIndices()));
+        if (indicesOptions.expandWildcardsOpen() && indicesOptions.expandWildcardsClosed()) {
+            expectedParams.put("expand_wildcards", "open,closed");
+        } else if (indicesOptions.expandWildcardsOpen()) {
+            expectedParams.put("expand_wildcards", "open");
+        } else if (indicesOptions.expandWildcardsClosed()) {
+            expectedParams.put("expand_wildcards", "closed");
+        } else {
+            expectedParams.put("expand_wildcards", "none");
+        }
+        return indicesOptions;
+    }
+
     static void setRandomIncludeDefaults(GetIndexRequest request, Map<String, String> expectedParams) {
         if (randomBoolean()) {
             boolean includeDefaults = randomBoolean();
@@ -1660,6 +1746,17 @@ static void setRandomTimeout(Consumer<String> setter, TimeValue defaultTimeout,
         }
     }

+    static void setRandomTimeoutTimeValue(Consumer<TimeValue> setter, TimeValue defaultTimeout,
+                                          Map<String, String> expectedParams) {
+        if (randomBoolean()) {
+            TimeValue timeout = TimeValue.parseTimeValue(randomTimeValue(), "random_timeout");
+            setter.accept(timeout);
+            expectedParams.put("timeout", timeout.getStringRep());
+        } else {
+            expectedParams.put("timeout", defaultTimeout.getStringRep());
+        }
+    }
+
     static void setRandomMasterTimeout(MasterNodeRequest<?> request, Map<String, String> expectedParams) {
         setRandomMasterTimeout(request::masterNodeTimeout, expectedParams);
     }
@@ -1680,6 +1777,16 @@ static void setRandomMasterTimeout(Consumer<TimeValue> setter, Map<String, String> expectedParams) {
         }
     }

+    static void setRandomMasterTimeout(Consumer<TimeValue> setter, TimeValue defaultTimeout, Map<String, String> expectedParams) {
+        if (randomBoolean()) {
+            TimeValue masterTimeout = TimeValue.parseTimeValue(randomTimeValue(), "random_master_timeout");
+            setter.accept(masterTimeout);
+            expectedParams.put("master_timeout", masterTimeout.getStringRep());
+        } else {
+            expectedParams.put("master_timeout", defaultTimeout.getStringRep());
+        }
+    }
+
     static void setRandomWaitForActiveShards(Consumer<ActiveShardCount> setter, Map<String, String> expectedParams) {
         setRandomWaitForActiveShards(setter, ActiveShardCount.DEFAULT, expectedParams);
     }
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java
index b66a96f786597..4adec512b5d8f 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java
@@ -20,7 +20,6 @@
 package org.elasticsearch.client;

 import com.fasterxml.jackson.core.JsonParseException;
-
 import org.apache.http.Header;
 import org.apache.http.HttpEntity;
 import org.apache.http.HttpHost;
@@ -50,6 +49,13 @@
 import org.elasticsearch.action.search.SearchResponseSections;
 import org.elasticsearch.action.search.SearchScrollRequest;
 import org.elasticsearch.action.search.ShardSearchFailure;
+import org.elasticsearch.client.indexlifecycle.AllocateAction;
+import org.elasticsearch.client.indexlifecycle.DeleteAction;
+import org.elasticsearch.client.indexlifecycle.ForceMergeAction;
+import org.elasticsearch.client.indexlifecycle.LifecycleAction;
+import org.elasticsearch.client.indexlifecycle.ReadOnlyAction;
+import org.elasticsearch.client.indexlifecycle.RolloverAction;
+import org.elasticsearch.client.indexlifecycle.ShrinkAction;
 import org.elasticsearch.cluster.ClusterName;
 import org.elasticsearch.common.CheckedFunction;
 import org.elasticsearch.common.bytes.BytesReference;
@@ -619,7 +625,7 @@ public void testDefaultNamedXContents() {

     public void testProvidedNamedXContents() {
         List<NamedXContentRegistry.Entry> namedXContents = RestHighLevelClient.getProvidedNamedXContents();
-        assertEquals(10, namedXContents.size());
+        assertEquals(16, namedXContents.size());
         Map<Class<?>, Integer> categories = new HashMap<>();
         List<String> names = new ArrayList<>();
         for (NamedXContentRegistry.Entry namedXContent : namedXContents) {
@@ -629,7 +635,7 @@ public void testProvidedNamedXContents() {
                 categories.put(namedXContent.categoryClass, counter + 1);
             }
         }
-        assertEquals(3, categories.size());
+        assertEquals(4, categories.size());
         assertEquals(Integer.valueOf(2), categories.get(Aggregation.class));
         assertTrue(names.contains(ChildrenAggregationBuilder.NAME));
         assertTrue(names.contains(MatrixStatsAggregationBuilder.NAME));
@@ -643,6 +649,13 @@ public void testProvidedNamedXContents() {
         assertTrue(names.contains(MeanReciprocalRank.NAME));
         assertTrue(names.contains(DiscountedCumulativeGain.NAME));
         assertTrue(names.contains(ExpectedReciprocalRank.NAME));
+        assertEquals(Integer.valueOf(6), categories.get(LifecycleAction.class));
+        assertTrue(names.contains(AllocateAction.NAME));
+        assertTrue(names.contains(DeleteAction.NAME));
+        assertTrue(names.contains(ForceMergeAction.NAME));
+        assertTrue(names.contains(ReadOnlyAction.NAME));
+        assertTrue(names.contains(RolloverAction.NAME));
+        assertTrue(names.contains(ShrinkAction.NAME));
     }

     public void testMethodWithHeadersArgumentAreDeprecated() {
@@ -664,7 +677,6 @@ public void testApiNamingConventions() throws Exception {
         //this list should be empty once the high-level client is feature complete
         String[] notYetSupportedApi = new String[]{
             "cluster.remote_info",
-            "count",
             "create",
             "get_source",
             "indices.delete_alias",
@@ -737,7 +749,7 @@ public void testApiNamingConventions() throws Exception {
                     methods.containsKey(apiName.substring(0, apiName.length() - 6)));
                 assertThat("async method [" + method + "] should return void", method.getReturnType(), equalTo(Void.TYPE));
                 assertEquals("async method [" + method + "] should not throw any exceptions", 0, method.getExceptionTypes().length);
-                if (apiName.equals("security.get_ssl_certificates_async")) {
+                if (apiName.equals("security.authenticate_async") || apiName.equals("security.get_ssl_certificates_async")) {
                     assertEquals(2, method.getParameterTypes().length);
                     assertThat(method.getParameterTypes()[0], equalTo(RequestOptions.class));
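// These assertions pin down the calling convention for request-less APIs such as the new
// security.authenticate: the sync variant takes only RequestOptions, while the async variant
// adds an ActionListener. A minimal usage sketch (hypothetical host; "listener" stands for an
// ActionListener<AuthenticateResponse> supplied by the caller):
//
//     RestHighLevelClient client = new RestHighLevelClient(
//         RestClient.builder(new HttpHost("localhost", 9200, "http")));
//     AuthenticateResponse response = client.security().authenticate(RequestOptions.DEFAULT);
//     client.security().authenticateAsync(RequestOptions.DEFAULT, listener);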
assertThat(method.getParameterTypes()[1], equalTo(ActionListener.class)); @@ -762,7 +774,8 @@ public void testApiNamingConventions() throws Exception { assertEquals("incorrect number of exceptions for method [" + method + "]", 1, method.getExceptionTypes().length); //a few methods don't accept a request object as argument - if (apiName.equals("ping") || apiName.equals("info") || apiName.equals("security.get_ssl_certificates")) { + if (apiName.equals("ping") || apiName.equals("info") || apiName.equals("security.get_ssl_certificates") + || apiName.equals("security.authenticate")) { assertEquals("incorrect number of arguments for method [" + method + "]", 1, method.getParameterTypes().length); assertThat("the parameter to method [" + method + "] is the wrong type", method.getParameterTypes()[0], equalTo(RequestOptions.class)); @@ -788,7 +801,8 @@ public void testApiNamingConventions() throws Exception { apiName.startsWith("watcher.") == false && apiName.startsWith("graph.") == false && apiName.startsWith("migration.") == false && - apiName.startsWith("security.") == false) { + apiName.startsWith("security.") == false && + apiName.startsWith("index_lifecycle.") == false) { apiNotFound.add(apiName); } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RollupIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RollupIT.java index ed992a95fe1bd..6b790659a6f8a 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RollupIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RollupIT.java @@ -19,6 +19,7 @@ package org.elasticsearch.client; import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest; +import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.bulk.BulkItemResponse; @@ -34,6 +35,8 @@ import org.elasticsearch.client.rollup.GetRollupJobResponse; import org.elasticsearch.client.rollup.GetRollupJobResponse.IndexerState; import org.elasticsearch.client.rollup.GetRollupJobResponse.JobWrapper; +import org.elasticsearch.client.rollup.DeleteRollupJobRequest; +import org.elasticsearch.client.rollup.DeleteRollupJobResponse; import org.elasticsearch.client.rollup.PutRollupJobRequest; import org.elasticsearch.client.rollup.PutRollupJobResponse; import org.elasticsearch.client.rollup.RollableIndexCaps; @@ -53,6 +56,7 @@ import org.elasticsearch.search.aggregations.metrics.min.MinAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.sum.SumAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.valuecount.ValueCountAggregationBuilder; +import org.junit.Before; import java.util.Arrays; import java.util.Collections; @@ -69,18 +73,35 @@ import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.hasKey; import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.lessThan; public class RollupIT extends ESRestHighLevelClientTestCase { + double sum = 0.0d; + int max = Integer.MIN_VALUE; + int min = Integer.MAX_VALUE; private static final List SUPPORTED_METRICS = Arrays.asList(MaxAggregationBuilder.NAME, MinAggregationBuilder.NAME, - SumAggregationBuilder.NAME, AvgAggregationBuilder.NAME, ValueCountAggregationBuilder.NAME); - - public void testPutStartAndGetRollupJob() throws Exception { - double sum = 0.0d; - int max = 
Integer.MIN_VALUE; - int min = Integer.MAX_VALUE; + SumAggregationBuilder.NAME, AvgAggregationBuilder.NAME, ValueCountAggregationBuilder.NAME); + + private String id; + private String indexPattern; + private String rollupIndex; + private String cron; + private int pageSize; + private int numDocs; + + @Before + public void init() throws Exception { + id = randomAlphaOfLength(10); + indexPattern = randomFrom("docs", "d*", "doc*"); + rollupIndex = randomFrom("rollup", "test"); + cron = "*/1 * * * * ?"; + numDocs = indexDocs(); + pageSize = randomIntBetween(numDocs, numDocs * 10); + } + public int indexDocs() throws Exception { final BulkRequest bulkRequest = new BulkRequest(); bulkRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); for (int minute = 0; minute < 60; minute++) { @@ -120,12 +141,33 @@ public void testPutStartAndGetRollupJob() throws Exception { RefreshResponse refreshResponse = highLevelClient().indices().refresh(new RefreshRequest("docs"), RequestOptions.DEFAULT); assertEquals(0, refreshResponse.getFailedShards()); + return numDocs; + } - final String id = randomAlphaOfLength(10); - final String indexPattern = randomFrom("docs", "d*", "doc*"); - final String rollupIndex = randomFrom("rollup", "test"); - final String cron = "*/1 * * * * ?"; - final int pageSize = randomIntBetween(numDocs, numDocs * 10); + + public void testDeleteRollupJob() throws Exception { + final GroupConfig groups = new GroupConfig(new DateHistogramGroupConfig("date", DateHistogramInterval.DAY)); + final List metrics = Collections.singletonList(new MetricConfig("value", SUPPORTED_METRICS)); + final TimeValue timeout = TimeValue.timeValueSeconds(randomIntBetween(30, 600)); + PutRollupJobRequest putRollupJobRequest = + new PutRollupJobRequest(new RollupJobConfig(id, indexPattern, rollupIndex, cron, pageSize, groups, metrics, timeout)); + final RollupClient rollupClient = highLevelClient().rollup(); + PutRollupJobResponse response = execute(putRollupJobRequest, rollupClient::putRollupJob, rollupClient::putRollupJobAsync); + DeleteRollupJobRequest deleteRollupJobRequest = new DeleteRollupJobRequest(id); + DeleteRollupJobResponse deleteRollupJobResponse = highLevelClient().rollup() + .deleteRollupJob(deleteRollupJobRequest, RequestOptions.DEFAULT); + assertTrue(deleteRollupJobResponse.isAcknowledged()); + } + + public void testDeleteMissingRollupJob() { + DeleteRollupJobRequest deleteRollupJobRequest = new DeleteRollupJobRequest(randomAlphaOfLength(10)); + ElasticsearchStatusException responseException = expectThrows(ElasticsearchStatusException.class,() -> highLevelClient().rollup() + .deleteRollupJob(deleteRollupJobRequest, RequestOptions.DEFAULT)); + assertThat(responseException.status().getStatus(), is(404)); + } + + @SuppressWarnings("unchecked") + public void testPutAndGetRollupJob() throws Exception { // TODO expand this to also test with histogram and terms? 
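// A sketch of what that expansion might look like, assuming the client-side GroupConfig
// accepts optional histogram and terms configurations alongside the required date histogram
// (constructor shape and field names below are illustrative, not exercised by this test):
//
//     GroupConfig expandedGroups = new GroupConfig(
//         new DateHistogramGroupConfig("date", DateHistogramInterval.DAY), // required date grouping
//         new HistogramGroupConfig(5L, "value"),                           // numeric buckets of width 5
//         new TermsGroupConfig("user"));                                   // one bucket per distinct term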
final GroupConfig groups = new GroupConfig(new DateHistogramGroupConfig("date", DateHistogramInterval.DAY)); final List metrics = Collections.singletonList(new MetricConfig("value", SUPPORTED_METRICS)); @@ -142,9 +184,6 @@ public void testPutStartAndGetRollupJob() throws Exception { StartRollupJobResponse startResponse = execute(startRequest, rollupClient::startRollupJob, rollupClient::startRollupJobAsync); assertTrue(startResponse.isAcknowledged()); - int finalMin = min; - int finalMax = max; - double finalSum = sum; assertBusy(() -> { SearchResponse searchResponse = highLevelClient().search(new SearchRequest(rollupIndex), RequestOptions.DEFAULT); assertEquals(0, searchResponse.getFailedShards()); @@ -162,13 +201,13 @@ public void testPutStartAndGetRollupJob() throws Exception { for (String name : metric.getMetrics()) { Number value = (Number) source.get(metric.getField() + "." + name + ".value"); if ("min".equals(name)) { - assertEquals(finalMin, value.intValue()); + assertEquals(min, value.intValue()); } else if ("max".equals(name)) { - assertEquals(finalMax, value.intValue()); + assertEquals(max, value.intValue()); } else if ("sum".equals(name)) { - assertEquals(finalSum, value.doubleValue(), 0.0d); + assertEquals(sum, value.doubleValue(), 0.0d); } else if ("avg".equals(name)) { - assertEquals(finalSum, value.doubleValue(), 0.0d); + assertEquals(sum, value.doubleValue(), 0.0d); Number avgCount = (Number) source.get(metric.getField() + "." + name + "._count"); assertEquals(numDocs, avgCount.intValue()); } else if ("value_count".equals(name)) { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java index 2458cb9c1f20b..8468636d5cdb8 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java @@ -35,6 +35,8 @@ import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchScrollRequest; +import org.elasticsearch.client.core.CountRequest; +import org.elasticsearch.client.core.CountResponse; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.unit.TimeValue; @@ -1249,4 +1251,69 @@ private static void assertSearchHeader(SearchResponse searchResponse) { assertEquals(0, searchResponse.getShardFailures().length); assertEquals(SearchResponse.Clusters.EMPTY, searchResponse.getClusters()); } + + public void testCountOneIndexNoQuery() throws IOException { + CountRequest countRequest = new CountRequest("index"); + CountResponse countResponse = execute(countRequest, highLevelClient()::count, highLevelClient()::countAsync); + assertCountHeader(countResponse); + assertEquals(5, countResponse.getCount()); + } + + public void testCountMultipleIndicesNoQuery() throws IOException { + CountRequest countRequest = new CountRequest("index", "index1"); + CountResponse countResponse = execute(countRequest, highLevelClient()::count, highLevelClient()::countAsync); + assertCountHeader(countResponse); + assertEquals(7, countResponse.getCount()); + } + + public void testCountAllIndicesNoQuery() throws IOException { + CountRequest countRequest = new CountRequest(); + CountResponse countResponse = execute(countRequest, highLevelClient()::count, highLevelClient()::countAsync); + assertCountHeader(countResponse); + assertEquals(12, 
countResponse.getCount()); + } + + public void testCountOneIndexMatchQuery() throws IOException { + CountRequest countRequest = new CountRequest("index"); + countRequest.source(new SearchSourceBuilder().query(new MatchQueryBuilder("num", 10))); + CountResponse countResponse = execute(countRequest, highLevelClient()::count, highLevelClient()::countAsync); + assertCountHeader(countResponse); + assertEquals(1, countResponse.getCount()); + } + + public void testCountMultipleIndicesMatchQueryUsingConstructor() throws IOException { + + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder().query(new MatchQueryBuilder("field", "value1")); + CountRequest countRequest = new CountRequest(new String[]{"index1", "index2", "index3"}, sourceBuilder); + CountResponse countResponse = execute(countRequest, highLevelClient()::count, highLevelClient()::countAsync); + assertCountHeader(countResponse); + assertEquals(3, countResponse.getCount()); + + } + + public void testCountMultipleIndicesMatchQuery() throws IOException { + + CountRequest countRequest = new CountRequest("index1", "index2", "index3"); + countRequest.source(new SearchSourceBuilder().query(new MatchQueryBuilder("field", "value1"))); + CountResponse countResponse = execute(countRequest, highLevelClient()::count, highLevelClient()::countAsync); + assertCountHeader(countResponse); + assertEquals(3, countResponse.getCount()); + } + + public void testCountAllIndicesMatchQuery() throws IOException { + + CountRequest countRequest = new CountRequest(); + countRequest.source(new SearchSourceBuilder().query(new MatchQueryBuilder("field", "value1"))); + CountResponse countResponse = execute(countRequest, highLevelClient()::count, highLevelClient()::countAsync); + assertCountHeader(countResponse); + assertEquals(3, countResponse.getCount()); + } + + private static void assertCountHeader(CountResponse countResponse) { + assertEquals(0, countResponse.getSkippedShards()); + assertEquals(0, countResponse.getFailedShards()); + assertThat(countResponse.getTotalShards(), greaterThan(0)); + assertEquals(countResponse.getTotalShards(), countResponse.getSuccessfulShards()); + assertEquals(0, countResponse.getShardFailures().length); + } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/SecurityIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/SecurityIT.java new file mode 100644 index 0000000000000..74a4d58e2bf77 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/SecurityIT.java @@ -0,0 +1,108 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client; + +import org.apache.http.client.methods.HttpDelete; +import org.elasticsearch.ElasticsearchStatusException; +import org.elasticsearch.client.security.AuthenticateResponse; +import org.elasticsearch.client.security.PutUserRequest; +import org.elasticsearch.client.security.PutUserResponse; +import org.elasticsearch.client.security.RefreshPolicy; +import org.elasticsearch.common.CharArrays; + +import java.util.Arrays; +import java.util.Base64; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.empty; + +public class SecurityIT extends ESRestHighLevelClientTestCase { + + public void testAuthenticate() throws Exception { + final SecurityClient securityClient = highLevelClient().security(); + // test fixture: put enabled user + final PutUserRequest putUserRequest = randomPutUserRequest(true); + final PutUserResponse putUserResponse = execute(putUserRequest, securityClient::putUser, securityClient::putUserAsync); + assertThat(putUserResponse.isCreated(), is(true)); + + // authenticate correctly + final String basicAuthHeader = basicAuthHeader(putUserRequest.getUsername(), putUserRequest.getPassword()); + final AuthenticateResponse authenticateResponse = execute(securityClient::authenticate, securityClient::authenticateAsync, + authorizationRequestOptions(basicAuthHeader)); + + assertThat(authenticateResponse.getUser().username(), is(putUserRequest.getUsername())); + if (putUserRequest.getRoles().isEmpty()) { + assertThat(authenticateResponse.getUser().roles(), is(empty())); + } else { + assertThat(authenticateResponse.getUser().roles(), contains(putUserRequest.getRoles().toArray())); + } + assertThat(authenticateResponse.getUser().metadata(), is(putUserRequest.getMetadata())); + assertThat(authenticateResponse.getUser().fullName(), is(putUserRequest.getFullName())); + assertThat(authenticateResponse.getUser().email(), is(putUserRequest.getEmail())); + assertThat(authenticateResponse.enabled(), is(true)); + + // delete user + final Request deleteUserRequest = new Request(HttpDelete.METHOD_NAME, "/_xpack/security/user/" + putUserRequest.getUsername()); + highLevelClient().getLowLevelClient().performRequest(deleteUserRequest); + + // authentication no longer works + ElasticsearchStatusException e = expectThrows(ElasticsearchStatusException.class, () -> execute(securityClient::authenticate, + securityClient::authenticateAsync, authorizationRequestOptions(basicAuthHeader))); + assertThat(e.getMessage(), containsString("unable to authenticate user [" + putUserRequest.getUsername() + "]")); + } + + private static PutUserRequest randomPutUserRequest(boolean enabled) { + final String username = randomAlphaOfLengthBetween(1, 4); + final char[] password = randomAlphaOfLengthBetween(6, 10).toCharArray(); + final List roles = Arrays.asList(generateRandomStringArray(3, 3, false, true)); + final String fullName = randomFrom(random(), null, randomAlphaOfLengthBetween(0, 3)); + final String email = randomFrom(random(), null, randomAlphaOfLengthBetween(0, 3)); + final Map metadata; + metadata = new HashMap<>(); + if (randomBoolean()) { + metadata.put("string", null); + } else { + metadata.put("string", randomAlphaOfLengthBetween(0, 4)); + } + if (randomBoolean()) { + metadata.put("string_list", null); + } else { + metadata.put("string_list", 
Arrays.asList(generateRandomStringArray(4, 4, false, true))); + } + return new PutUserRequest(username, password, roles, fullName, email, enabled, metadata, RefreshPolicy.IMMEDIATE); + } + + private static String basicAuthHeader(String username, char[] password) { + final String concat = new StringBuilder().append(username).append(':').append(password).toString(); + final byte[] concatBytes = CharArrays.toUtf8Bytes(concat.toCharArray()); + return "Basic " + Base64.getEncoder().encodeToString(concatBytes); + } + + private static RequestOptions authorizationRequestOptions(String authorizationHeader) { + final RequestOptions.Builder builder = RequestOptions.DEFAULT.toBuilder(); + builder.addHeader("Authorization", authorizationHeader); + return builder.build(); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/TimedRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/TimedRequestTests.java new file mode 100644 index 0000000000000..8024aa0188598 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/TimedRequestTests.java @@ -0,0 +1,42 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client; + +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.test.ESTestCase; + +public class TimedRequestTests extends ESTestCase { + + public void testDefaults() { + TimedRequest timedRequest = new TimedRequest(){}; + assertEquals(timedRequest.timeout(), TimedRequest.DEFAULT_ACK_TIMEOUT); + assertEquals(timedRequest.masterNodeTimeout(), TimedRequest.DEFAULT_MASTER_NODE_TIMEOUT); + } + + public void testNonDefaults() { + TimedRequest timedRequest = new TimedRequest(){}; + TimeValue timeout = TimeValue.timeValueSeconds(randomIntBetween(0, 1000)); + TimeValue masterTimeout = TimeValue.timeValueSeconds(randomIntBetween(0,1000)); + timedRequest.setTimeout(timeout); + timedRequest.setMasterTimeout(masterTimeout); + assertEquals(timedRequest.timeout(), timeout); + assertEquals(timedRequest.masterNodeTimeout(), masterTimeout); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/core/CountRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/core/CountRequestTests.java new file mode 100644 index 0000000000000..1030f4401e160 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/core/CountRequestTests.java @@ -0,0 +1,95 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client.core;
+
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.common.util.ArrayUtils;
+import org.elasticsearch.index.query.MatchQueryBuilder;
+import org.elasticsearch.search.builder.SearchSourceBuilder;
+import org.elasticsearch.test.ESTestCase;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.elasticsearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode;
+
+// Similar to SearchRequestTests, since CountRequest inlines several members (and functionality) of SearchRequest.
+public class CountRequestTests extends ESTestCase {
+
+    public void testIllegalArguments() {
+        CountRequest countRequest = new CountRequest();
+        assertNotNull(countRequest.indices());
+        assertNotNull(countRequest.indicesOptions());
+        assertNotNull(countRequest.types());
+
+        NullPointerException e = expectThrows(NullPointerException.class, () -> countRequest.indices((String[]) null));
+        assertEquals("indices must not be null", e.getMessage());
+        e = expectThrows(NullPointerException.class, () -> countRequest.indices((String) null));
+        assertEquals("index must not be null", e.getMessage());
+
+        e = expectThrows(NullPointerException.class, () -> countRequest.indicesOptions(null));
+        assertEquals("indicesOptions must not be null", e.getMessage());
+
+        e = expectThrows(NullPointerException.class, () -> countRequest.types((String[]) null));
+        assertEquals("types must not be null", e.getMessage());
+        e = expectThrows(NullPointerException.class, () -> countRequest.types((String) null));
+        assertEquals("type must not be null", e.getMessage());
+
+        e = expectThrows(NullPointerException.class, () -> countRequest.source(null));
+        assertEquals("source must not be null", e.getMessage());
+    }
+
+    public void testEqualsAndHashcode() {
+        checkEqualsAndHashCode(createCountRequest(), CountRequestTests::copyRequest, this::mutate);
+    }
+
+    private CountRequest createCountRequest() {
+        CountRequest countRequest = new CountRequest("index");
+        countRequest.source(new SearchSourceBuilder().query(new MatchQueryBuilder("num", 10)));
+        return countRequest;
+    }
+
+    private CountRequest mutate(CountRequest countRequest) {
+        CountRequest mutation = copyRequest(countRequest);
+        List<Runnable> mutators = new ArrayList<>();
+        mutators.add(() -> mutation.indices(ArrayUtils.concat(countRequest.indices(), new String[]{randomAlphaOfLength(10)})));
+        mutators.add(() -> mutation.indicesOptions(randomValueOtherThan(countRequest.indicesOptions(),
+            () -> IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean()))));
+        mutators.add(() -> mutation.types(ArrayUtils.concat(countRequest.types(), new String[]{randomAlphaOfLength(10)})));
+        mutators.add(() -> mutation.preference(randomValueOtherThan(countRequest.preference(), () -> randomAlphaOfLengthBetween(3, 10))));
+        mutators.add(() -> mutation.routing(randomValueOtherThan(countRequest.routing(), () ->
randomAlphaOfLengthBetween(3, 10)))); + randomFrom(mutators).run(); + return mutation; + } + + private static CountRequest copyRequest(CountRequest countRequest) { + CountRequest result = new CountRequest(); + result.indices(countRequest.indices()); + result.indicesOptions(countRequest.indicesOptions()); + result.types(countRequest.types()); + result.routing(countRequest.routing()); + result.preference(countRequest.preference()); + if (countRequest.source() != null) { + result.source(countRequest.source()); + } + return result; + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/core/CountResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/core/CountResponseTests.java new file mode 100644 index 0000000000000..c2fc668d604e5 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/core/CountResponseTests.java @@ -0,0 +1,126 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.core; + +import org.elasticsearch.action.search.ShardSearchFailure; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.ParsingException; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.rest.action.RestActions; +import org.elasticsearch.search.SearchShardTarget; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; + +import static org.elasticsearch.test.AbstractXContentTestCase.xContentTester; + +public class CountResponseTests extends ESTestCase { + + // Not comparing XContent for equivalence as we cannot compare the ShardSearchFailure#cause, because it will be wrapped in an outer + // ElasticSearchException. Best effort: try to check that the original message appears somewhere in the rendered xContent + // For more see ShardSearchFailureTests. + public void testFromXContent() throws IOException { + xContentTester( + this::createParser, + this::createTestInstance, + this::toXContent, + CountResponse::fromXContent) + .supportsUnknownFields(false) + .assertEqualsConsumer(this::assertEqualInstances) + .assertToXContentEquivalence(false) + .test(); + } + + private CountResponse createTestInstance() { + long count = 5; + Boolean terminatedEarly = randomBoolean() ? 
null : randomBoolean(); + int totalShards = randomIntBetween(1, Integer.MAX_VALUE); + int successfulShards = randomIntBetween(0, totalShards); + int skippedShards = randomIntBetween(0, totalShards); + int numFailures = randomIntBetween(1, 5); + ShardSearchFailure[] failures = new ShardSearchFailure[numFailures]; + for (int i = 0; i < failures.length; i++) { + failures[i] = createShardFailureTestItem(); + } + CountResponse.ShardStats shardStats = new CountResponse.ShardStats(successfulShards, totalShards, skippedShards, + randomBoolean() ? ShardSearchFailure.EMPTY_ARRAY : failures); + return new CountResponse(count, terminatedEarly, shardStats); + } + + private void toXContent(CountResponse response, XContentBuilder builder) throws IOException { + builder.startObject(); + builder.field(CountResponse.COUNT.getPreferredName(), response.getCount()); + if (response.isTerminatedEarly() != null) { + builder.field(CountResponse.TERMINATED_EARLY.getPreferredName(), response.isTerminatedEarly()); + } + toXContent(response.getShardStats(), builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + } + + private void toXContent(CountResponse.ShardStats stats, XContentBuilder builder, ToXContent.Params params) throws IOException { + RestActions.buildBroadcastShardsHeader(builder, params, stats.getTotalShards(), stats.getSuccessfulShards(), stats + .getSkippedShards(), stats.getShardFailures().length, stats.getShardFailures()); + } + + @SuppressWarnings("Duplicates") + private static ShardSearchFailure createShardFailureTestItem() { + String randomMessage = randomAlphaOfLengthBetween(3, 20); + Exception ex = new ParsingException(0, 0, randomMessage, new IllegalArgumentException("some bad argument")); + SearchShardTarget searchShardTarget = null; + if (randomBoolean()) { + String nodeId = randomAlphaOfLengthBetween(5, 10); + String indexName = randomAlphaOfLengthBetween(5, 10); + searchShardTarget = new SearchShardTarget(nodeId, + new ShardId(new Index(indexName, IndexMetaData.INDEX_UUID_NA_VALUE), randomInt()), null, null); + } + return new ShardSearchFailure(ex, searchShardTarget); + } + + private void assertEqualInstances(CountResponse expectedInstance, CountResponse newInstance) { + assertEquals(expectedInstance.getCount(), newInstance.getCount()); + assertEquals(expectedInstance.status(), newInstance.status()); + assertEquals(expectedInstance.isTerminatedEarly(), newInstance.isTerminatedEarly()); + assertEquals(expectedInstance.getTotalShards(), newInstance.getTotalShards()); + assertEquals(expectedInstance.getFailedShards(), newInstance.getFailedShards()); + assertEquals(expectedInstance.getSkippedShards(), newInstance.getSkippedShards()); + assertEquals(expectedInstance.getSuccessfulShards(), newInstance.getSuccessfulShards()); + assertEquals(expectedInstance.getShardFailures().length, newInstance.getShardFailures().length); + + ShardSearchFailure[] expectedFailures = expectedInstance.getShardFailures(); + ShardSearchFailure[] newFailures = newInstance.getShardFailures(); + + for (int i = 0; i < newFailures.length; i++) { + ShardSearchFailure parsedFailure = newFailures[i]; + ShardSearchFailure originalFailure = expectedFailures[i]; + assertEquals(originalFailure.index(), parsedFailure.index()); + assertEquals(originalFailure.shard(), parsedFailure.shard()); + assertEquals(originalFailure.shardId(), parsedFailure.shardId()); + String originalMsg = originalFailure.getCause().getMessage(); + assertEquals(parsedFailure.getCause().getMessage(), "Elasticsearch exception [type=parsing_exception, 
reason=" + + originalMsg + "]"); + String nestedMsg = originalFailure.getCause().getCause().getMessage(); + assertEquals(parsedFailure.getCause().getCause().getMessage(), + "Elasticsearch exception [type=illegal_argument_exception, reason=" + nestedMsg + "]"); + } + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CRUDDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CRUDDocumentationIT.java index cb1890c7789a1..5530c82272698 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CRUDDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CRUDDocumentationIT.java @@ -1545,23 +1545,23 @@ public void afterBulk(long executionId, BulkRequest request, // Not entirely sure if _termvectors belongs to CRUD, and in the absence of a better place, will have it here public void testTermVectors() throws Exception { RestHighLevelClient client = highLevelClient(); - CreateIndexRequest authorsRequest = new CreateIndexRequest("authors").mapping("doc", "user", "type=keyword"); + CreateIndexRequest authorsRequest = new CreateIndexRequest("authors").mapping("_doc", "user", "type=keyword"); CreateIndexResponse authorsResponse = client.indices().create(authorsRequest, RequestOptions.DEFAULT); assertTrue(authorsResponse.isAcknowledged()); - client.index(new IndexRequest("index", "doc", "1").source("user", "kimchy"), RequestOptions.DEFAULT); + client.index(new IndexRequest("index", "_doc", "1").source("user", "kimchy"), RequestOptions.DEFAULT); Response refreshResponse = client().performRequest(new Request("POST", "/authors/_refresh")); assertEquals(200, refreshResponse.getStatusLine().getStatusCode()); { // tag::term-vectors-request - TermVectorsRequest request = new TermVectorsRequest("authors", "doc", "1"); + TermVectorsRequest request = new TermVectorsRequest("authors", "_doc", "1"); request.setFields("user"); // end::term-vectors-request } { // tag::term-vectors-request-artificial - TermVectorsRequest request = new TermVectorsRequest("authors", "doc"); + TermVectorsRequest request = new TermVectorsRequest("authors", "_doc"); XContentBuilder docBuilder = XContentFactory.jsonBuilder(); docBuilder.startObject().field("user", "guest-user").endObject(); request.setDoc(docBuilder); // <1> @@ -1594,7 +1594,7 @@ public void testTermVectors() throws Exception { // end::term-vectors-request-optional-arguments } - TermVectorsRequest request = new TermVectorsRequest("authors", "doc", "1"); + TermVectorsRequest request = new TermVectorsRequest("authors", "_doc", "1"); request.setFields("user"); // tag::term-vectors-execute diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java index cf6a9622c0c72..90337ebf6053e 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java @@ -74,6 +74,8 @@ import org.elasticsearch.client.ml.PutCalendarResponse; import org.elasticsearch.client.ml.PutDatafeedRequest; import org.elasticsearch.client.ml.PutDatafeedResponse; +import org.elasticsearch.client.ml.PutFilterRequest; +import org.elasticsearch.client.ml.PutFilterResponse; import org.elasticsearch.client.ml.PutJobRequest; import 
org.elasticsearch.client.ml.PutJobResponse; import org.elasticsearch.client.ml.StartDatafeedRequest; @@ -94,6 +96,7 @@ import org.elasticsearch.client.ml.job.config.Detector; import org.elasticsearch.client.ml.job.config.Job; import org.elasticsearch.client.ml.job.config.JobUpdate; +import org.elasticsearch.client.ml.job.config.MlFilter; import org.elasticsearch.client.ml.job.config.ModelPlotConfig; import org.elasticsearch.client.ml.job.config.Operator; import org.elasticsearch.client.ml.job.config.RuleCondition; @@ -2007,4 +2010,58 @@ public void onFailure(Exception e) { assertTrue(latch.await(30L, TimeUnit.SECONDS)); } + + public void testCreateFilter() throws Exception { + RestHighLevelClient client = highLevelClient(); + { + // tag::put-filter-config + MlFilter.Builder filterBuilder = MlFilter.builder("my_safe_domains") // <1> + .setDescription("A list of safe domains") // <2> + .setItems("*.google.com", "wikipedia.org"); // <3> + // end::put-filter-config + + // tag::put-filter-request + PutFilterRequest request = new PutFilterRequest(filterBuilder.build()); // <1> + // end::put-filter-request + + // tag::put-filter-execute + PutFilterResponse response = client.machineLearning().putFilter(request, RequestOptions.DEFAULT); + // end::put-filter-execute + + // tag::put-filter-response + MlFilter createdFilter = response.getResponse(); // <1> + // end::put-filter-response + assertThat(createdFilter.getId(), equalTo("my_safe_domains")); + } + { + MlFilter.Builder filterBuilder = MlFilter.builder("safe_domains_async") + .setDescription("A list of safe domains") + .setItems("*.google.com", "wikipedia.org"); + + PutFilterRequest request = new PutFilterRequest(filterBuilder.build()); + // tag::put-filter-execute-listener + ActionListener listener = new ActionListener() { + @Override + public void onResponse(PutFilterResponse response) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::put-filter-execute-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::put-filter-execute-async + client.machineLearning().putFilterAsync(request, RequestOptions.DEFAULT, listener); // <1> + // end::put-filter-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/RollupDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/RollupDocumentationIT.java index e196dbc0229ef..e790ddc405eef 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/RollupDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/RollupDocumentationIT.java @@ -41,6 +41,8 @@ import org.elasticsearch.client.rollup.GetRollupJobResponse.JobWrapper; import org.elasticsearch.client.rollup.GetRollupJobResponse.RollupIndexerJobStats; import org.elasticsearch.client.rollup.GetRollupJobResponse.RollupJobStatus; +import org.elasticsearch.client.rollup.DeleteRollupJobRequest; +import org.elasticsearch.client.rollup.DeleteRollupJobResponse; import org.elasticsearch.client.rollup.PutRollupJobRequest; import org.elasticsearch.client.rollup.PutRollupJobResponse; import org.elasticsearch.client.rollup.RollableIndexCaps; @@ -451,4 +453,53 @@ private void waitForPendingRollupTasks() throws Exception { } }); } + + public void 
+    public void testDeleteRollupJob() throws Exception {
+        RestHighLevelClient client = highLevelClient();
+
+        String id = "job_2";
+
+        // tag::rollup-delete-job-request
+        DeleteRollupJobRequest request = new DeleteRollupJobRequest(id); // <1>
+        // end::rollup-delete-job-request
+        try {
+            // tag::rollup-delete-job-execute
+            DeleteRollupJobResponse response = client.rollup().deleteRollupJob(request, RequestOptions.DEFAULT);
+            // end::rollup-delete-job-execute
+
+            // tag::rollup-delete-job-response
+            response.isAcknowledged(); // <1>
+            // end::rollup-delete-job-response
+        } catch (Exception e) {
+            // Swallow any exception; this test does not actually test job deletion.
+        }
+
+        // tag::rollup-delete-job-execute-listener
+        ActionListener<DeleteRollupJobResponse> listener = new ActionListener<DeleteRollupJobResponse>() {
+            @Override
+            public void onResponse(DeleteRollupJobResponse response) {
+                boolean acknowledged = response.isAcknowledged(); // <1>
+            }
+
+            @Override
+            public void onFailure(Exception e) {
+                // <2>
+            }
+        };
+        // end::rollup-delete-job-execute-listener
+
+        // Replace the empty listener by a blocking listener in test
+        final CountDownLatch latch = new CountDownLatch(1);
+        listener = new LatchedActionListener<>(listener, latch);
+
+        // tag::rollup-delete-job-execute-async
+        client.rollup().deleteRollupJobAsync(request, RequestOptions.DEFAULT, listener); // <1>
+        // end::rollup-delete-job-execute-async
+
+        assertTrue(latch.await(30L, TimeUnit.SECONDS));
+    }
 }
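Each of these async documentation tests wraps its deliberately empty ActionListener in a LatchedActionListener so the test thread can await the callback. A minimal sketch of that latch-wrapping idiom, written with plain JDK types rather than the client's listener classes:

-------------------------------------------------
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.function.Consumer;

// Sketch of the pattern: the wrapped callback runs first, then the latch
// opens so the waiting test thread can proceed (or time out).
final class LatchedCallbackSketch {
    static <T> Consumer<T> latched(Consumer<T> delegate, CountDownLatch latch) {
        return value -> {
            try {
                delegate.accept(value);   // the "empty" documentation callback
            } finally {
                latch.countDown();        // always release the waiting thread
            }
        };
    }

    public static void main(String[] args) throws InterruptedException {
        CountDownLatch latch = new CountDownLatch(1);
        Consumer<String> listener = latched(response -> System.out.println("got " + response), latch);
        // In the real tests an async client call invokes the listener on another thread.
        new Thread(() -> listener.accept("acknowledged")).start();
        if (latch.await(30L, TimeUnit.SECONDS) == false) {
            throw new AssertionError("listener was never called");
        }
    }
}
-------------------------------------------------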
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java
index 8df7fd20a180b..e97620b50d4e6 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java
@@ -49,6 +49,8 @@ import org.elasticsearch.client.Response;
 import org.elasticsearch.client.RestClient;
 import org.elasticsearch.client.RestHighLevelClient;
+import org.elasticsearch.client.core.CountRequest;
+import org.elasticsearch.client.core.CountResponse;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.document.DocumentField;
 import org.elasticsearch.common.text.Text;
@@ -1284,4 +1286,124 @@ private void indexSearchTestData() throws IOException {
         assertSame(RestStatus.OK, bulkResponse.status());
         assertFalse(bulkResponse.hasFailures());
     }
+
+    @SuppressWarnings({"unused", "unchecked"})
+    public void testCount() throws Exception {
+        indexCountTestData();
+        RestHighLevelClient client = highLevelClient();
+        {
+            // tag::count-request-basic
+            CountRequest countRequest = new CountRequest(); // <1>
+            SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); // <2>
+            searchSourceBuilder.query(QueryBuilders.matchAllQuery()); // <3>
+            countRequest.source(searchSourceBuilder); // <4>
+            // end::count-request-basic
+        }
+        {
+            // tag::count-request-indices-types
+            CountRequest countRequest = new CountRequest("blog"); // <1>
+            countRequest.types("doc"); // <2>
+            // end::count-request-indices-types
+            // tag::count-request-routing
+            countRequest.routing("routing"); // <1>
+            // end::count-request-routing
+            // tag::count-request-indicesOptions
+            countRequest.indicesOptions(IndicesOptions.lenientExpandOpen()); // <1>
+            // end::count-request-indicesOptions
+            // tag::count-request-preference
+            countRequest.preference("_local"); // <1>
+            // end::count-request-preference
+            assertNotNull(client.count(countRequest, RequestOptions.DEFAULT));
+        }
+        {
+            // tag::count-source-basics
+            SearchSourceBuilder sourceBuilder = new SearchSourceBuilder(); // <1>
+            sourceBuilder.query(QueryBuilders.termQuery("user", "kimchy")); // <2>
+            // end::count-source-basics
+
+            // tag::count-source-setter
+            CountRequest countRequest = new CountRequest();
+            countRequest.indices("blog", "author");
+            countRequest.source(sourceBuilder);
+            // end::count-source-setter
+
+            // tag::count-execute
+            CountResponse countResponse = client
+                .count(countRequest, RequestOptions.DEFAULT);
+            // end::count-execute
+
+            // tag::count-execute-listener
+            ActionListener<CountResponse> listener =
+                new ActionListener<CountResponse>() {
+
+                    @Override
+                    public void onResponse(CountResponse countResponse) {
+                        // <1>
+                    }
+
+                    @Override
+                    public void onFailure(Exception e) {
+                        // <2>
+                    }
+                };
+            // end::count-execute-listener
+
+            // Replace the empty listener by a blocking listener in test
+            final CountDownLatch latch = new CountDownLatch(1);
+            listener = new LatchedActionListener<>(listener, latch);
+
+            // tag::count-execute-async
+            client.countAsync(countRequest, RequestOptions.DEFAULT, listener); // <1>
+            // end::count-execute-async
+
+            assertTrue(latch.await(30L, TimeUnit.SECONDS));
+
+            // tag::count-response-1
+            long count = countResponse.getCount();
+            RestStatus status = countResponse.status();
+            Boolean terminatedEarly = countResponse.isTerminatedEarly();
+            // end::count-response-1
+
+            // tag::count-response-2
+            int totalShards = countResponse.getTotalShards();
+            int skippedShards = countResponse.getSkippedShards();
+            int successfulShards = countResponse.getSuccessfulShards();
+            int failedShards = countResponse.getFailedShards();
+            for (ShardSearchFailure failure : countResponse.getShardFailures()) {
+                // failures should be handled here
+            }
+            // end::count-response-2
+            assertNotNull(countResponse);
+            assertEquals(4, countResponse.getCount());
+        }
+    }
+
+    private static void indexCountTestData() throws IOException {
+        CreateIndexRequest authorsRequest = new CreateIndexRequest("author")
+            .mapping("doc", "user", "type=keyword,doc_values=false");
+        CreateIndexResponse authorsResponse = highLevelClient().indices().create(authorsRequest, RequestOptions.DEFAULT);
+        assertTrue(authorsResponse.isAcknowledged());
+
+        BulkRequest bulkRequest = new BulkRequest();
+        bulkRequest.add(new IndexRequest("blog", "doc", "1")
+            .source(XContentType.JSON, "title", "Doubling Down on Open?", "user",
+                Collections.singletonList("kimchy"), "innerObject", Collections.singletonMap("key", "value")));
+        bulkRequest.add(new IndexRequest("blog", "doc", "2")
+            .source(XContentType.JSON, "title", "Swiftype Joins Forces with Elastic", "user",
+                Arrays.asList("kimchy", "matt"), "innerObject", Collections.singletonMap("key", "value")));
+        bulkRequest.add(new IndexRequest("blog", "doc", "3")
+            .source(XContentType.JSON, "title", "On Net Neutrality", "user",
+                Arrays.asList("tyler", "kimchy"), "innerObject", Collections.singletonMap("key", "value")));
+
+        bulkRequest.add(new IndexRequest("author", "doc", "1")
+            .source(XContentType.JSON, "user", "kimchy"));
+
+        bulkRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE);
+        BulkResponse bulkResponse = highLevelClient().bulk(bulkRequest, RequestOptions.DEFAULT);
+        assertSame(RestStatus.OK, bulkResponse.status());
+        assertFalse(bulkResponse.hasFailures());
+    }
 }
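Taken together, the tagged snippets in testCount amount to one short end-to-end flow. A condensed sketch, assuming the same high-level client API the test uses (fluent CountRequest setters included):

-------------------------------------------------
import java.io.IOException;

import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.client.core.CountRequest;
import org.elasticsearch.client.core.CountResponse;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.builder.SearchSourceBuilder;

// Condensed count flow: build a query, scope it to indices, then read the
// hit count and shard bookkeeping from the response.
final class CountSketch {
    static long countKimchyDocs(RestHighLevelClient client) throws IOException {
        CountRequest request = new CountRequest("blog", "author")
            .source(new SearchSourceBuilder().query(QueryBuilders.termQuery("user", "kimchy")));
        CountResponse response = client.count(request, RequestOptions.DEFAULT);
        assert response.getFailedShards() == 0;   // shard failures are reported, not thrown
        return response.getCount();
    }
}
-------------------------------------------------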
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java
index 7c37c7ef50a7a..fdcaa56fa004d 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java
@@ -29,7 +29,10 @@ import org.elasticsearch.client.Request;
 import org.elasticsearch.client.RequestOptions;
 import org.elasticsearch.client.RestHighLevelClient;
+import org.elasticsearch.client.security.AuthenticateResponse;
 import org.elasticsearch.client.security.ChangePasswordRequest;
+import org.elasticsearch.client.security.ClearRealmCacheRequest;
+import org.elasticsearch.client.security.ClearRealmCacheResponse;
 import org.elasticsearch.client.security.ClearRolesCacheRequest;
 import org.elasticsearch.client.security.ClearRolesCacheResponse;
 import org.elasticsearch.client.security.CreateTokenRequest;
@@ -50,10 +53,11 @@ import org.elasticsearch.client.security.PutUserRequest;
 import org.elasticsearch.client.security.PutUserResponse;
 import org.elasticsearch.client.security.RefreshPolicy;
-import org.elasticsearch.client.security.support.CertificateInfo;
 import org.elasticsearch.client.security.support.expressiondsl.RoleMapperExpression;
-import org.elasticsearch.client.security.support.expressiondsl.expressions.AnyRoleMapperExpression;
 import org.elasticsearch.client.security.support.expressiondsl.fields.FieldRoleMapperExpression;
+import org.elasticsearch.client.security.user.User;
+import org.elasticsearch.client.security.support.CertificateInfo;
+import org.elasticsearch.client.security.support.expressiondsl.expressions.AnyRoleMapperExpression;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.hamcrest.Matchers;
@@ -67,13 +71,14 @@ import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
-import static org.hamcrest.Matchers.empty;
-import static org.hamcrest.Matchers.not;
 import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
 import static org.hamcrest.Matchers.contains;
+import static org.hamcrest.Matchers.empty;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.is;
 import static org.hamcrest.Matchers.isIn;
+import static org.hamcrest.Matchers.not;
+import static org.hamcrest.Matchers.nullValue;
 public class SecurityDocumentationIT extends ESRestHighLevelClientTestCase {
@@ -379,6 +384,97 @@ public void onFailure(Exception e) {
         }
     }
+
+    public void testAuthenticate() throws Exception {
+        RestHighLevelClient client = highLevelClient();
+        {
+            //tag::authenticate-execute
+            AuthenticateResponse response = client.security().authenticate(RequestOptions.DEFAULT);
+            //end::authenticate-execute
+
+            //tag::authenticate-response
+            User user = response.getUser(); // <1>
+            boolean enabled = response.enabled(); // <2>
+            //end::authenticate-response
+
+            assertThat(user.username(), is("test_user"));
+            assertThat(user.roles(), contains(new String[]{"superuser"}));
+            assertThat(user.fullName(), nullValue());
+            assertThat(user.email(), nullValue());
+            assertThat(user.metadata().isEmpty(), is(true));
+            assertThat(enabled, is(true));
+        }
+
+        {
+            // tag::authenticate-execute-listener
+            ActionListener<AuthenticateResponse> listener = new ActionListener<AuthenticateResponse>() {
+                @Override
+                public void onResponse(AuthenticateResponse response) {
+                    // <1>
+                }
+
+                @Override
+                public void onFailure(Exception e) {
+                    // <2>
+                }
+            };
+            // end::authenticate-execute-listener
+
+            // Replace the empty listener by a blocking listener in test
+            final CountDownLatch latch = new CountDownLatch(1);
+            listener = new LatchedActionListener<>(listener, latch);
+
+            // tag::authenticate-execute-async
+            client.security().authenticateAsync(RequestOptions.DEFAULT, listener); // <1>
+            // end::authenticate-execute-async
+
+            assertTrue(latch.await(30L, TimeUnit.SECONDS));
+        }
+    }
+
+    public void testClearRealmCache() throws Exception {
+        RestHighLevelClient client = highLevelClient();
+        {
+            //tag::clear-realm-cache-request
+            ClearRealmCacheRequest request = new ClearRealmCacheRequest(Collections.emptyList(), Collections.emptyList());
+            //end::clear-realm-cache-request
+            //tag::clear-realm-cache-execute
+            ClearRealmCacheResponse response = client.security().clearRealmCache(request, RequestOptions.DEFAULT);
+            //end::clear-realm-cache-execute
+
+            assertNotNull(response);
+            assertThat(response.getNodes(), not(empty()));
+
+            //tag::clear-realm-cache-response
+            List nodes = response.getNodes(); // <1>
+            //end::clear-realm-cache-response
+        }
+        {
+            //tag::clear-realm-cache-execute-listener
+            ClearRealmCacheRequest request = new ClearRealmCacheRequest(Collections.emptyList(), Collections.emptyList());
+            ActionListener<ClearRealmCacheResponse> listener = new ActionListener<ClearRealmCacheResponse>() {
+                @Override
+                public void onResponse(ClearRealmCacheResponse clearRealmCacheResponse) {
+                    // <1>
+                }
+
+                @Override
+                public void onFailure(Exception e) {
+                    // <2>
+                }
+            };
+            //end::clear-realm-cache-execute-listener
+
+            // Replace the empty listener by a blocking listener in test
+            final CountDownLatch latch = new CountDownLatch(1);
+            listener = new LatchedActionListener<>(listener, latch);
+
+            // tag::clear-realm-cache-execute-async
+            client.security().clearRealmCacheAsync(request, RequestOptions.DEFAULT, listener); // <1>
+            // end::clear-realm-cache-execute-async
+
+            assertTrue(latch.await(30L, TimeUnit.SECONDS));
+        }
+    }
     public void testClearRolesCache() throws Exception {
         RestHighLevelClient client = highLevelClient();
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/graph/GraphExploreResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/graph/GraphExploreResponseTests.java
index b945bf4b7f5fd..5348337f87104 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/graph/GraphExploreResponseTests.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/graph/GraphExploreResponseTests.java
@@ -86,7 +86,7 @@ private static GraphExploreResponse createTestInstanceWithFailures() {
     @Override
     protected GraphExploreResponse doParseInstance(XContentParser parser) throws IOException {
-        return GraphExploreResponse.fromXContext(parser);
+        return GraphExploreResponse.fromXContent(parser);
     }
     @Override
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/AllocateActionTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/AllocateActionTests.java
new file mode 100644
index 0000000000000..e44eb0da0e188
--- /dev/null
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/AllocateActionTests.java
@@ -0,0 +1,99 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership.
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +public class AllocateActionTests extends AbstractXContentTestCase { + + @Override + protected AllocateAction createTestInstance() { + return randomInstance(); + } + + static AllocateAction randomInstance() { + boolean hasAtLeastOneMap = false; + Map includes; + if (randomBoolean()) { + includes = randomMap(1, 100); + hasAtLeastOneMap = true; + } else { + includes = randomBoolean() ? null : Collections.emptyMap(); + } + Map excludes; + if (randomBoolean()) { + hasAtLeastOneMap = true; + excludes = randomMap(1, 100); + } else { + excludes = randomBoolean() ? null : Collections.emptyMap(); + } + Map requires; + if (hasAtLeastOneMap == false || randomBoolean()) { + requires = randomMap(1, 100); + } else { + requires = randomBoolean() ? null : Collections.emptyMap(); + } + Integer numberOfReplicas = randomBoolean() ? null : randomIntBetween(0, 10); + return new AllocateAction(numberOfReplicas, includes, excludes, requires); + } + + @Override + protected AllocateAction doParseInstance(XContentParser parser) { + return AllocateAction.parse(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + public void testAllMapsNullOrEmpty() { + Map include = randomBoolean() ? null : Collections.emptyMap(); + Map exclude = randomBoolean() ? null : Collections.emptyMap(); + Map require = randomBoolean() ? null : Collections.emptyMap(); + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, + () -> new AllocateAction(null, include, exclude, require)); + assertEquals("At least one of " + AllocateAction.INCLUDE_FIELD.getPreferredName() + ", " + + AllocateAction.EXCLUDE_FIELD.getPreferredName() + " or " + AllocateAction.REQUIRE_FIELD.getPreferredName() + + "must contain attributes for action " + AllocateAction.NAME, exception.getMessage()); + } + + public void testInvalidNumberOfReplicas() { + Map include = randomMap(1, 5); + Map exclude = randomBoolean() ? null : Collections.emptyMap(); + Map require = randomBoolean() ? 
null : Collections.emptyMap(); + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, + () -> new AllocateAction(randomIntBetween(-1000, -1), include, exclude, require)); + assertEquals("[" + AllocateAction.NUMBER_OF_REPLICAS_FIELD.getPreferredName() + "] must be >= 0", exception.getMessage()); + } + + public static Map randomMap(int minEntries, int maxEntries) { + Map map = new HashMap<>(); + int numIncludes = randomIntBetween(minEntries, maxEntries); + for (int i = 0; i < numIncludes; i++) { + map.put(randomAlphaOfLengthBetween(1, 20), randomAlphaOfLengthBetween(1, 20)); + } + return map; + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/DeleteActionTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/DeleteActionTests.java new file mode 100644 index 0000000000000..fb7deb97a2787 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/DeleteActionTests.java @@ -0,0 +1,40 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +public class DeleteActionTests extends AbstractXContentTestCase { + + @Override + protected DeleteAction createTestInstance() { + return new DeleteAction(); + } + + @Override + protected DeleteAction doParseInstance(XContentParser parser) { + return DeleteAction.parse(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/DeleteLifecyclePolicyRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/DeleteLifecyclePolicyRequestTests.java new file mode 100644 index 0000000000000..01f6288d81d4b --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/DeleteLifecyclePolicyRequestTests.java @@ -0,0 +1,39 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.test.ESTestCase; + +public class DeleteLifecyclePolicyRequestTests extends ESTestCase { + + private DeleteLifecyclePolicyRequest createTestInstance() { + return new DeleteLifecyclePolicyRequest(randomAlphaOfLengthBetween(2, 20)); + } + + public void testValidate() { + DeleteLifecyclePolicyRequest req = createTestInstance(); + assertFalse(req.validate().isPresent()); + + } + + public void testValidationFailure() { + expectThrows(IllegalArgumentException.class, () -> new DeleteLifecyclePolicyRequest(randomFrom("", null))); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/ExplainLifecycleRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/ExplainLifecycleRequestTests.java new file mode 100644 index 0000000000000..933503e629b06 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/ExplainLifecycleRequestTests.java @@ -0,0 +1,75 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
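DeleteLifecyclePolicyRequest follows the client convention these request tests lean on: obviously invalid constructor arguments fail fast, and validate() returns an empty Optional for a well-formed request. A rough sketch of that contract with a hypothetical request class (the real client returns its own ValidationException type, simplified here to RuntimeException):

-------------------------------------------------
import java.util.Optional;

import org.elasticsearch.common.Strings;

// Hypothetical request illustrating the validation convention the tests
// exercise: the constructor rejects null/empty names, so validate() has
// nothing left to complain about.
final class DemoPolicyRequest {
    private final String policyName;

    DemoPolicyRequest(String policyName) {
        if (Strings.hasText(policyName) == false) {   // rejects both null and ""
            throw new IllegalArgumentException("policy name is required");
        }
        this.policyName = policyName;
    }

    Optional<RuntimeException> validate() {
        return Optional.empty();   // constructor already enforced the invariants
    }
}
-------------------------------------------------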
+ */ + +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.EqualsHashCodeTestUtils; + +import java.util.Arrays; + +public class ExplainLifecycleRequestTests extends ESTestCase { + + public void testEqualsAndHashcode() { + EqualsHashCodeTestUtils.checkEqualsAndHashCode(createTestInstance(), this::copy, this::mutateInstance); + } + + private ExplainLifecycleRequest createTestInstance() { + ExplainLifecycleRequest request = new ExplainLifecycleRequest(); + if (randomBoolean()) { + request.indices(generateRandomStringArray(20, 20, false, true)); + } + if (randomBoolean()) { + IndicesOptions indicesOptions = IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), + randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean()); + request.indicesOptions(indicesOptions); + } + return request; + } + + private ExplainLifecycleRequest mutateInstance(ExplainLifecycleRequest instance) { + String[] indices = instance.indices(); + IndicesOptions indicesOptions = instance.indicesOptions(); + switch (between(0, 1)) { + case 0: + indices = randomValueOtherThanMany(i -> Arrays.equals(i, instance.indices()), + () -> generateRandomStringArray(20, 10, false, true)); + break; + case 1: + indicesOptions = randomValueOtherThan(indicesOptions, () -> IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), + randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean())); + break; + default: + throw new AssertionError("Illegal randomisation branch"); + } + ExplainLifecycleRequest newRequest = new ExplainLifecycleRequest(); + newRequest.indices(indices); + newRequest.indicesOptions(indicesOptions); + return newRequest; + } + + private ExplainLifecycleRequest copy(ExplainLifecycleRequest original) { + ExplainLifecycleRequest copy = new ExplainLifecycleRequest(); + copy.indices(original.indices()); + copy.indicesOptions(original.indicesOptions()); + return copy; + } + +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/ExplainLifecycleResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/ExplainLifecycleResponseTests.java new file mode 100644 index 0000000000000..26eacb04b024f --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/ExplainLifecycleResponseTests.java @@ -0,0 +1,61 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
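The copy/mutateInstance pair in ExplainLifecycleRequestTests drives EqualsHashCodeTestUtils: a copy must compare equal with an identical hash code, while a single-field mutation must not. A generic sketch of that checker's core contract (assumed behavior, not the framework class itself):

-------------------------------------------------
import java.util.Objects;
import java.util.function.Function;
import java.util.function.Supplier;

// Sketch of the copy/mutate contract: equality and hashCode must survive a
// copy, and must break under a mutation of any one field.
final class EqualsContractSketch {
    static <T> void check(Supplier<T> original, Function<T, T> copy, Function<T, T> mutate) {
        T instance = original.get();
        T duplicate = copy.apply(instance);
        if (instance.equals(duplicate) == false || instance.hashCode() != duplicate.hashCode()) {
            throw new AssertionError("copy must be equal and hash identically");
        }
        T mutated = mutate.apply(instance);
        if (Objects.equals(instance, mutated)) {
            throw new AssertionError("mutation must produce a non-equal instance");
        }
    }
}
-------------------------------------------------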
+ */ +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.cluster.ClusterModule; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +public class ExplainLifecycleResponseTests extends AbstractXContentTestCase { + + @Override + protected ExplainLifecycleResponse createTestInstance() { + Map indexResponses = new HashMap<>(); + for (int i = 0; i < randomIntBetween(0, 2); i++) { + IndexLifecycleExplainResponse indexResponse = IndexExplainResponseTests.randomIndexExplainResponse(); + indexResponses.put(indexResponse.getIndex(), indexResponse); + } + return new ExplainLifecycleResponse(indexResponses); + } + + @Override + protected ExplainLifecycleResponse doParseInstance(XContentParser parser) throws IOException { + return ExplainLifecycleResponse.fromXContent(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + @Override + protected NamedXContentRegistry xContentRegistry() { + List entries = new ArrayList<>(ClusterModule.getNamedXWriteables()); + entries.add(new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(DeleteAction.NAME), DeleteAction::parse)); + return new NamedXContentRegistry(entries); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/ForceMergeActionTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/ForceMergeActionTests.java new file mode 100644 index 0000000000000..16fafcfa24015 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/ForceMergeActionTests.java @@ -0,0 +1,66 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
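The registry built in xContentRegistry() is what lets these response parsers resolve a polymorphic LifecycleAction from the field name encountered in the JSON. A minimal sketch of that wiring, assuming the same package as the classes under test:

-------------------------------------------------
package org.elasticsearch.client.indexlifecycle;

import java.util.ArrayList;
import java.util.List;

import org.elasticsearch.cluster.ClusterModule;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;

// Sketch: register one parser per action name so a policy or response parser
// can dispatch on the field it meets (e.g. "delete" -> DeleteAction.parse).
final class RegistrySketch {
    static NamedXContentRegistry ilmRegistry() {
        List<NamedXContentRegistry.Entry> entries = new ArrayList<>(ClusterModule.getNamedXWriteables());
        entries.add(new NamedXContentRegistry.Entry(
            LifecycleAction.class, new ParseField(DeleteAction.NAME), DeleteAction::parse));
        return new NamedXContentRegistry(entries);
    }
}
-------------------------------------------------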
+ */ +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.DeprecationHandler; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; + +import static org.hamcrest.Matchers.equalTo; + +public class ForceMergeActionTests extends AbstractXContentTestCase { + + @Override + protected ForceMergeAction doParseInstance(XContentParser parser) { + return ForceMergeAction.parse(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + @Override + protected ForceMergeAction createTestInstance() { + return randomInstance(); + } + + static ForceMergeAction randomInstance() { + return new ForceMergeAction(randomIntBetween(1, 100)); + } + + public void testMissingMaxNumSegments() throws IOException { + BytesReference emptyObject = BytesReference.bytes(JsonXContent.contentBuilder().startObject().endObject()); + XContentParser parser = XContentHelper.createParser(null, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, + emptyObject, XContentType.JSON); + Exception e = expectThrows(IllegalArgumentException.class, () -> ForceMergeAction.parse(parser)); + assertThat(e.getMessage(), equalTo("Required [max_num_segments]")); + } + + public void testInvalidNegativeSegmentNumber() { + Exception r = expectThrows(IllegalArgumentException.class, () -> new ForceMergeAction(randomIntBetween(-10, 0))); + assertThat(r.getMessage(), equalTo("[max_num_segments] must be a positive integer")); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/GetLifecyclePolicyRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/GetLifecyclePolicyRequestTests.java new file mode 100644 index 0000000000000..06d28207ce93a --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/GetLifecyclePolicyRequestTests.java @@ -0,0 +1,45 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
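The "Required [max_num_segments]" failure asserted in testMissingMaxNumSegments is the signature behavior of a ConstructingObjectParser with a mandatory constructor argument; a sketch of how such a parser is declared (assuming the real ForceMergeAction parser is built the same way):

-------------------------------------------------
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.XContentParser;

// Sketch: constructorArg() marks the field as required, so parsing an empty
// object throws IllegalArgumentException("Required [max_num_segments]").
final class ForceMergeParserSketch {
    private static final ConstructingObjectParser<Integer, Void> PARSER =
        new ConstructingObjectParser<>("force_merge_sketch", a -> (Integer) a[0]);
    static {
        PARSER.declareInt(ConstructingObjectParser.constructorArg(), new ParseField("max_num_segments"));
    }

    static int parseMaxNumSegments(XContentParser parser) {
        return PARSER.apply(parser, null);
    }
}
-------------------------------------------------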
+ */ + +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.test.ESTestCase; + +public class GetLifecyclePolicyRequestTests extends ESTestCase { + + private GetLifecyclePolicyRequest createTestInstance() { + int numPolicies = randomIntBetween(0, 10); + String[] policyNames = new String[numPolicies]; + for (int i = 0; i < numPolicies; i++) { + policyNames[i] = "policy-" + randomAlphaOfLengthBetween(2, 5); + } + return new GetLifecyclePolicyRequest(policyNames); + } + + public void testValidation() { + GetLifecyclePolicyRequest request = createTestInstance(); + assertFalse(request.validate().isPresent()); + } + + public void testNullPolicyNameShouldFail() { + expectThrows(IllegalArgumentException.class, + () -> new GetLifecyclePolicyRequest(randomAlphaOfLengthBetween(2,20), null, randomAlphaOfLengthBetween(2,20))); + } + +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/GetLifecyclePolicyResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/GetLifecyclePolicyResponseTests.java new file mode 100644 index 0000000000000..89dfbb8635332 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/GetLifecyclePolicyResponseTests.java @@ -0,0 +1,73 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
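GetLifecyclePolicyRequest takes a varargs list of policy names and, as testNullPolicyNameShouldFail pins down, is expected to reject a null entry eagerly. A hypothetical mirror of that check:

-------------------------------------------------
// Hypothetical request class mirroring the eager null-check the test
// exercises: any null name in the varargs fails construction.
final class PolicyNamesSketch {
    private final String[] policyNames;

    PolicyNamesSketch(String... policyNames) {
        for (String name : policyNames) {
            if (name == null) {
                throw new IllegalArgumentException("policy name must not be null");
            }
        }
        this.policyNames = policyNames;
    }
}
-------------------------------------------------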
+ */ + +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.cluster.ClusterModule; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import static org.elasticsearch.client.indexlifecycle.LifecyclePolicyTests.createRandomPolicy; + +public class GetLifecyclePolicyResponseTests extends AbstractXContentTestCase { + + @Override + protected GetLifecyclePolicyResponse createTestInstance() { + int numPolicies = randomIntBetween(1, 10); + ImmutableOpenMap.Builder policies = ImmutableOpenMap.builder(); + for (int i = 0; i < numPolicies; i++) { + String policyName = "policy-" + randomAlphaOfLengthBetween(2, 5); + LifecyclePolicy policy = createRandomPolicy(policyName); + policies.put(policyName, new LifecyclePolicyMetadata(policy, randomLong(), randomLong())); + } + return new GetLifecyclePolicyResponse(policies.build()); + } + + @Override + protected GetLifecyclePolicyResponse doParseInstance(XContentParser parser) throws IOException { + return GetLifecyclePolicyResponse.fromXContent(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + @Override + protected NamedXContentRegistry xContentRegistry() { + List entries = new ArrayList<>(ClusterModule.getNamedXWriteables()); + entries.addAll(Arrays.asList( + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(AllocateAction.NAME), AllocateAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(DeleteAction.NAME), DeleteAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(ForceMergeAction.NAME), ForceMergeAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(ReadOnlyAction.NAME), ReadOnlyAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(RolloverAction.NAME), RolloverAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(ShrinkAction.NAME), ShrinkAction::parse) + )); + return new NamedXContentRegistry(entries); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/IndexExplainResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/IndexExplainResponseTests.java new file mode 100644 index 0000000000000..fb7e73ee62191 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/IndexExplainResponseTests.java @@ -0,0 +1,122 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.cluster.ClusterModule; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Objects; +import java.util.function.Supplier; + +public class IndexExplainResponseTests extends AbstractXContentTestCase { + + static IndexLifecycleExplainResponse randomIndexExplainResponse() { + if (frequently()) { + return randomManagedIndexExplainResponse(); + } else { + return randomUnmanagedIndexExplainResponse(); + } + } + + private static IndexLifecycleExplainResponse randomUnmanagedIndexExplainResponse() { + return IndexLifecycleExplainResponse.newUnmanagedIndexResponse(randomAlphaOfLength(10)); + } + + private static IndexLifecycleExplainResponse randomManagedIndexExplainResponse() { + return IndexLifecycleExplainResponse.newManagedIndexResponse(randomAlphaOfLength(10), randomAlphaOfLength(10), + randomNonNegativeLong(), randomAlphaOfLength(10), randomAlphaOfLength(10), randomAlphaOfLength(10), + randomBoolean() ? null : randomAlphaOfLength(10), randomNonNegativeLong(), randomNonNegativeLong(), randomNonNegativeLong(), + randomBoolean() ? null : new BytesArray(new RandomStepInfo(() -> randomAlphaOfLength(10)).toString()), + randomBoolean() ? 
null : PhaseExecutionInfoTests.randomPhaseExecutionInfo(""));
+    }
+
+    @Override
+    protected IndexLifecycleExplainResponse createTestInstance() {
+        return randomIndexExplainResponse();
+    }
+
+    @Override
+    protected IndexLifecycleExplainResponse doParseInstance(XContentParser parser) throws IOException {
+        return IndexLifecycleExplainResponse.PARSER.apply(parser, null);
+    }
+
+    @Override
+    protected boolean supportsUnknownFields() {
+        return false;
+    }
+
+    private static class RandomStepInfo implements ToXContentObject {
+
+        private final String key;
+        private final String value;
+
+        RandomStepInfo(Supplier<String> randomStringSupplier) {
+            this.key = randomStringSupplier.get();
+            this.value = randomStringSupplier.get();
+        }
+
+        @Override
+        public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+            builder.startObject();
+            builder.field(key, value);
+            builder.endObject();
+            return builder;
+        }
+
+        @Override
+        public int hashCode() {
+            return Objects.hash(key, value);
+        }
+
+        @Override
+        public boolean equals(Object obj) {
+            if (obj == null) {
+                return false;
+            }
+            if (getClass() != obj.getClass()) {
+                return false;
+            }
+            RandomStepInfo other = (RandomStepInfo) obj;
+            return Objects.equals(key, other.key) && Objects.equals(value, other.value);
+        }
+
+        @Override
+        public String toString() {
+            return Strings.toString(this);
+        }
+    }
+
+    @Override
+    protected NamedXContentRegistry xContentRegistry() {
+        List<NamedXContentRegistry.Entry> entries = new ArrayList<>(ClusterModule.getNamedXWriteables());
+        entries.add(new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(DeleteAction.NAME), DeleteAction::parse));
+        return new NamedXContentRegistry(entries);
+    }
+}
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/LifecycleManagementStatusResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/LifecycleManagementStatusResponseTests.java
new file mode 100644
index 0000000000000..144039b8995c6
--- /dev/null
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/LifecycleManagementStatusResponseTests.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
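RandomStepInfo above shows the ToXContentObject idiom this test uses for opaque step info: a single key/value object, with Strings.toString(...) rendering the builder to JSON. A standalone sketch of the same idiom:

-------------------------------------------------
import java.io.IOException;

import org.elasticsearch.common.Strings;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;

// Minimal ToXContentObject in the spirit of RandomStepInfo: one key/value
// pair rendered as {"key":"value"}; toString delegates to the builder.
final class StepInfoSketch implements ToXContentObject {
    private final String key;
    private final String value;

    StepInfoSketch(String key, String value) {
        this.key = key;
        this.value = value;
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        return builder.startObject().field(key, value).endObject();
    }

    @Override
    public String toString() {
        return Strings.toString(this);   // e.g. {"retry_count":"3"}
    }
}
-------------------------------------------------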
+ */ + +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.common.xcontent.DeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.test.ESTestCase; +import org.hamcrest.CoreMatchers; + +import java.io.IOException; +import java.util.EnumSet; +import java.util.stream.Collectors; + +public class LifecycleManagementStatusResponseTests extends ESTestCase { + + public void testAllValidStatuses() { + EnumSet.allOf(OperationMode.class) + .forEach(e -> assertEquals(new LifecycleManagementStatusResponse(e.name()).getOperationMode(), e)); + } + + public void testXContent() throws IOException { + XContentType xContentType = XContentType.JSON; + String mode = randomFrom(EnumSet.allOf(OperationMode.class) + .stream().map(Enum::name).collect(Collectors.toList())); + XContentParser parser = xContentType.xContent().createParser(NamedXContentRegistry.EMPTY, + DeprecationHandler.THROW_UNSUPPORTED_OPERATION, "{\"operation_mode\" : \"" + mode + "\"}"); + assertEquals(LifecycleManagementStatusResponse.fromXContent(parser).getOperationMode(), OperationMode.fromString(mode)); + } + + public void testXContentInvalid() throws IOException { + XContentType xContentType = XContentType.JSON; + String mode = randomAlphaOfLength(10); + XContentParser parser = xContentType.xContent().createParser(NamedXContentRegistry.EMPTY, + DeprecationHandler.THROW_UNSUPPORTED_OPERATION, "{\"operation_mode\" : \"" + mode + "\"}"); + Exception e = expectThrows(IllegalArgumentException.class, () -> LifecycleManagementStatusResponse.fromXContent(parser)); + assertThat(e.getMessage(), CoreMatchers.containsString("failed to parse field [operation_mode]")); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/LifecyclePolicyMetadataTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/LifecyclePolicyMetadataTests.java new file mode 100644 index 0000000000000..548ba366b640e --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/LifecyclePolicyMetadataTests.java @@ -0,0 +1,69 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
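The status response is a one-field JSON object: the tests above parse {"operation_mode": "..."} and map the string onto the OperationMode enum, failing on unknown names. A sketch of reading that field with a raw XContentParser, under the assumption that the body contains exactly one string field:

-------------------------------------------------
import java.io.IOException;

import org.elasticsearch.common.xcontent.DeprecationHandler;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;

// Sketch of the parse path the status tests rely on: walk the three tokens
// of a one-field object and return the raw mode string (e.g. "RUNNING").
final class StatusParseSketch {
    static String readOperationMode(String json) throws IOException {
        XContentParser parser = XContentType.JSON.xContent().createParser(
            NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json);
        parser.nextToken();   // START_OBJECT
        parser.nextToken();   // FIELD_NAME "operation_mode"
        parser.nextToken();   // VALUE_STRING
        return parser.text();
    }
}
-------------------------------------------------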
+ */ + +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.cluster.ClusterModule; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import static org.elasticsearch.client.indexlifecycle.LifecyclePolicyTests.createRandomPolicy; + +public class LifecyclePolicyMetadataTests extends AbstractXContentTestCase { + + private String policyName; + + @Override + protected LifecyclePolicyMetadata createTestInstance() { + policyName = randomAlphaOfLengthBetween(5,20); + LifecyclePolicy policy = createRandomPolicy(policyName); + return new LifecyclePolicyMetadata(policy, randomLong(), randomLong()); + } + + @Override + protected LifecyclePolicyMetadata doParseInstance(XContentParser parser) throws IOException { + return LifecyclePolicyMetadata.parse(parser, policyName); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + @Override + protected NamedXContentRegistry xContentRegistry() { + List entries = new ArrayList<>(ClusterModule.getNamedXWriteables()); + entries.addAll(Arrays.asList( + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(AllocateAction.NAME), AllocateAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(DeleteAction.NAME), DeleteAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(ForceMergeAction.NAME), ForceMergeAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(ReadOnlyAction.NAME), ReadOnlyAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(RolloverAction.NAME), RolloverAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(ShrinkAction.NAME), ShrinkAction::parse) + )); + return new NamedXContentRegistry(entries); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/LifecyclePolicyTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/LifecyclePolicyTests.java new file mode 100644 index 0000000000000..024cb13d8df37 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/LifecyclePolicyTests.java @@ -0,0 +1,243 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */
+package org.elasticsearch.client.indexlifecycle;
+
+import org.elasticsearch.cluster.ClusterModule;
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.util.set.Sets;
+import org.elasticsearch.common.xcontent.NamedXContentRegistry;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.AbstractXContentTestCase;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class LifecyclePolicyTests extends AbstractXContentTestCase<LifecyclePolicy> {
+    private static final Set<String> VALID_HOT_ACTIONS = Sets.newHashSet(RolloverAction.NAME);
+    private static final Set<String> VALID_WARM_ACTIONS = Sets.newHashSet(AllocateAction.NAME, ForceMergeAction.NAME,
+        ReadOnlyAction.NAME, ShrinkAction.NAME);
+    private static final Set<String> VALID_COLD_ACTIONS = Sets.newHashSet(AllocateAction.NAME);
+    private static final Set<String> VALID_DELETE_ACTIONS = Sets.newHashSet(DeleteAction.NAME);
+
+    private String lifecycleName;
+
+    @Override
+    protected LifecyclePolicy doParseInstance(XContentParser parser) {
+        return LifecyclePolicy.parse(parser, lifecycleName);
+    }
+
+    @Override
+    protected boolean supportsUnknownFields() {
+        return false;
+    }
+
+    @Override
+    protected NamedXContentRegistry xContentRegistry() {
+        List<NamedXContentRegistry.Entry> entries = new ArrayList<>(ClusterModule.getNamedXWriteables());
+        entries.addAll(Arrays.asList(
+            new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(AllocateAction.NAME), AllocateAction::parse),
+            new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(DeleteAction.NAME), DeleteAction::parse),
+            new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(ForceMergeAction.NAME), ForceMergeAction::parse),
+            new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(ReadOnlyAction.NAME), ReadOnlyAction::parse),
+            new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(RolloverAction.NAME), RolloverAction::parse),
+            new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(ShrinkAction.NAME), ShrinkAction::parse)
+        ));
+        return new NamedXContentRegistry(entries);
+    }
+
+    @Override
+    protected LifecyclePolicy createTestInstance() {
+        lifecycleName = randomAlphaOfLength(5);
+        return createRandomPolicy(lifecycleName);
+    }
+
+    public void testValidatePhases() {
+        boolean invalid = randomBoolean();
+        String phaseName = randomFrom("hot", "warm", "cold", "delete");
+        if (invalid) {
+            phaseName += randomAlphaOfLength(5);
+        }
+        Map<String, Phase> phases = Collections.singletonMap(phaseName,
+            new Phase(phaseName, TimeValue.ZERO, Collections.emptyMap()));
+        if (invalid) {
+            Exception e = expectThrows(IllegalArgumentException.class, () -> new LifecyclePolicy(lifecycleName, phases));
+            assertThat(e.getMessage(), equalTo("Lifecycle does not support phase [" + phaseName + "]"));
+        } else {
+            new LifecyclePolicy(lifecycleName, phases);
+        }
+    }
+
+    public void testValidateHotPhase() {
+        LifecycleAction invalidAction = null;
+        Map<String, LifecycleAction> actions = randomSubsetOf(VALID_HOT_ACTIONS)
+            .stream().map(this::getTestAction).collect(Collectors.toMap(LifecycleAction::getName, Function.identity()));
+        if (randomBoolean()) {
+            invalidAction = getTestAction(randomFrom("allocate", "forcemerge", "delete", "shrink"));
+            actions.put(invalidAction.getName(), invalidAction);
+        }
+        Map<String, Phase> hotPhase = Collections.singletonMap("hot",
+            new Phase("hot", TimeValue.ZERO, actions));
+
+        if (invalidAction != null) {
+            Exception e = expectThrows(IllegalArgumentException.class,
+                () -> new LifecyclePolicy(lifecycleName, hotPhase));
+            assertThat(e.getMessage(),
+                equalTo("invalid action [" + invalidAction.getName() + "] defined in phase [hot]"));
+        } else {
+            new LifecyclePolicy(lifecycleName, hotPhase);
+        }
+    }
+
+    public void testValidateWarmPhase() {
+        LifecycleAction invalidAction = null;
+        Map<String, LifecycleAction> actions = randomSubsetOf(VALID_WARM_ACTIONS)
+            .stream().map(this::getTestAction).collect(Collectors.toMap(LifecycleAction::getName, Function.identity()));
+        if (randomBoolean()) {
+            invalidAction = getTestAction(randomFrom("rollover", "delete"));
+            actions.put(invalidAction.getName(), invalidAction);
+        }
+        Map<String, Phase> warmPhase = Collections.singletonMap("warm",
+            new Phase("warm", TimeValue.ZERO, actions));
+
+        if (invalidAction != null) {
+            Exception e = expectThrows(IllegalArgumentException.class,
+                () -> new LifecyclePolicy(lifecycleName, warmPhase));
+            assertThat(e.getMessage(),
+                equalTo("invalid action [" + invalidAction.getName() + "] defined in phase [warm]"));
+        } else {
+            new LifecyclePolicy(lifecycleName, warmPhase);
+        }
+    }
+
+    public void testValidateColdPhase() {
+        LifecycleAction invalidAction = null;
+        Map<String, LifecycleAction> actions = randomSubsetOf(VALID_COLD_ACTIONS)
+            .stream().map(this::getTestAction).collect(Collectors.toMap(LifecycleAction::getName, Function.identity()));
+        if (randomBoolean()) {
+            invalidAction = getTestAction(randomFrom("rollover", "delete", "forcemerge", "shrink"));
+            actions.put(invalidAction.getName(), invalidAction);
+        }
+        Map<String, Phase> coldPhase = Collections.singletonMap("cold",
+            new Phase("cold", TimeValue.ZERO, actions));
+
+        if (invalidAction != null) {
+            Exception e = expectThrows(IllegalArgumentException.class,
+                () -> new LifecyclePolicy(lifecycleName, coldPhase));
+            assertThat(e.getMessage(),
+                equalTo("invalid action [" + invalidAction.getName() + "] defined in phase [cold]"));
+        } else {
+            new LifecyclePolicy(lifecycleName, coldPhase);
+        }
+    }
+
+    public void testValidateDeletePhase() {
+        LifecycleAction invalidAction = null;
+        Map<String, LifecycleAction> actions = VALID_DELETE_ACTIONS
+            .stream().map(this::getTestAction).collect(Collectors.toMap(LifecycleAction::getName, Function.identity()));
+        if (randomBoolean()) {
+            invalidAction = getTestAction(randomFrom("allocate", "rollover", "forcemerge", "shrink"));
+            actions.put(invalidAction.getName(), invalidAction);
+        }
+        Map<String, Phase> deletePhase = Collections.singletonMap("delete",
+            new Phase("delete", TimeValue.ZERO, actions));
+
+        if (invalidAction != null) {
+            Exception e = expectThrows(IllegalArgumentException.class,
+                () -> new LifecyclePolicy(lifecycleName, deletePhase));
+            assertThat(e.getMessage(),
+                equalTo("invalid action [" + invalidAction.getName() + "] defined in phase [delete]"));
+        } else {
+            new LifecyclePolicy(lifecycleName, deletePhase);
+        }
+    }
+
+    public static LifecyclePolicy createRandomPolicy(String lifecycleName) {
+        List<String> phaseNames = randomSubsetOf(Arrays.asList("hot", "warm", "cold", "delete"));
+        Map<String, Phase> phases = new HashMap<>(phaseNames.size());
+        Function<String, Set<String>> validActions = (phase) -> {
+            switch (phase) {
+                case "hot":
+                    return VALID_HOT_ACTIONS;
+                case "warm":
+                    return VALID_WARM_ACTIONS;
+                case "cold":
+                    return VALID_COLD_ACTIONS;
+                case "delete":
+                    return VALID_DELETE_ACTIONS;
+                default:
+                    throw new IllegalArgumentException("invalid phase [" + phase + "]");
+            }};
+        Function<String, LifecycleAction> randomAction = (action) -> {
+            switch (action) {
+                case AllocateAction.NAME:
+                    return AllocateActionTests.randomInstance();
+                case DeleteAction.NAME:
+                    return new DeleteAction();
+                case ForceMergeAction.NAME:
+                    return ForceMergeActionTests.randomInstance();
+                case ReadOnlyAction.NAME:
+                    return new ReadOnlyAction();
+                case RolloverAction.NAME:
+                    return RolloverActionTests.randomInstance();
+                case ShrinkAction.NAME:
+                    return ShrinkActionTests.randomInstance();
+                default:
+                    throw new IllegalArgumentException("invalid action [" + action + "]");
+            }};
+        for (String phase : phaseNames) {
+            TimeValue after = TimeValue.parseTimeValue(randomTimeValue(0, 1000000000, "s", "m", "h", "d"), "test_after");
+            Map<String, LifecycleAction> actions = new HashMap<>();
+            List<String> actionNames = randomSubsetOf(validActions.apply(phase));
+            for (String action : actionNames) {
+                actions.put(action, randomAction.apply(action));
+            }
+            phases.put(phase, new Phase(phase, after, actions));
+        }
+        return new LifecyclePolicy(lifecycleName, phases);
+    }
+
+    private LifecycleAction getTestAction(String actionName) {
+        switch (actionName) {
+            case AllocateAction.NAME:
+                return AllocateActionTests.randomInstance();
+            case DeleteAction.NAME:
+                return new DeleteAction();
+            case ForceMergeAction.NAME:
+                return ForceMergeActionTests.randomInstance();
+            case ReadOnlyAction.NAME:
+                return new ReadOnlyAction();
+            case RolloverAction.NAME:
+                return RolloverActionTests.randomInstance();
+            case ShrinkAction.NAME:
+                return ShrinkActionTests.randomInstance();
+            default:
+                throw new IllegalArgumentException("unsupported phase action [" + actionName + "]");
+        }
+    }
+}
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/OperationModeTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/OperationModeTests.java
new file mode 100644
index 0000000000000..27651ba4a8c41
--- /dev/null
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/OperationModeTests.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
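The four phase-validation tests above pin down one compatibility table between phases and actions. Condensed into code (action names taken from the NAME constants used in the tests; "readonly" is assumed to be ReadOnlyAction's name):

-------------------------------------------------
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

// Condensed view of the phase/action compatibility the validation tests walk
// through, derived from the VALID_* sets above.
final class PhaseCompatibilitySketch {
    static final Map<String, Set<String>> VALID_ACTIONS = new HashMap<>();
    static {
        VALID_ACTIONS.put("hot", new HashSet<>(Arrays.asList("rollover")));
        VALID_ACTIONS.put("warm", new HashSet<>(Arrays.asList("allocate", "forcemerge", "readonly", "shrink")));
        VALID_ACTIONS.put("cold", new HashSet<>(Arrays.asList("allocate")));
        VALID_ACTIONS.put("delete", new HashSet<>(Arrays.asList("delete")));
    }

    static boolean isAllowed(String phase, String action) {
        return VALID_ACTIONS.getOrDefault(phase, Collections.emptySet()).contains(action);
    }
}
-------------------------------------------------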
+ */ + +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.test.ESTestCase; +import org.hamcrest.CoreMatchers; + +import java.util.EnumSet; + +public class OperationModeTests extends ESTestCase { + + public void testIsValidChange() { + assertFalse(OperationMode.RUNNING.isValidChange(OperationMode.RUNNING)); + assertTrue(OperationMode.RUNNING.isValidChange(OperationMode.STOPPING)); + assertFalse(OperationMode.RUNNING.isValidChange(OperationMode.STOPPED)); + + assertTrue(OperationMode.STOPPING.isValidChange(OperationMode.RUNNING)); + assertFalse(OperationMode.STOPPING.isValidChange(OperationMode.STOPPING)); + assertTrue(OperationMode.STOPPING.isValidChange(OperationMode.STOPPED)); + + assertTrue(OperationMode.STOPPED.isValidChange(OperationMode.RUNNING)); + assertFalse(OperationMode.STOPPED.isValidChange(OperationMode.STOPPING)); + assertFalse(OperationMode.STOPPED.isValidChange(OperationMode.STOPPED)); + } + + public void testFromName() { + EnumSet.allOf(OperationMode.class).forEach(e -> assertEquals(OperationMode.fromString(e.name()), e)); + } + + public void testFromNameInvalid() { + String invalidName = randomAlphaOfLength(10); + Exception e = expectThrows(IllegalArgumentException.class, () -> OperationMode.fromString(invalidName)); + assertThat(e.getMessage(), CoreMatchers.containsString(invalidName + " is not a valid operation_mode")); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/PhaseExecutionInfoTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/PhaseExecutionInfoTests.java new file mode 100644 index 0000000000000..0db9b56aea93c --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/PhaseExecutionInfoTests.java @@ -0,0 +1,67 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.cluster.ClusterModule; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; +import org.junit.Before; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +public class PhaseExecutionInfoTests extends AbstractXContentTestCase { + + static PhaseExecutionInfo randomPhaseExecutionInfo(String phaseName) { + return new PhaseExecutionInfo(randomAlphaOfLength(5), PhaseTests.randomPhase(phaseName), + randomNonNegativeLong(), randomNonNegativeLong()); + } + + String phaseName; + + @Before + public void setupPhaseName() { + phaseName = randomAlphaOfLength(7); + } + + @Override + protected PhaseExecutionInfo createTestInstance() { + return randomPhaseExecutionInfo(phaseName); + } + + @Override + protected PhaseExecutionInfo doParseInstance(XContentParser parser) throws IOException { + return PhaseExecutionInfo.parse(parser, phaseName); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + @Override + protected NamedXContentRegistry xContentRegistry() { + List entries = new ArrayList<>(ClusterModule.getNamedXWriteables()); + entries.add(new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(DeleteAction.NAME), DeleteAction::parse)); + return new NamedXContentRegistry(entries); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/PhaseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/PhaseTests.java new file mode 100644 index 0000000000000..3b4fc2fec6059 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/PhaseTests.java @@ -0,0 +1,80 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.cluster.ClusterModule; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; +import org.junit.Before; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; + +public class PhaseTests extends AbstractXContentTestCase { + private String phaseName; + + @Before + public void setup() { + phaseName = randomAlphaOfLength(20); + } + + @Override + protected Phase createTestInstance() { + return randomPhase(phaseName); + } + + static Phase randomPhase(String phaseName) { + TimeValue after = null; + if (randomBoolean()) { + after = TimeValue.parseTimeValue(randomTimeValue(0, 1000000000, "s", "m", "h", "d"), "test_after"); + } + Map actions = Collections.emptyMap(); + if (randomBoolean()) { + actions = Collections.singletonMap(DeleteAction.NAME, new DeleteAction()); + } + return new Phase(phaseName, after, actions); + } + + @Override + protected Phase doParseInstance(XContentParser parser) { + return Phase.parse(parser, phaseName); + } + + @Override + protected NamedXContentRegistry xContentRegistry() { + List entries = new ArrayList<>(ClusterModule.getNamedXWriteables()); + entries.add(new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(DeleteAction.NAME), DeleteAction::parse)); + return new NamedXContentRegistry(entries); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + public void testDefaultAfter() { + Phase phase = new Phase(randomAlphaOfLength(20), null, Collections.emptyMap()); + assertEquals(TimeValue.ZERO, phase.getMinimumAge()); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/PutLifecyclePolicyRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/PutLifecyclePolicyRequestTests.java new file mode 100644 index 0000000000000..26cfe1946ac4d --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/PutLifecyclePolicyRequestTests.java @@ -0,0 +1,45 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.test.ESTestCase; + +import static org.elasticsearch.client.indexlifecycle.LifecyclePolicyTests.createRandomPolicy; + +public class PutLifecyclePolicyRequestTests extends ESTestCase { + + private PutLifecyclePolicyRequest createTestInstance() { + return new PutLifecyclePolicyRequest(createRandomPolicy(randomAlphaOfLengthBetween(5, 20))); + } + + public void testValidation() { + PutLifecyclePolicyRequest req = createTestInstance(); + assertFalse(req.validate().isPresent()); + } + + public void testNullPolicy() { + expectThrows(IllegalArgumentException.class, () -> new PutLifecyclePolicyRequest(null)); + } + + public void testNullPolicyName() { + expectThrows(IllegalArgumentException.class, () -> new PutLifecyclePolicyRequest(createRandomPolicy(randomFrom("", null)))); + } + +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/ReadOnlyActionTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/ReadOnlyActionTests.java new file mode 100644 index 0000000000000..bf57478425cc9 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/ReadOnlyActionTests.java @@ -0,0 +1,40 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +public class ReadOnlyActionTests extends AbstractXContentTestCase { + + @Override + protected ReadOnlyAction doParseInstance(XContentParser parser) { + return ReadOnlyAction.parse(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + @Override + protected ReadOnlyAction createTestInstance() { + return new ReadOnlyAction(); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/RemoveIndexLifecyclePolicyRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/RemoveIndexLifecyclePolicyRequestTests.java new file mode 100644 index 0000000000000..d5ccabc748df5 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/RemoveIndexLifecyclePolicyRequestTests.java @@ -0,0 +1,80 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.EqualsHashCodeTestUtils; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; + +public class RemoveIndexLifecyclePolicyRequestTests extends ESTestCase { + + public void testNullIndices() { + expectThrows(NullPointerException.class, () -> new RemoveIndexLifecyclePolicyRequest(null)); + } + + public void testNullIndicesOptions() { + expectThrows(NullPointerException.class, () -> new RemoveIndexLifecyclePolicyRequest(Collections.emptyList(), null)); + } + + public void testValidate() { + RemoveIndexLifecyclePolicyRequest request = new RemoveIndexLifecyclePolicyRequest(Collections.emptyList()); + assertFalse(request.validate().isPresent()); + } + + protected RemoveIndexLifecyclePolicyRequest createInstance() { + if (randomBoolean()) { + return new RemoveIndexLifecyclePolicyRequest(Arrays.asList(generateRandomStringArray(20, 20, false)), + IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), + randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean())); + } else { + return new RemoveIndexLifecyclePolicyRequest(Arrays.asList(generateRandomStringArray(20, 20, false))); + } + } + + private RemoveIndexLifecyclePolicyRequest copyInstance(RemoveIndexLifecyclePolicyRequest req) { + return new RemoveIndexLifecyclePolicyRequest(new ArrayList<>(req.indices()), IndicesOptions.fromOptions( + req.indicesOptions().ignoreUnavailable(), req.indicesOptions().allowNoIndices(), + req.indicesOptions().expandWildcardsOpen(), req.indicesOptions().expandWildcardsClosed(), + req.indicesOptions().allowAliasesToMultipleIndices(), req.indicesOptions().forbidClosedIndices(), + req.indicesOptions().ignoreAliases(), req.indicesOptions().ignoreThrottled())); + } + + private RemoveIndexLifecyclePolicyRequest mutateInstance(RemoveIndexLifecyclePolicyRequest req) { + if (randomBoolean()) { + return new RemoveIndexLifecyclePolicyRequest(req.indices(), + randomValueOtherThan(req.indicesOptions(), () -> IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), + randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean()))); + } else { + return new RemoveIndexLifecyclePolicyRequest( + randomValueOtherThan(req.indices(), () -> Arrays.asList(generateRandomStringArray(20, 20, false))), + req.indicesOptions()); + } + } + + public void testEqualsAndHashCode() { + for (int count = 0; count < 100; ++count) { + EqualsHashCodeTestUtils.checkEqualsAndHashCode(createInstance(), this::copyInstance, this::mutateInstance); + } + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/RemoveIndexLifecyclePolicyResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/RemoveIndexLifecyclePolicyResponseTests.java new file mode 100644 index 0000000000000..1f99a2dfdfac4 --- /dev/null +++ 
b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/RemoveIndexLifecyclePolicyResponseTests.java @@ -0,0 +1,93 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.EqualsHashCodeTestUtils; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; + +import static org.elasticsearch.test.AbstractXContentTestCase.xContentTester; + +public class RemoveIndexLifecyclePolicyResponseTests extends ESTestCase { + + private void toXContent(RemoveIndexLifecyclePolicyResponse response, XContentBuilder builder) throws IOException { + builder.startObject(); + builder.field(RemoveIndexLifecyclePolicyResponse.HAS_FAILURES_FIELD.getPreferredName(), response.hasFailures()); + builder.field(RemoveIndexLifecyclePolicyResponse.FAILED_INDEXES_FIELD.getPreferredName(), response.getFailedIndexes()); + builder.endObject(); + } + + private RemoveIndexLifecyclePolicyResponse createInstance() { + List failedIndexes = Arrays.asList(generateRandomStringArray(20, 20, false)); + return new RemoveIndexLifecyclePolicyResponse(failedIndexes); + } + + private RemoveIndexLifecyclePolicyResponse copyInstance(RemoveIndexLifecyclePolicyResponse req) { + return new RemoveIndexLifecyclePolicyResponse(new ArrayList<>(req.getFailedIndexes())); + } + + private RemoveIndexLifecyclePolicyResponse mutateInstance(RemoveIndexLifecyclePolicyResponse req) { + return new RemoveIndexLifecyclePolicyResponse(randomValueOtherThan(req.getFailedIndexes(), + () -> Arrays.asList(generateRandomStringArray(20, 20, false)))); + } + + public void testFromXContent() throws IOException { + xContentTester( + this::createParser, + this::createInstance, + this::toXContent, + RemoveIndexLifecyclePolicyResponse::fromXContent) + .supportsUnknownFields(true) + .test(); + } + + public void testNullFailedIndices() { + IllegalArgumentException exception = + expectThrows(IllegalArgumentException.class, () -> new RemoveIndexLifecyclePolicyResponse(null)); + assertEquals("failed_indexes cannot be null", exception.getMessage()); + } + + public void testHasFailures() { + RemoveIndexLifecyclePolicyResponse response = new RemoveIndexLifecyclePolicyResponse(new ArrayList<>()); + assertFalse(response.hasFailures()); + assertEquals(Collections.emptyList(), response.getFailedIndexes()); + + int size = randomIntBetween(1, 10); + List failedIndexes = new ArrayList<>(size); + for (int i = 0; i < size; i++) { + failedIndexes.add(randomAlphaOfLength(20)); + } + response = new RemoveIndexLifecyclePolicyResponse(failedIndexes); + 
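+        // a non-empty failed-index list should be reported as failures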
assertTrue(response.hasFailures()); + assertEquals(failedIndexes, response.getFailedIndexes()); + } + + public void testEqualsAndHashCode() { + for (int count = 0; count < 100; ++count) { + EqualsHashCodeTestUtils.checkEqualsAndHashCode(createInstance(), this::copyInstance, this::mutateInstance); + } + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/RolloverActionTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/RolloverActionTests.java new file mode 100644 index 0000000000000..bbbdba37e5640 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/RolloverActionTests.java @@ -0,0 +1,59 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +public class RolloverActionTests extends AbstractXContentTestCase { + + @Override + protected RolloverAction doParseInstance(XContentParser parser) { + return RolloverAction.parse(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + @Override + protected RolloverAction createTestInstance() { + return randomInstance(); + } + + static RolloverAction randomInstance() { + ByteSizeUnit maxSizeUnit = randomFrom(ByteSizeUnit.values()); + ByteSizeValue maxSize = randomBoolean() ? null : new ByteSizeValue(randomNonNegativeLong() / maxSizeUnit.toBytes(1), maxSizeUnit); + Long maxDocs = randomBoolean() ? null : randomNonNegativeLong(); + TimeValue maxAge = (maxDocs == null && maxSize == null || randomBoolean()) + ? TimeValue.parseTimeValue(randomPositiveTimeValue(), "rollover_action_test") + : null; + return new RolloverAction(maxSize, maxAge, maxDocs); + } + + public void testNoConditions() { + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, + () -> new RolloverAction(null, null, null)); + assertEquals("At least one rollover condition must be set.", exception.getMessage()); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/ShrinkActionTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/ShrinkActionTests.java new file mode 100644 index 0000000000000..adeec1ff825a9 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/ShrinkActionTests.java @@ -0,0 +1,53 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; + +import static org.hamcrest.Matchers.equalTo; + +public class ShrinkActionTests extends AbstractXContentTestCase { + + @Override + protected ShrinkAction doParseInstance(XContentParser parser) throws IOException { + return ShrinkAction.parse(parser); + } + + @Override + protected ShrinkAction createTestInstance() { + return randomInstance(); + } + + static ShrinkAction randomInstance() { + return new ShrinkAction(randomIntBetween(1, 100)); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + public void testNonPositiveShardNumber() { + Exception e = expectThrows(Exception.class, () -> new ShrinkAction(randomIntBetween(-100, 0))); + assertThat(e.getMessage(), equalTo("[number_of_shards] must be greater than 0")); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/StartILMRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/StartILMRequestTests.java new file mode 100644 index 0000000000000..449ef7d1678eb --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/StartILMRequestTests.java @@ -0,0 +1,40 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.EqualsHashCodeTestUtils; + +public class StartILMRequestTests extends ESTestCase { + + protected StartILMRequest createTestInstance() { + return new StartILMRequest(); + } + + public void testValidate() { + StartILMRequest request = createTestInstance(); + assertFalse(request.validate().isPresent()); + } + + public void testEqualsAndHashcode() { + EqualsHashCodeTestUtils.checkEqualsAndHashCode(createTestInstance(), (original) -> createTestInstance()); + } + +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/StopILMRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/StopILMRequestTests.java new file mode 100644 index 0000000000000..f1618f3f0f0e3 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indexlifecycle/StopILMRequestTests.java @@ -0,0 +1,40 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.indexlifecycle; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.EqualsHashCodeTestUtils; + +public class StopILMRequestTests extends ESTestCase { + + protected StopILMRequest createTestInstance() { + return new StopILMRequest(); + } + + public void testValidate() { + StopILMRequest request = createTestInstance(); + assertFalse(request.validate().isPresent()); + } + + public void testEqualsAndHashcode() { + EqualsHashCodeTestUtils.checkEqualsAndHashCode(createTestInstance(), (original) -> createTestInstance()); + } + +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/license/StartBasicResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/license/StartBasicResponseTests.java deleted file mode 100644 index 8370a6ba9afed..0000000000000 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/license/StartBasicResponseTests.java +++ /dev/null @@ -1,103 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.client.license; - -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.client.common.ProtocolUtils; -import org.elasticsearch.test.ESTestCase; - -import java.io.IOException; -import java.util.Collections; -import java.util.HashMap; -import java.util.Map; - -import static org.hamcrest.CoreMatchers.equalTo; - -public class StartBasicResponseTests extends ESTestCase { - - public void testFromXContent() throws Exception { - StartBasicResponse.Status status = randomFrom(StartBasicResponse.Status.values()); - - boolean acknowledged = status != StartBasicResponse.Status.NEED_ACKNOWLEDGEMENT; - String acknowledgeMessage = null; - Map ackMessages = Collections.emptyMap(); - if (status != StartBasicResponse.Status.GENERATED_BASIC) { - acknowledgeMessage = randomAlphaOfLength(10); - ackMessages = randomAckMessages(); - } - - final StartBasicResponse startBasicResponse = new StartBasicResponse(status, ackMessages, acknowledgeMessage); - - XContentType xContentType = randomFrom(XContentType.values()); - XContentBuilder builder = XContentFactory.contentBuilder(xContentType); - - toXContent(startBasicResponse, builder); - - final StartBasicResponse response = StartBasicResponse.fromXContent(createParser(builder)); - assertThat(response.isAcknowledged(), equalTo(acknowledged)); - assertThat(response.isBasicStarted(), equalTo(status.isBasicStarted())); - assertThat(response.getAcknowledgeMessage(), equalTo(acknowledgeMessage)); - assertThat(ProtocolUtils.equals(response.getAcknowledgeMessages(), ackMessages), equalTo(true)); - } - - private static void toXContent(StartBasicResponse response, XContentBuilder builder) throws IOException { - builder.startObject(); - builder.field("acknowledged", response.isAcknowledged()); - if (response.isBasicStarted()) { - builder.field("basic_was_started", true); - } else { - builder.field("basic_was_started", false); - builder.field("error_message", response.getErrorMessage()); - } - if (response.getAcknowledgeMessages().isEmpty() == false) { - builder.startObject("acknowledge"); - builder.field("message", response.getAcknowledgeMessage()); - for (Map.Entry entry : response.getAcknowledgeMessages().entrySet()) { - builder.startArray(entry.getKey()); - for (String message : entry.getValue()) { - builder.value(message); - } - builder.endArray(); - } - builder.endObject(); - } - builder.endObject(); - } - - private static Map randomAckMessages() { - int nFeatures = randomIntBetween(1, 5); - - Map ackMessages = new HashMap<>(); - - for (int i = 0; i < nFeatures; i++) { - String feature = randomAlphaOfLengthBetween(9, 15); - int nMessages = randomIntBetween(1, 5); - String[] messages = new String[nMessages]; - for (int j = 0; j < nMessages; j++) { - messages[j] = randomAlphaOfLengthBetween(10, 30); - } - ackMessages.put(feature, messages); - } - - return ackMessages; - } - -} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/migration/IndexUpgradeInfoRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/migration/IndexUpgradeInfoRequestTests.java index 2375077220116..86250fdaec274 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/migration/IndexUpgradeInfoRequestTests.java +++ 
b/client/rest-high-level/src/test/java/org/elasticsearch/client/migration/IndexUpgradeInfoRequestTests.java @@ -23,8 +23,6 @@ public class IndexUpgradeInfoRequestTests extends ESTestCase { - // TODO: add to cross XPack-HLRC serialization test - public void testNullIndices() { expectThrows(NullPointerException.class, () -> new IndexUpgradeInfoRequest((String[])null)); expectThrows(NullPointerException.class, () -> new IndexUpgradeInfoRequest().indices((String[])null)); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/PutFilterRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/PutFilterRequestTests.java new file mode 100644 index 0000000000000..6b39d81f171ca --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/PutFilterRequestTests.java @@ -0,0 +1,43 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.client.ml.job.config.MlFilter; +import org.elasticsearch.client.ml.job.config.MlFilterTests; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + + +public class PutFilterRequestTests extends AbstractXContentTestCase { + + @Override + protected PutFilterRequest createTestInstance() { + return new PutFilterRequest(MlFilterTests.createRandom()); + } + + @Override + protected PutFilterRequest doParseInstance(XContentParser parser) { + return new PutFilterRequest(MlFilter.PARSER.apply(parser, null).build()); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/PutFilterResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/PutFilterResponseTests.java new file mode 100644 index 0000000000000..29eda47598b2f --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/PutFilterResponseTests.java @@ -0,0 +1,43 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.client.ml;
+
+import org.elasticsearch.client.ml.job.config.MlFilterTests;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.AbstractXContentTestCase;
+
+import java.io.IOException;
+
+public class PutFilterResponseTests extends AbstractXContentTestCase<PutFilterResponse> {
+
+    @Override
+    protected PutFilterResponse createTestInstance() {
+        return new PutFilterResponse(MlFilterTests.createRandom());
+    }
+
+    @Override
+    protected PutFilterResponse doParseInstance(XContentParser parser) throws IOException {
+        return PutFilterResponse.fromXContent(parser);
+    }
+
+    @Override
+    protected boolean supportsUnknownFields() {
+        return false;
+    }
+}
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/rollup/DeleteRollupJobRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/rollup/DeleteRollupJobRequestTests.java
new file mode 100644
index 0000000000000..c1271207d41bc
--- /dev/null
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/rollup/DeleteRollupJobRequestTests.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.client.rollup;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.AbstractXContentTestCase;
+import org.junit.Before;
+
+import java.io.IOException;
+
+public class DeleteRollupJobRequestTests extends AbstractXContentTestCase<DeleteRollupJobRequest> {
+
+    private String jobId;
+
+    @Before
+    public void setUpJobId() {
+        jobId = randomAlphaOfLengthBetween(1, 10);
+    }
+
+    @Override
+    protected DeleteRollupJobRequest createTestInstance() {
+        return new DeleteRollupJobRequest(jobId);
+    }
+
+    @Override
+    protected DeleteRollupJobRequest doParseInstance(final XContentParser parser) throws IOException {
+        return DeleteRollupJobRequest.fromXContent(parser);
+    }
+
+    @Override
+    protected boolean supportsUnknownFields() {
+        return false;
+    }
+
+    public void testRequireConfiguration() {
+        final NullPointerException e = expectThrows(NullPointerException.class, () -> new DeleteRollupJobRequest(null));
+        assertEquals("id parameter must not be null", e.getMessage());
+    }
+
+}
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/rollup/DeleteRollupJobResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/rollup/DeleteRollupJobResponseTests.java
new file mode 100644
index 0000000000000..1dc02ff386de0
--- /dev/null
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/rollup/DeleteRollupJobResponseTests.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.client.rollup;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.AbstractXContentTestCase;
+import org.junit.Before;
+
+import java.io.IOException;
+
+public class DeleteRollupJobResponseTests extends AbstractXContentTestCase<DeleteRollupJobResponse> {
+
+    private boolean acknowledged;
+
+    @Before
+    public void setupAcknowledged() {
+        acknowledged = randomBoolean();
+    }
+
+    @Override
+    protected DeleteRollupJobResponse createTestInstance() {
+        return new DeleteRollupJobResponse(acknowledged);
+    }
+
+    @Override
+    protected DeleteRollupJobResponse doParseInstance(XContentParser parser) throws IOException {
+        return DeleteRollupJobResponse.fromXContent(parser);
+    }
+
+    @Override
+    protected boolean supportsUnknownFields() {
+        return false;
+    }
+
+}
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/security/AuthenticateResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/AuthenticateResponseTests.java
new file mode 100644
index 0000000000000..ce813f5ecf59c
--- /dev/null
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/AuthenticateResponseTests.java
@@ -0,0 +1,128 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client.security;
+
+import org.elasticsearch.client.security.user.User;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.test.EqualsHashCodeTestUtils;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static org.elasticsearch.test.AbstractXContentTestCase.xContentTester;
+
+public class AuthenticateResponseTests extends ESTestCase {
+
+    public void testFromXContent() throws IOException {
+        xContentTester(
+            this::createParser,
+            this::createTestInstance,
+            this::toXContent,
+            AuthenticateResponse::fromXContent)
+            .supportsUnknownFields(false)
+            .test();
+    }
+
+    public void testEqualsAndHashCode() {
+        final AuthenticateResponse response = createTestInstance();
+        EqualsHashCodeTestUtils.checkEqualsAndHashCode(response, this::copy,
+            this::mutate);
+    }
+
+    protected AuthenticateResponse createTestInstance() {
+        final String username = randomAlphaOfLengthBetween(1, 4);
+        final List<String> roles = Arrays.asList(generateRandomStringArray(4, 4, false, true));
+        final Map<String, Object> metadata;
+        metadata = new HashMap<>();
+        if (randomBoolean()) {
+            metadata.put("string", null);
+        } else {
+            metadata.put("string", randomAlphaOfLengthBetween(0, 4));
+        }
+        if (randomBoolean()) {
+            metadata.put("string_list", null);
+        } else {
+            metadata.put("string_list", Arrays.asList(generateRandomStringArray(4, 4, false, true)));
+        }
+        final String fullName = randomFrom(random(), null, randomAlphaOfLengthBetween(0, 4));
+        final String email = randomFrom(random(), null, randomAlphaOfLengthBetween(0, 4));
+        final boolean enabled = randomBoolean();
+        return new AuthenticateResponse(new User(username, roles, metadata, fullName, email), enabled);
+    }
+
+    private void toXContent(AuthenticateResponse response, XContentBuilder builder) throws IOException {
+        final User user = response.getUser();
+        final boolean enabled = response.enabled();
+        builder.startObject();
+        builder.field(AuthenticateResponse.USERNAME.getPreferredName(), user.username());
+        builder.field(AuthenticateResponse.ROLES.getPreferredName(), user.roles());
+        builder.field(AuthenticateResponse.METADATA.getPreferredName(), user.metadata());
+        if (user.fullName() != null) {
+            builder.field(AuthenticateResponse.FULL_NAME.getPreferredName(), user.fullName());
+        }
+        if (user.email() != null) {
+            builder.field(AuthenticateResponse.EMAIL.getPreferredName(), user.email());
+        }
+        builder.field(AuthenticateResponse.ENABLED.getPreferredName(), enabled);
+        builder.endObject();
+    }
+
+    private AuthenticateResponse copy(AuthenticateResponse response) {
+        final User originalUser = response.getUser();
+        final User copyUser = new User(originalUser.username(), originalUser.roles(), originalUser.metadata(), originalUser.fullName(),
+            originalUser.email());
+        return new AuthenticateResponse(copyUser, response.enabled());
+    }
+
+    private AuthenticateResponse mutate(AuthenticateResponse response) {
+        final User originalUser = response.getUser();
+        switch (randomIntBetween(1, 6)) {
+            case 1:
+                return new AuthenticateResponse(new User(originalUser.username() + "wrong", originalUser.roles(),
+                    originalUser.metadata(), originalUser.fullName(), originalUser.email()), response.enabled());
+            case 2:
+                final Collection<String> wrongRoles = new ArrayList<>(originalUser.roles());
+                wrongRoles.add(randomAlphaOfLengthBetween(1, 4));
+                return new AuthenticateResponse(new User(originalUser.username(), wrongRoles, originalUser.metadata(),
+                    originalUser.fullName(), originalUser.email()), response.enabled());
+            case 3:
+                final Map<String, Object> wrongMetadata = new HashMap<>(originalUser.metadata());
+                wrongMetadata.put("wrong_string", randomAlphaOfLengthBetween(0, 4));
+                return new AuthenticateResponse(new User(originalUser.username(), originalUser.roles(), wrongMetadata,
+                    originalUser.fullName(), originalUser.email()), response.enabled());
+            case 4:
+                return new AuthenticateResponse(new User(originalUser.username(), originalUser.roles(), originalUser.metadata(),
+                    originalUser.fullName() + "wrong", originalUser.email()), response.enabled());
+            case 5:
+                return new AuthenticateResponse(new User(originalUser.username(), originalUser.roles(), originalUser.metadata(),
+                    originalUser.fullName(), originalUser.email() + "wrong"), response.enabled());
+            case 6:
+                return new AuthenticateResponse(new User(originalUser.username(), originalUser.roles(), originalUser.metadata(),
+                    originalUser.fullName(), originalUser.email()), !response.enabled());
+        }
+        throw new IllegalStateException("Bad random number");
+    }
+}
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/security/ClearRealmCacheResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/ClearRealmCacheResponseTests.java
new file mode 100644
index 0000000000000..d21ed1a71a0c4
--- /dev/null
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/ClearRealmCacheResponseTests.java
@@ -0,0 +1,75 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */ + +package org.elasticsearch.client.security; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.DeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; + +public class ClearRealmCacheResponseTests extends ESTestCase { + + public void testParseFromXContent() throws IOException { + final ElasticsearchException exception = new ElasticsearchException("test"); + final String nodesHeader = "\"_nodes\": { \"total\": 2, \"successful\": 1, \"failed\": 1, \"failures\": [ " + + Strings.toString(exception) + "] },"; + final String clusterName = "\"cluster_name\": \"cn\","; + try (XContentParser parser = JsonXContent.jsonXContent.createParser(NamedXContentRegistry.EMPTY, + DeprecationHandler.THROW_UNSUPPORTED_OPERATION, "{" + nodesHeader + clusterName + "\"nodes\" : {} }")) { + + ClearRealmCacheResponse response = ClearRealmCacheResponse.fromXContent(parser); + assertNotNull(response); + assertThat(response.getNodes(), empty()); + assertThat(response.getClusterName(), equalTo("cn")); + assertThat(response.getHeader().getSuccessful(), equalTo(1)); + assertThat(response.getHeader().getFailed(), equalTo(1)); + assertThat(response.getHeader().getTotal(), equalTo(2)); + assertThat(response.getHeader().getFailures(), hasSize(1)); + assertThat(response.getHeader().getFailures().get(0).getMessage(), containsString("reason=test")); + } + + try (XContentParser parser = JsonXContent.jsonXContent.createParser(NamedXContentRegistry.EMPTY, + DeprecationHandler.THROW_UNSUPPORTED_OPERATION, + "{" + nodesHeader + clusterName + "\"nodes\" : { " + + "\"id1\": { \"name\": \"a\"}, " + + "\"id2\": { \"name\": \"b\"}" + + "}}")) { + + ClearRealmCacheResponse response = ClearRealmCacheResponse.fromXContent(parser); + assertNotNull(response); + assertThat(response.getNodes(), hasSize(2)); + assertThat(response.getNodes().get(0).getId(), equalTo("id1")); + assertThat(response.getNodes().get(0).getName(), equalTo("a")); + assertThat(response.getNodes().get(1).getId(), equalTo("id2")); + assertThat(response.getNodes().get(1).getName(), equalTo("b")); + } + } + +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/xpack/XPackInfoResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/xpack/XPackInfoResponseTests.java new file mode 100644 index 0000000000000..702c4bef64bd2 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/xpack/XPackInfoResponseTests.java @@ -0,0 +1,116 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.xpack; + +import org.elasticsearch.client.license.LicenseStatus; +import org.elasticsearch.client.xpack.XPackInfoResponse.BuildInfo; +import org.elasticsearch.client.xpack.XPackInfoResponse.FeatureSetsInfo; +import org.elasticsearch.client.xpack.XPackInfoResponse.FeatureSetsInfo.FeatureSet; +import org.elasticsearch.client.xpack.XPackInfoResponse.LicenseInfo; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; +import java.util.function.Predicate; + +public class XPackInfoResponseTests extends AbstractXContentTestCase { + + @Override + protected boolean supportsUnknownFields() { + return true; + } + + protected XPackInfoResponse doParseInstance(XContentParser parser) throws IOException { + return XPackInfoResponse.fromXContent(parser); + } + + protected Predicate getRandomFieldsExcludeFilter() { + return path -> path.equals("features") + || (path.startsWith("features") && path.endsWith("native_code_info")); + } + + protected ToXContent.Params getToXContentParams() { + Map params = new HashMap<>(); + if (randomBoolean()) { + params.put("human", randomBoolean() ? "true" : "false"); + } + if (randomBoolean()) { + params.put("categories", "_none"); + } + return new ToXContent.MapParams(params); + } + + protected XPackInfoResponse createTestInstance() { + return new XPackInfoResponse( + randomBoolean() ? null : randomBuildInfo(), + randomBoolean() ? null : randomLicenseInfo(), + randomBoolean() ? null : randomFeatureSetsInfo()); + } + + private BuildInfo randomBuildInfo() { + return new BuildInfo( + randomAlphaOfLength(10), + randomAlphaOfLength(15)); + } + + private LicenseInfo randomLicenseInfo() { + return new LicenseInfo( + randomAlphaOfLength(10), + randomAlphaOfLength(4), + randomAlphaOfLength(5), + randomFrom(LicenseStatus.values()), + randomLong()); + } + + private FeatureSetsInfo randomFeatureSetsInfo() { + int size = between(0, 10); + Set featureSets = new HashSet<>(size); + while (featureSets.size() < size) { + featureSets.add(randomFeatureSet()); + } + return new FeatureSetsInfo(featureSets); + } + + private FeatureSet randomFeatureSet() { + return new FeatureSet( + randomAlphaOfLength(5), + randomBoolean() ? 
null : randomAlphaOfLength(20), + randomBoolean(), + randomBoolean(), + randomNativeCodeInfo()); + } + + private Map randomNativeCodeInfo() { + if (randomBoolean()) { + return null; + } + int size = between(0, 10); + Map nativeCodeInfo = new HashMap<>(size); + while (nativeCodeInfo.size() < size) { + nativeCodeInfo.put(randomAlphaOfLength(5), randomAlphaOfLength(5)); + } + return nativeCodeInfo; + } +} diff --git a/client/rest/src/test/java/org/elasticsearch/client/documentation/RestClientDocumentation.java b/client/rest/src/test/java/org/elasticsearch/client/documentation/RestClientDocumentation.java index 90801715b7e20..9191f5025581b 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/documentation/RestClientDocumentation.java +++ b/client/rest/src/test/java/org/elasticsearch/client/documentation/RestClientDocumentation.java @@ -36,7 +36,7 @@ import org.apache.http.ssl.SSLContextBuilder; import org.apache.http.ssl.SSLContexts; import org.apache.http.util.EntityUtils; -import org.elasticsearch.client.HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory; +import org.elasticsearch.client.HttpAsyncResponseConsumerFactory; import org.elasticsearch.client.Node; import org.elasticsearch.client.NodeSelector; import org.elasticsearch.client.Request; @@ -84,7 +84,8 @@ public class RestClientDocumentation { RequestOptions.Builder builder = RequestOptions.DEFAULT.toBuilder(); builder.addHeader("Authorization", "Bearer " + TOKEN); // <1> builder.setHttpAsyncResponseConsumerFactory( // <2> - new HeapBufferedResponseConsumerFactory(30 * 1024 * 1024 * 1024)); + new HttpAsyncResponseConsumerFactory + .HeapBufferedResponseConsumerFactory(30 * 1024 * 1024 * 1024)); COMMON_OPTIONS = builder.build(); } // end::rest-client-options-singleton diff --git a/dev-tools/es_release_notes.pl b/dev-tools/es_release_notes.pl index 4ea7e124598ec..cf19a1cd9ddf5 100755 --- a/dev-tools/es_release_notes.pl +++ b/dev-tools/es_release_notes.pl @@ -32,7 +32,7 @@ ">enhancement", ">bug", ">regression", ">upgrade" ); my %Ignore = map { $_ => 1 } - ( ">non-issue", ">refactoring", ">docs", ">test", ">test-failure", ":Core/Build" ); + ( ">non-issue", ">refactoring", ">docs", ">test", ">test-failure", ":Core/Build", "backport" ); my %Group_Labels = ( '>breaking' => 'Breaking changes', diff --git a/distribution/bwc/maintenance-bugfix-snapshot/build.gradle b/distribution/bwc/bugfix/build.gradle similarity index 100% rename from distribution/bwc/maintenance-bugfix-snapshot/build.gradle rename to distribution/bwc/bugfix/build.gradle diff --git a/distribution/bwc/build.gradle b/distribution/bwc/build.gradle index f1b2ae01fda84..c466a6ec03da7 100644 --- a/distribution/bwc/build.gradle +++ b/distribution/bwc/build.gradle @@ -17,236 +17,225 @@ * under the License. */ - - import org.apache.tools.ant.taskdefs.condition.Os import org.elasticsearch.gradle.LoggedExec import org.elasticsearch.gradle.Version +import org.elasticsearch.gradle.VersionCollection import java.nio.charset.StandardCharsets import static org.elasticsearch.gradle.BuildPlugin.getJavaHome + /** - * This is a dummy project which does a local checkout of the previous - * version's branch, and builds a snapshot. This allows backcompat - * tests to test against the next unreleased version, closest to this version, - * without relying on snapshots. + * We want to be able to do BWC tests for unreleased versions without relying on and waiting for snapshots. + * For this we need to check out and build the unreleased versions. 
+ * Since these depend on the current version, we can't name the Gradle projects statically, and don't know what the + * unreleased versions are when Gradle projects are set up, so we use "build-unreleased-version-*" as placeholders + * and configure them to build various versions here. */ -subprojects { - - Version bwcVersion = bwcVersions.getSnapshotForProject(project.name) - if (bwcVersion == null) { - // this project wont do anything - return - } - - String bwcBranch - if (project.name == 'next-minor-snapshot') { - // this is always a .x series - bwcBranch = "${bwcVersion.major}.x" - } else { - bwcBranch = "${bwcVersion.major}.${bwcVersion.minor}" - } - - apply plugin: 'distribution' - // Not published so no need to assemble - assemble.enabled = false - assemble.dependsOn.remove('buildBwcVersion') - - File checkoutDir = file("${buildDir}/bwc/checkout-${bwcBranch}") - - final String remote = System.getProperty("tests.bwc.remote", "elastic") - - final boolean gitFetchLatest - final String gitFetchLatestProperty = System.getProperty("tests.bwc.git_fetch_latest", "true") - if ("true".equals(gitFetchLatestProperty)) { - gitFetchLatest = true - } else if ("false".equals(gitFetchLatestProperty)) { - gitFetchLatest = false - } else { - throw new GradleException("tests.bwc.git_fetch_latest must be [true] or [false] but was [" + gitFetchLatestProperty + "]") - } - - task createClone(type: LoggedExec) { - onlyIf { checkoutDir.exists() == false } - commandLine = ['git', 'clone', rootDir, checkoutDir] - } - - task findRemote(type: LoggedExec) { - dependsOn createClone - workingDir = checkoutDir - commandLine = ['git', 'remote', '-v'] - ByteArrayOutputStream output = new ByteArrayOutputStream() - standardOutput = output - doLast { - project.ext.remoteExists = false - output.toString('UTF-8').eachLine { - if (it.contains("${remote}\t")) { - project.ext.remoteExists = true - } - } - } - } - - task addRemote(type: LoggedExec) { - dependsOn findRemote - onlyIf { project.ext.remoteExists == false } - workingDir = checkoutDir - commandLine = ['git', 'remote', 'add', "${remote}", "https://github.com/${remote}/elasticsearch.git"] - } - - task fetchLatest(type: LoggedExec) { - onlyIf { project.gradle.startParameter.isOffline() == false && gitFetchLatest } - dependsOn addRemote - workingDir = checkoutDir - commandLine = ['git', 'fetch', '--all'] - } - - String buildMetadataKey = "bwc_refspec_${project.path.substring(1)}" - task checkoutBwcBranch(type: LoggedExec) { - String refspec = System.getProperty("tests.bwc.refspec.${bwcBranch}", buildMetadata.get(buildMetadataKey, "${remote}/${bwcBranch}")) - dependsOn fetchLatest - workingDir = checkoutDir - commandLine = ['git', 'checkout', refspec] - doFirst { - println "Checking out elasticsearch ${refspec} for branch ${bwcBranch}" - } - } - - File buildMetadataFile = project.file("build/${project.name}/build_metadata") - task writeBuildMetadata(type: LoggedExec) { - dependsOn checkoutBwcBranch - workingDir = checkoutDir - commandLine = ['git', 'rev-parse', 'HEAD'] - ignoreExitValue = true - ByteArrayOutputStream output = new ByteArrayOutputStream() - standardOutput = output - doLast { - if (execResult.exitValue != 0) { - output.toString('UTF-8').eachLine { line -> logger.error(line) } - execResult.assertNormalExitValue() - } - project.mkdir(buildMetadataFile.parent) - String commit = output.toString('UTF-8') - buildMetadataFile.setText("${buildMetadataKey}=${commit}", 'UTF-8') - println "Checked out elasticsearch commit ${commit}"
+bwcVersions.forPreviousUnreleased { VersionCollection.UnreleasedVersionInfo unreleasedVersion -> project("${project.path}:${unreleasedVersion.gradleProjectName}") { + Version bwcVersion = unreleasedVersion.version + String bwcBranch = unreleasedVersion.branch + apply plugin: 'distribution' + // Not published so no need to assemble + assemble.enabled = false + assemble.dependsOn.remove('buildBwcVersion') + + File checkoutDir = file("${buildDir}/bwc/checkout-${bwcBranch}") + + final String remote = System.getProperty("tests.bwc.remote", "elastic") + + final boolean gitFetchLatest + final String gitFetchLatestProperty = System.getProperty("tests.bwc.git_fetch_latest", "true") + if ("true".equals(gitFetchLatestProperty)) { + gitFetchLatest = true + } else if ("false".equals(gitFetchLatestProperty)) { + gitFetchLatest = false + } else { + throw new GradleException("tests.bwc.git_fetch_latest must be [true] or [false] but was [" + gitFetchLatestProperty + "]") } - } - - List artifactFiles = [] - List projectDirs = [] - for (String project : ['zip', 'deb', 'rpm']) { - String baseDir = "distribution" - if (bwcVersion.onOrAfter('6.3.0')) { - baseDir += project == 'zip' ? '/archives' : '/packages' - // add oss variant first - projectDirs.add("${baseDir}/oss-${project}") - artifactFiles.add(file("${checkoutDir}/${baseDir}/oss-${project}/build/distributions/elasticsearch-oss-${bwcVersion}.${project}")) + + task createClone(type: LoggedExec) { + onlyIf { checkoutDir.exists() == false } + commandLine = ['git', 'clone', rootDir, checkoutDir] } - projectDirs.add("${baseDir}/${project}") - artifactFiles.add(file("${checkoutDir}/${baseDir}/${project}/build/distributions/elasticsearch-${bwcVersion}.${project}")) - } - - task buildBwcVersion(type: Exec) { - dependsOn checkoutBwcBranch, writeBuildMetadata - workingDir = checkoutDir - doFirst { - // Execution time so that the checkouts are available - List lines = file("$checkoutDir/.ci/java-versions.properties").readLines() - environment( - 'JAVA_HOME', - getJavaHome(it, Integer.parseInt( - lines - .findAll({ it.startsWith("ES_BUILD_JAVA=java") }) - .collect({ it.replace("ES_BUILD_JAVA=java", "").trim() }) - .join("!!") - )) - ) - environment( - 'RUNTIME_JAVA_HOME', - getJavaHome(it, Integer.parseInt( - lines - .findAll({ it.startsWith("ES_RUNTIME_JAVA=java") }) - .collect({ it.replace("ES_RUNTIME_JAVA=java", "").trim() }) - .join("!!") - )) - ) + + task findRemote(type: LoggedExec) { + dependsOn createClone + workingDir = checkoutDir + commandLine = ['git', 'remote', '-v'] + ByteArrayOutputStream output = new ByteArrayOutputStream() + standardOutput = output + doLast { + project.ext.remoteExists = false + output.toString('UTF-8').eachLine { + if (it.contains("${remote}\t")) { + project.ext.remoteExists = true + } + } + } } - if (Os.isFamily(Os.FAMILY_WINDOWS)) { - executable 'cmd' - args '/C', 'call', new File(checkoutDir, 'gradlew').toString() - } else { - executable new File(checkoutDir, 'gradlew').toString() + task addRemote(type: LoggedExec) { + dependsOn findRemote + onlyIf { project.ext.remoteExists == false } + workingDir = checkoutDir + commandLine = ['git', 'remote', 'add', "${remote}", "https://github.com/${remote}/elasticsearch.git"] } - if (gradle.startParameter.isOffline()) { - args "--offline" + + task fetchLatest(type: LoggedExec) { + onlyIf { project.gradle.startParameter.isOffline() == false && gitFetchLatest } + dependsOn addRemote + workingDir = checkoutDir + commandLine = ['git', 'fetch', '--all'] } - for (String dir : projectDirs) { 
- args ":${dir.replace('/', ':')}:assemble" + + String buildMetadataKey = "bwc_refspec_${project.path.substring(1)}" + task checkoutBwcBranch(type: LoggedExec) { + String refspec = System.getProperty("tests.bwc.refspec.${bwcBranch}", buildMetadata.get(buildMetadataKey, "${remote}/${bwcBranch}")) + dependsOn fetchLatest + workingDir = checkoutDir + commandLine = ['git', 'checkout', refspec] + doFirst { + println "Checking out elasticsearch ${refspec} for branch ${bwcBranch}" + } } - args "-Dbuild.snapshot=true" - final LogLevel logLevel = gradle.startParameter.logLevel - if ([LogLevel.QUIET, LogLevel.WARN, LogLevel.INFO, LogLevel.DEBUG].contains(logLevel)) { - args "--${logLevel.name().toLowerCase(Locale.ENGLISH)}" + + File buildMetadataFile = project.file("build/${project.name}/build_metadata") + task writeBuildMetadata(type: LoggedExec) { + dependsOn checkoutBwcBranch + workingDir = checkoutDir + commandLine = ['git', 'rev-parse', 'HEAD'] + ignoreExitValue = true + ByteArrayOutputStream output = new ByteArrayOutputStream() + standardOutput = output + doLast { + if (execResult.exitValue != 0) { + output.toString('UTF-8').eachLine { line -> logger.error(line) } + execResult.assertNormalExitValue() + } + project.mkdir(buildMetadataFile.parent) + String commit = output.toString('UTF-8') + buildMetadataFile.setText("${buildMetadataKey}=${commit}", 'UTF-8') + println "Checked out elasticsearch commit ${commit}" + } } - final String showStacktraceName = gradle.startParameter.showStacktrace.name() - assert ["INTERNAL_EXCEPTIONS", "ALWAYS", "ALWAYS_FULL"].contains(showStacktraceName) - if (showStacktraceName.equals("ALWAYS")) { - args "--stacktrace" - } else if (showStacktraceName.equals("ALWAYS_FULL")) { - args "--full-stacktrace" + + List artifactFiles = [] + List projectDirs = [] + for (String project : ['zip', 'deb', 'rpm']) { + String baseDir = "distribution" + if (bwcVersion.onOrAfter('6.3.0')) { + baseDir += project == 'zip' ? 
'/archives' : '/packages' + // add oss variant first + projectDirs.add("${baseDir}/oss-${project}") + artifactFiles.add(file("${checkoutDir}/${baseDir}/oss-${project}/build/distributions/elasticsearch-oss-${bwcVersion}-SNAPSHOT.${project}")) + } + projectDirs.add("${baseDir}/${project}") + artifactFiles.add(file("${checkoutDir}/${baseDir}/${project}/build/distributions/elasticsearch-${bwcVersion}-SNAPSHOT.${project}")) } - standardOutput = new IndentingOutputStream(System.out) - errorOutput = new IndentingOutputStream(System.err) - doLast { - List missing = artifactFiles.grep { file -> - false == file.exists() - } - if (false == missing.empty) { - throw new InvalidUserDataException( - "Building bwc version didn't generate expected files ${missing}") - } + + task buildBwcVersion(type: Exec) { + dependsOn checkoutBwcBranch, writeBuildMetadata + workingDir = checkoutDir + doFirst { + // Execution time so that the checkouts are available + List lines = file("${checkoutDir}/.ci/java-versions.properties").readLines() + environment( + 'JAVA_HOME', + getJavaHome(it, Integer.parseInt( + lines + .findAll({ it.startsWith("ES_BUILD_JAVA=java") }) + .collect({ it.replace("ES_BUILD_JAVA=java", "").trim() }) + .join("!!") + )) + ) + environment( + 'RUNTIME_JAVA_HOME', + getJavaHome(it, Integer.parseInt( + lines + .findAll({ it.startsWith("ES_RUNTIME_JAVA=java") }) + .collect({ it.replace("ES_RUNTIME_JAVA=java", "").trim() }) + .join("!!") + )) + ) + } + + if (Os.isFamily(Os.FAMILY_WINDOWS)) { + executable 'cmd' + args '/C', 'call', new File(checkoutDir, 'gradlew').toString() + } else { + executable new File(checkoutDir, 'gradlew').toString() + } + if (gradle.startParameter.isOffline()) { + args "--offline" + } + for (String dir : projectDirs) { + args ":${dir.replace('/', ':')}:assemble" + } + args "-Dbuild.snapshot=true" + final LogLevel logLevel = gradle.startParameter.logLevel + if ([LogLevel.QUIET, LogLevel.WARN, LogLevel.INFO, LogLevel.DEBUG].contains(logLevel)) { + args "--${logLevel.name().toLowerCase(Locale.ENGLISH)}" + } + final String showStacktraceName = gradle.startParameter.showStacktrace.name() + assert ["INTERNAL_EXCEPTIONS", "ALWAYS", "ALWAYS_FULL"].contains(showStacktraceName) + if (showStacktraceName.equals("ALWAYS")) { + args "--stacktrace" + } else if (showStacktraceName.equals("ALWAYS_FULL")) { + args "--full-stacktrace" + } + standardOutput = new IndentingOutputStream(System.out, bwcVersion) + errorOutput = new IndentingOutputStream(System.err, bwcVersion) + doLast { + List missing = artifactFiles.grep { file -> + false == file.exists() + } + if (false == missing.empty) { + throw new InvalidUserDataException( + "Building ${bwcVersion} didn't generate expected files ${missing}") + } + } } - } - - if (gradle.startParameter.taskNames == ["assemble"]) { - // Gradle needs the `artifacts` declaration, including `builtBy` bellow to make projects dependencies on this - // project work, but it will also trigger the build of these for the `assemble` task. - // Since these are only used for testing, we don't want to assemble them if `assemble` is the single command being - // ran. - logger.info("Skipping BWC builds since `assemble` is the only task name provided on the command line") - } else { - artifacts { - for (File artifactFile : artifactFiles) { - String artifactName = artifactFile.name.contains('oss') ? 
'elasticsearch-oss' : 'elasticsearch' - String suffix = artifactFile.toString()[-3..-1] - 'default' file: artifactFile, name: artifactName, type: suffix, builtBy: buildBwcVersion - } + + if (gradle.startParameter.taskNames == ["assemble"]) { + // Gradle needs the `artifacts` declaration, including `builtBy` below to make project dependencies on this + // project work, but it will also trigger the build of these for the `assemble` task. + // Since these are only used for testing, we don't want to assemble them if `assemble` is the single command being + // run. + logger.info("Skipping BWC builds since `assemble` is the only task name provided on the command line") + } else { + artifacts { + for (File artifactFile : artifactFiles) { + String artifactName = artifactFile.name.contains('oss') ? 'elasticsearch-oss' : 'elasticsearch' + String suffix = artifactFile.toString()[-3..-1] + 'default' file: artifactFile, name: artifactName, type: suffix, builtBy: buildBwcVersion + } + } } - } -} +}} class IndentingOutputStream extends OutputStream { - public static final byte[] INDENT = " [bwc] ".getBytes(StandardCharsets.UTF_8) - private final OutputStream delegate + public final byte[] indent + private final OutputStream delegate - public IndentingOutputStream(OutputStream delegate) { - this.delegate = delegate - } + public IndentingOutputStream(OutputStream delegate, Object version) { + this.delegate = delegate + indent = " [${version}] ".getBytes(StandardCharsets.UTF_8) + } - @Override - public void write(int b) { - write([b] as int[], 0, 1) - } + @Override + public void write(int b) { + write([b] as int[], 0, 1) + } - public void write(int[] bytes, int offset, int length) { - for (int i = 0; i < bytes.length; i++) { - delegate.write(bytes[i]) - if (bytes[i] == '\n') { - delegate.write(INDENT) - } + public void write(int[] bytes, int offset, int length) { + for (int i = 0; i < bytes.length; i++) { + delegate.write(bytes[i]) + if (bytes[i] == '\n') { + delegate.write(indent) + } + } } - } } diff --git a/distribution/bwc/next-bugfix-snapshot/build.gradle b/distribution/bwc/maintenance/build.gradle similarity index 100% rename from distribution/bwc/next-bugfix-snapshot/build.gradle rename to distribution/bwc/maintenance/build.gradle diff --git a/distribution/bwc/next-minor-snapshot/build.gradle b/distribution/bwc/minor/build.gradle similarity index 100% rename from distribution/bwc/next-minor-snapshot/build.gradle rename to distribution/bwc/minor/build.gradle diff --git a/distribution/bwc/staged-minor-snapshot/build.gradle b/distribution/bwc/staged/build.gradle similarity index 100% rename from distribution/bwc/staged-minor-snapshot/build.gradle rename to distribution/bwc/staged/build.gradle diff --git a/distribution/src/bin/elasticsearch-service-mgr.exe b/distribution/src/bin/elasticsearch-service-mgr.exe index 730240403a7da..e5d4b55d91628 100644 Binary files a/distribution/src/bin/elasticsearch-service-mgr.exe and b/distribution/src/bin/elasticsearch-service-mgr.exe differ diff --git a/distribution/src/bin/elasticsearch-service-x64.exe b/distribution/src/bin/elasticsearch-service-x64.exe index dab7def75583f..acd94f2507615 100644 Binary files a/distribution/src/bin/elasticsearch-service-x64.exe and b/distribution/src/bin/elasticsearch-service-x64.exe differ diff --git a/docs/Versions.asciidoc b/docs/Versions.asciidoc index 2981bce61899e..92c065fd3d4c7 100644 --- a/docs/Versions.asciidoc +++ b/docs/Versions.asciidoc @@ -1,7 +1,7 @@ :version: 6.6.0 :major-version: 6.x -:lucene_version: 7.5.0
-:lucene_version_path: 7_5_0 +:lucene_version: 7.6.0 +:lucene_version_path: 7_6_0 :branch: 6.x :jdk: 1.8.0_131 :jdk_major: 8 diff --git a/docs/build.gradle b/docs/build.gradle index 987ad79e0479b..df42ea6b80a12 100644 --- a/docs/build.gradle +++ b/docs/build.gradle @@ -1078,6 +1078,34 @@ buildRestTests.setups['calendar_outages_addevent'] = buildRestTests.setups['cale ]} ''' +// used by median absolute deviation aggregation +buildRestTests.setups['reviews'] = ''' + - do: + indices.create: + index: reviews + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + _doc: + properties: + product: + type: keyword + rating: + type: long + - do: + bulk: + index: reviews + type: _doc + refresh: true + body: | + {"index": {"_id": "1"}} + {"product": "widget-foo", "rating": 1} + {"index": {"_id": "2"}} + {"product": "widget-foo", "rating": 5} +''' + buildRestTests.setups['remote_cluster'] = buildRestTests.setups['host'] + ''' - do: cluster.put_settings: @@ -1096,3 +1124,33 @@ buildRestTests.setups['remote_cluster_and_leader_index'] = buildRestTests.setups index.number_of_shards: 1 index.soft_deletes.enabled: true ''' + +buildRestTests.setups['seats'] = ''' + - do: + indices.create: + index: seats + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + _doc: + properties: + theatre: + type: keyword + cost: + type: long + - do: + bulk: + index: seats + type: _doc + refresh: true + body: | + {"index":{}} + {"theatre": "Skyline", "cost": 1} + {"index":{}} + {"theatre": "Graye", "cost": 5} + {"index":{}} + {"theatre": "Graye", "cost": 8} + {"index":{}} + {"theatre": "Skyline", "cost": 10}''' \ No newline at end of file diff --git a/docs/java-rest/high-level/document/term-vectors.asciidoc b/docs/java-rest/high-level/document/term-vectors.asciidoc index e739e37732fd4..e8d4a25a2cac0 100644 --- a/docs/java-rest/high-level/document/term-vectors.asciidoc +++ b/docs/java-rest/high-level/document/term-vectors.asciidoc @@ -62,9 +62,9 @@ include::../execution.asciidoc[] [id="{upid}-{api}-response"] -==== TermVectorsResponse +==== Term Vectors Response -The `TermVectorsResponse` contains the following information: ++{response}+ contains the following information: ["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- include-tagged::{doc-tests-file}[{api}-response] -------------------------------------------------- @@ -77,7 +77,7 @@ include-tagged::{doc-tests-file}[{api}-response] ===== Inspecting Term Vectors -If `TermVectorsResponse` contains non-null list of term vectors, +If +{response}+ contains a non-null list of term vectors, more information about each term vector can be obtained using the following: ["source","java",subs="attributes,callouts,macros"] diff --git a/docs/java-rest/high-level/ml/put-filter.asciidoc b/docs/java-rest/high-level/ml/put-filter.asciidoc new file mode 100644 index 0000000000000..2582e7715ab59 --- /dev/null +++ b/docs/java-rest/high-level/ml/put-filter.asciidoc @@ -0,0 +1,53 @@ +-- +:api: put-filter +:request: PutFilterRequest +:response: PutFilterResponse +-- +[id="{upid}-{api}"] +=== Put Filter API + +The Put Filter API can be used to create a new {ml} filter +in the cluster. The API accepts a +{request}+ object +as a request and returns a +{response}+.
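+
+For orientation, here is a minimal, self-contained sketch of the whole round
+trip. This is a sketch only: it assumes a cluster on `localhost:9200`, and the
+`MlFilter.builder`, `putFilter`, and `getResponse` names follow the client's
+conventions rather than being copied from the test sources.
+
+[source,java]
+--------------------------------------------------
+import org.apache.http.HttpHost;
+import org.elasticsearch.client.RequestOptions;
+import org.elasticsearch.client.RestClient;
+import org.elasticsearch.client.RestHighLevelClient;
+import org.elasticsearch.client.ml.PutFilterRequest;
+import org.elasticsearch.client.ml.PutFilterResponse;
+import org.elasticsearch.client.ml.job.config.MlFilter;
+
+public class PutFilterSketch {
+    public static void main(String[] args) throws Exception {
+        try (RestHighLevelClient client = new RestHighLevelClient(
+                RestClient.builder(new HttpHost("localhost", 9200, "http")))) {
+            // The filter ID is required; the description and items are optional.
+            MlFilter filter = MlFilter.builder("my_safe_domains")
+                    .setDescription("A list of safe domains")
+                    .setItems("*.google.com", "wikipedia.org")
+                    .build();
+
+            PutFilterRequest request = new PutFilterRequest(filter);
+            PutFilterResponse response =
+                    client.machineLearning().putFilter(request, RequestOptions.DEFAULT);
+
+            // The response carries the full representation of the new filter.
+            System.out.println("created filter: " + response.getResponse().getId());
+        }
+    }
+}
+--------------------------------------------------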
+ +[id="{upid}-{api}-request"] +==== Put Filter Request + +A +{request}+ requires the following argument: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-request] +-------------------------------------------------- +<1> The configuration of the {ml} filter to create as a `MlFilter` + +[id="{upid}-{api}-config"] +==== Filter Configuration + +The `MlFilter` object contains all the details about the {ml} filter +configuration. + +A `MlFilter` contains the following arguments: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-config] +-------------------------------------------------- +<1> Required, the filter ID +<2> Optional, the filter description +<3> Optional, the items of the filter. A wildcard * can be used at the beginning or the end of an item. +Up to 10000 items are allowed in each filter. + +include::../execution.asciidoc[] + +[id="{upid}-{api}-response"] +==== Response + +The returned +{response}+ returns the full representation of +the new {ml} filter if it has been successfully created. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-response] +-------------------------------------------------- +<1> The newly created `MlFilter` diff --git a/docs/java-rest/high-level/rollup/delete_job.asciidoc b/docs/java-rest/high-level/rollup/delete_job.asciidoc new file mode 100644 index 0000000000000..c98a6fb732659 --- /dev/null +++ b/docs/java-rest/high-level/rollup/delete_job.asciidoc @@ -0,0 +1,35 @@ +-- +:api: rollup-delete-job +:request: DeleteRollupJobRequest +:response: DeleteRollupJobResponse +-- + +[id="{upid}-{api}"] +=== Delete Rollup Job API + + +[id="{upid}-{api}-request"] +==== Request + +The Delete Rollup Job API allows you to delete a job by ID. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-request] +-------------------------------------------------- +<1> The ID of the job to delete. + +[id="{upid}-{api}-response"] +==== Response + +The returned +{response}+ indicates if the delete command was received. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-response] +-------------------------------------------------- +<1> Whether or not the delete job request was received. + +include::../execution.asciidoc[] + + diff --git a/docs/java-rest/high-level/search/count.asciidoc b/docs/java-rest/high-level/search/count.asciidoc new file mode 100644 index 0000000000000..f70e1e1fd4d22 --- /dev/null +++ b/docs/java-rest/high-level/search/count.asciidoc @@ -0,0 +1,114 @@ +-- +:api: count +:request: CountRequest +:response: CountResponse +-- +[id="{upid}-{api}"] + +=== Count API + +[id="{upid}-{api}-request"] + +==== Count Request + +The +{request}+ is used to execute a query and get the number of matches for the query. The query to use in +{request}+ can be +set in similar way as query in `SearchRequest` using `SearchSourceBuilder`. 
+ +In its most basic form, we can add a query to the request: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-request-basic] +-------------------------------------------------- + +<1> Creates the +{request}+. Without arguments this runs against all indices. +<2> Most search parameters are added to the `SearchSourceBuilder`. +<3> Add a `match_all` query to the `SearchSourceBuilder`. +<4> Add the `SearchSourceBuilder` to the +{request}+. + +[[java-rest-high-count-request-optional]] +===== Count Request optional arguments + +Let's first look at some of the optional arguments of a +{request}+: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-request-indices-types] +-------------------------------------------------- +<1> Restricts the request to an index +<2> Limits the request to a type + +There are a couple of other interesting optional parameters: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-request-routing] +-------------------------------------------------- +<1> Set a routing parameter + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-request-indicesOptions] +-------------------------------------------------- +<1> Setting `IndicesOptions` controls how unavailable indices are resolved and how wildcard expressions are expanded + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-request-preference] +-------------------------------------------------- +<1> Use the preference parameter, e.g. to prefer local shards when executing the search. The default is to randomize across shards. + +===== Using the SearchSourceBuilder in CountRequest + +In both search and count API calls, most options controlling the search behavior can be set on the `SearchSourceBuilder`, +which contains more or less the equivalent of the options in the search request body of the Rest API. + +Here are a few examples of some common options: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-source-basics] +-------------------------------------------------- +<1> Create a `SearchSourceBuilder` with default options. +<2> Set the query. It can be any type of `QueryBuilder`. + +After this, the `SearchSourceBuilder` only needs to be added to the ++{request}+: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-source-setter] +-------------------------------------------------- + +Note the subtle difference when using `SearchSourceBuilder` in `SearchRequest` and in +{request}+: with +`SearchRequest`, the `SearchSourceBuilder.size()` and `SearchSourceBuilder.from()` methods set the +number of search hits to return and the starting index, while in +{request}+ we're only interested in the total number of matches and these methods +have no meaning. + +The <> page gives a list of all available search queries with +their corresponding `QueryBuilder` objects and `QueryBuilders` helper methods.
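+
+Putting the pieces together, a self-contained sketch of a count call might look
+like the following (the index name, field, and value are placeholders, and a
+cluster is assumed on `localhost:9200`):
+
+[source,java]
+--------------------------------------------------
+import org.apache.http.HttpHost;
+import org.elasticsearch.client.RequestOptions;
+import org.elasticsearch.client.RestClient;
+import org.elasticsearch.client.RestHighLevelClient;
+import org.elasticsearch.client.core.CountRequest;
+import org.elasticsearch.client.core.CountResponse;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.search.builder.SearchSourceBuilder;
+
+public class CountSketch {
+    public static void main(String[] args) throws Exception {
+        try (RestHighLevelClient client = new RestHighLevelClient(
+                RestClient.builder(new HttpHost("localhost", 9200, "http")))) {
+            // Build the query with the usual QueryBuilders helpers ...
+            SearchSourceBuilder sourceBuilder = new SearchSourceBuilder()
+                    .query(QueryBuilders.termQuery("user", "kimchy"));
+
+            // ... attach it to a CountRequest restricted to one index ...
+            CountRequest countRequest = new CountRequest("posts").source(sourceBuilder);
+
+            // ... and read the total number of matches off the response.
+            CountResponse countResponse = client.count(countRequest, RequestOptions.DEFAULT);
+            System.out.println("matches: " + countResponse.getCount());
+        }
+    }
+}
+--------------------------------------------------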
+ +include::../execution.asciidoc[] + +[id="{upid}-{api}-response"] +==== CountResponse + +The +{response}+ that is returned by executing the count API call provides the total count of hits and details about the count execution +itself, like the HTTP status code, or whether the request terminated early: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-response-1] +-------------------------------------------------- + +The response also provides information about the execution on the +shard level by offering statistics about the total number of shards that were +affected by the underlying search, and the successful vs. unsuccessful shards. Possible +failures can also be handled by iterating over an array of +`ShardSearchFailures`, as in the following example: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-response-2] +-------------------------------------------------- + diff --git a/docs/java-rest/high-level/security/authenticate.asciidoc b/docs/java-rest/high-level/security/authenticate.asciidoc new file mode 100644 index 0000000000000..e50c64bf9d0f5 --- /dev/null +++ b/docs/java-rest/high-level/security/authenticate.asciidoc @@ -0,0 +1,66 @@ + +-- +:api: authenticate +:response: AuthenticateResponse +-- + +[id="{upid}-{api}"] +=== Authenticate API + +[id="{upid}-{api}-sync"] +==== Execution + +Authenticating and retrieving information about a user can be performed +using the `security().authenticate()` method: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-execute] +-------------------------------------------------- + +This method does not require a request object. The client waits for the ++{response}+ to be returned before continuing with code execution. + +[id="{upid}-{api}-response"] +==== Response + +The returned +{response}+ contains two fields. Firstly, the `user` field, +accessed with `getUser`, contains all the information about this +authenticated user. The other field, `enabled`, tells if this user is actually +usable or has been temporarily deactivated. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-response] +-------------------------------------------------- +<1> `getUser` retrieves the `User` instance containing the information, +see {javadoc-client}/security/user/User.html. +<2> `enabled` tells if this user is usable or is deactivated. + +[id="{upid}-{api}-async"] +==== Asynchronous Execution + +This request can also be executed asynchronously: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-execute-async] +-------------------------------------------------- +<1> The `ActionListener` to use when the execution completes. This method does +not require a request object. + +The asynchronous method does not block and returns immediately. Once the request +has completed, the `ActionListener` is called back using the `onResponse` method +if the execution completed successfully or using the `onFailure` method if +it failed.
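+
+As a compact sketch of that wiring (it assumes an initialized
+`RestHighLevelClient` named `client` and uses the `ActionListener.wrap`
+convenience factory; the full listener form is shown below):
+
+[source,java]
+--------------------------------------------------
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.client.RequestOptions;
+import org.elasticsearch.client.security.AuthenticateResponse;
+
+// Assumes an initialized RestHighLevelClient named `client`.
+ActionListener<AuthenticateResponse> listener = ActionListener.wrap(
+        // onResponse: details of the authenticated user are available here.
+        response -> System.out.println("authenticated as: " + response.getUser().getUsername()),
+        // onFailure: the exception explains why the call failed.
+        exception -> exception.printStackTrace());
+
+client.security().authenticateAsync(RequestOptions.DEFAULT, listener);
+--------------------------------------------------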
+ +A typical listener for a +{response}+ looks like: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-execute-listener] +-------------------------------------------------- +<1> Called when the execution completed successfully. The response is +provided as an argument. +<2> Called in case of a failure. The exception is provided as an argument. + diff --git a/docs/java-rest/high-level/security/clear-realm-cache.asciidoc b/docs/java-rest/high-level/security/clear-realm-cache.asciidoc new file mode 100644 index 0000000000000..5427db148d65e --- /dev/null +++ b/docs/java-rest/high-level/security/clear-realm-cache.asciidoc @@ -0,0 +1,33 @@ + +-- +:api: clear-realm-cache +:request: ClearRealmCacheRequest +:response: ClearRealmCacheResponse +-- + +[id="{upid}-{api}"] +=== Clear Realm Cache API + +[id="{upid}-{api}-request"] +==== Clear Realm Cache Request + +A +{request}+ supports defining the names of realms and usernames for which the cache should be +cleared. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-request] +-------------------------------------------------- + +include::../execution.asciidoc[] + +[id="{upid}-{api}-response"] +==== Clear Realm Cache Response + +The returned +{response}+ allows you to retrieve information about where the cache was cleared. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-response] +-------------------------------------------------- +<1> The list of nodes that the cache was cleared on diff --git a/docs/java-rest/high-level/supported-apis.asciidoc b/docs/java-rest/high-level/supported-apis.asciidoc index e87789c27a812..9d2fd2aaf1f77 100644 --- a/docs/java-rest/high-level/supported-apis.asciidoc +++ b/docs/java-rest/high-level/supported-apis.asciidoc @@ -54,6 +54,7 @@ The Java High Level REST Client supports the following Search APIs: * <<{upid}-field-caps>> * <<{upid}-rank-eval>> * <<{upid}-explain>> +* <<{upid}-count>> include::search/search.asciidoc[] include::search/scroll.asciidoc[] @@ -63,6 +64,7 @@ include::search/multi-search-template.asciidoc[] include::search/field-caps.asciidoc[] include::search/rank-eval.asciidoc[] include::search/explain.asciidoc[] +include::search/count.asciidoc[] == Miscellaneous APIs @@ -258,6 +260,7 @@ The Java High Level REST Client supports the following Machine Learning APIs: * <<{upid}-get-calendars>> * <<{upid}-put-calendar>> * <<{upid}-delete-calendar>> +* <<{upid}-put-filter>> include::ml/put-job.asciidoc[] include::ml/get-job.asciidoc[] @@ -286,6 +289,7 @@ include::ml/get-categories.asciidoc[] include::ml/get-calendars.asciidoc[] include::ml/put-calendar.asciidoc[] include::ml/delete-calendar.asciidoc[] +include::ml/put-filter.asciidoc[] == Migration APIs @@ -299,15 +303,16 @@ include::migration/get-assistance.asciidoc[] :upid: {mainid}-rollup :doc-tests-file: {doc-tests}/RollupDocumentationIT.java - The Java High Level REST Client supports the following Rollup APIs: * <> +* <<{upid}-rollup-delete-job>> * <<{upid}-rollup-start-job>> * <> * <<{upid}-x-pack-rollup-get-rollup-caps>> include::rollup/put_job.asciidoc[] +include::rollup/delete_job.asciidoc[] include::rollup/start_job.asciidoc[] include::rollup/get_job.asciidoc[] include::rollup/get_rollup_caps.asciidoc[] @@ -325,6 +330,8 @@ The Java High Level REST Client supports the
following Security APIs: * <> * <> * <<{upid}-clear-roles-cache>> +* <<{upid}-clear-realm-cache>> +* <<{upid}-authenticate>> * <> * <> * <> @@ -337,6 +344,8 @@ include::security/disable-user.asciidoc[] include::security/change-password.asciidoc[] include::security/delete-role.asciidoc[] include::security/clear-roles-cache.asciidoc[] +include::security/clear-realm-cache.asciidoc[] +include::security/authenticate.asciidoc[] include::security/get-certificates.asciidoc[] include::security/put-role-mapping.asciidoc[] include::security/get-role-mappings.asciidoc[] @@ -384,4 +393,4 @@ don't leak into the rest of the documentation. :response!: :doc-tests-file!: :upid!: --- \ No newline at end of file +-- diff --git a/docs/painless/painless-contexts.asciidoc b/docs/painless/painless-contexts.asciidoc index cc7bc752ec6d9..d3f598525a8b7 100644 --- a/docs/painless/painless-contexts.asciidoc +++ b/docs/painless/painless-contexts.asciidoc @@ -44,7 +44,7 @@ specialized code may define new ways to use a Painless script. | {ref}/search-aggregations-metrics-scripted-metric-aggregation.html[Elasticsearch Documentation] | Metric aggregation reduce | <> | {ref}/search-aggregations-metrics-scripted-metric-aggregation.html[Elasticsearch Documentation] -| Bucket aggregation | <> +| Bucket script aggregation | <> | {ref}/search-aggregations-pipeline-bucket-script-aggregation.html[Elasticsearch Documentation] | Watcher condition | <> | {xpack-ref}/condition-script.html[Elasticsearch Documentation] diff --git a/docs/painless/painless-contexts/index.asciidoc b/docs/painless/painless-contexts/index.asciidoc index a71fde0be32a0..df0c6f71e7798 100644 --- a/docs/painless/painless-contexts/index.asciidoc +++ b/docs/painless/painless-contexts/index.asciidoc @@ -28,7 +28,7 @@ include::painless-metric-agg-combine-context.asciidoc[] include::painless-metric-agg-reduce-context.asciidoc[] -include::painless-bucket-agg-context.asciidoc[] +include::painless-bucket-script-agg-context.asciidoc[] include::painless-analysis-predicate-context.asciidoc[] diff --git a/docs/painless/painless-contexts/painless-bucket-agg-context.asciidoc b/docs/painless/painless-contexts/painless-bucket-agg-context.asciidoc deleted file mode 100644 index 3bb4cae3d3bab..0000000000000 --- a/docs/painless/painless-contexts/painless-bucket-agg-context.asciidoc +++ /dev/null @@ -1,21 +0,0 @@ -[[painless-bucket-agg-context]] -=== Bucket aggregation context - -Use a Painless script in an -{ref}/search-aggregations-pipeline-bucket-script-aggregation.html[bucket aggregation] -to calculate a value as a result in a bucket. - -*Variables* - -`params` (`Map`, read-only):: - User-defined parameters passed in as part of the query. The parameters - include values defined as part of the `buckets_path`. - -*Return* - -numeric:: - The calculated value as the result. - -*API* - -The standard <> is available. \ No newline at end of file diff --git a/docs/painless/painless-contexts/painless-bucket-script-agg-context.asciidoc b/docs/painless/painless-contexts/painless-bucket-script-agg-context.asciidoc new file mode 100644 index 0000000000000..5a5306016945d --- /dev/null +++ b/docs/painless/painless-contexts/painless-bucket-script-agg-context.asciidoc @@ -0,0 +1,86 @@ +[[painless-bucket-script-agg-context]] +=== Bucket script aggregation context + +Use a Painless script in an +{ref}/search-aggregations-pipeline-bucket-script-aggregation.html[`bucket_script` pipeline aggregation] +to calculate a value as a result in a bucket. 
+ +==== Variables + +`params` (`Map`, read-only):: + User-defined parameters passed in as part of the query. The parameters + include values defined as part of the `buckets_path`. + +==== Return + +numeric:: + The calculated value as the result. + +==== API + +The standard <> is available. + +==== Example + +To run this example, first follow the steps in <>. + +The painless context in a `bucket_script` aggregation provides a `params` map. This map contains both +user-specified custom values, as well as the values from other aggregations specified in the `buckets_path` +property. + +This example takes the values from a min and max aggregation, calculates the difference, +and adds the user-specified base_cost to the result: + +[source,Painless] +-------------------------------------------------- +(params.max - params.min) + params.base_cost +-------------------------------------------------- + +Note that the values are extracted from the `params` map. In context, the aggregation looks like this: + +[source,js] +-------------------------------------------------- +GET /seats/_search +{ + "size": 0, + "aggs": { + "theatres": { + "terms": { + "field": "theatre", + "size": 10 + }, + "aggs": { + "min_cost": { + "min": { + "field": "cost" + } + }, + "max_cost": { + "max": { + "field": "cost" + } + }, + "spread_plus_base": { + "bucket_script": { + "buckets_path": { <1> + "min": "min_cost", + "max": "max_cost" + }, + "script": { + "params": { + "base_cost": 5 <2> + }, + "source": "(params.max - params.min) + params.base_cost" + } + } + } + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:seats] +<1> The `buckets_path` points to two aggregations (`min_cost`, `max_cost`) and adds `min`/`max` variables +to the `params` map +<2> The user-specified `base_cost` is also added to the script's `params` map \ No newline at end of file diff --git a/docs/painless/painless-contexts/painless-context-examples.asciidoc b/docs/painless/painless-contexts/painless-context-examples.asciidoc index a6a67e22a5bd9..469f425d1d89f 100644 --- a/docs/painless/painless-contexts/painless-context-examples.asciidoc +++ b/docs/painless/painless-contexts/painless-context-examples.asciidoc @@ -46,7 +46,7 @@ the request URL. 
PUT /seats { "mappings": { - "_doc": { + "seat": { "properties": { "theatre": { "type": "keyword" }, "play": { "type": "text" }, diff --git a/docs/plugins/repository.asciidoc b/docs/plugins/repository.asciidoc index 9a4e90bebd714..d444170801833 100644 --- a/docs/plugins/repository.asciidoc +++ b/docs/plugins/repository.asciidoc @@ -32,7 +32,7 @@ The GCS repository plugin adds support for using Google Cloud Storage service as The following plugin has been contributed by our community: -* https://github.com/wikimedia/search-repository-swift[Openstack Swift] (by Wikimedia Foundation) +* https://github.com/BigDataBoutique/elasticsearch-repository-swift[Openstack Swift] (by Wikimedia Foundation and BigData Boutique) include::repository-azure.asciidoc[] diff --git a/docs/reference/aggregations/bucket/datehistogram-aggregation.asciidoc b/docs/reference/aggregations/bucket/datehistogram-aggregation.asciidoc index 1d185e80f4f96..514528da0d0bd 100644 --- a/docs/reference/aggregations/bucket/datehistogram-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/datehistogram-aggregation.asciidoc @@ -1,12 +1,129 @@ [[search-aggregations-bucket-datehistogram-aggregation]] === Date Histogram Aggregation -A multi-bucket aggregation similar to the <> except it can -only be applied on date values. Since dates are represented in Elasticsearch internally as long values, it is possible -to use the normal `histogram` on dates as well, though accuracy will be compromised. The reason for this is in the fact -that time based intervals are not fixed (think of leap years and on the number of days in a month). For this reason, -we need special support for time based data. From a functionality perspective, this histogram supports the same features -as the normal <>. The main difference is that the interval can be specified by date/time expressions. +This multi-bucket aggregation is similar to the normal +<>, but it can +only be used with date values. Because dates are represented internally in +Elasticsearch as long values, it is possible, but not as accurate, to use the +normal `histogram` on dates as well. The main difference in the two APIs is +that here the interval can be specified using date/time expressions. Time-based +data requires special support because time-based intervals are not always a +fixed length. + +==== Setting intervals + +There seems to be no limit to the creativity we humans apply to setting our +clocks and calendars. We've invented leap years and leap seconds, standard and +daylight savings times, and timezone offsets of 30 or 45 minutes rather than a +full hour. While these creations help keep us in sync with the cosmos and our +environment, they can make specifying time intervals accurately a real challenge. +The only universal truth our researchers have yet to disprove is that a +millisecond is always the same duration, and a second is always 1000 milliseconds. +Beyond that, things get complicated. + +Generally speaking, when you specify a single time unit, such as 1 hour or 1 day, you +are working with a _calendar interval_, but multiples, such as 6 hours or 3 days, are +_fixed-length intervals_. + +For example, a specification of 1 day (1d) from now is a calendar interval that +means "at +this exact time tomorrow" no matter the length of the day. A change to or from +daylight savings time that results in a 23 or 25 hour day is compensated for and the +specification of "this exact time tomorrow" is maintained. 
But if you specify 2 or +more days, each day must be of the same fixed duration (24 hours). In this case, if +the specified interval includes the change to or from daylight savings time, the +interval will end an hour sooner or later than you expect. + +There are similar differences to consider when you specify single versus multiple +minutes or hours. Multiple time periods longer than a day are not supported. + +Here are the valid time specifications and their meanings: + +milliseconds (ms) :: +Fixed length interval; supports multiples. + +seconds (s) :: +1000 milliseconds; fixed length interval (except for the last second of a +minute that contains a leap-second, which is 2000ms long); supports multiples. + +minutes (m) :: +All minutes begin at 00 seconds. + +* One minute (1m) is the interval between 00 seconds of the first minute and 00 +seconds of the following minute in the specified timezone, compensating for any +intervening leap seconds, so that the number of minutes and seconds past the +hour is the same at the start and end. +* Multiple minutes (__n__m) are intervals of exactly 60x1000=60,000 milliseconds +each. + +hours (h) :: +All hours begin at 00 minutes and 00 seconds. + +* One hour (1h) is the interval between 00:00 minutes of the first hour and 00:00 +minutes of the following hour in the specified timezone, compensating for any +intervening leap seconds, so that the number of minutes and seconds past the hour +is the same at the start and end. +* Multiple hours (__n__h) are intervals of exactly 60x60x1000=3,600,000 milliseconds +each. + +days (d) :: +All days begin at the earliest possible time, which is usually 00:00:00 +(midnight). + +* One day (1d) is the interval between the start of the day and the start of +the following day in the specified timezone, compensating for any intervening +time changes. +* Multiple days (__n__d) are intervals of exactly 24x60x60x1000=86,400,000 +milliseconds each. + +weeks (w) :: + +* One week (1w) is the interval between the start day_of_week:hour:minute:second +and the same day of the week and time of the following week in the specified +timezone. +* Multiple weeks (__n__w) are not supported. + +months (M) :: + +* One month (1M) is the interval between the start day of the month and time of +day and the same day of the month and time of the following month in the specified +timezone, so that the day of the month and time of day are the same at the start +and end. +* Multiple months (__n__M) are not supported. + +quarters (q) :: + +* One quarter (1q) is the interval between the start day of the month and +time of day and the same day of the month and time of day three months later, +so that the day of the month and time of day are the same at the start and end. + +* Multiple quarters (__n__q) are not supported. + +years (y) :: + +* One year (1y) is the interval between the start day of the month and time of +day and the same day of the month and time of day the following year in the +specified timezone, so that the date and time are the same at the start and end. + +* Multiple years (__n__y) are not supported. + +NOTE: +In all cases, when the specified end time does not exist, the actual end time is +the closest available time after the specified end.
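+
+The distinction is easy to demonstrate with plain `java.time` (shown purely as
+an illustration of calendar versus fixed-length arithmetic, not as
+Elasticsearch code):
+
+[source,java]
+--------------------------------------------------
+import java.time.Duration;
+import java.time.ZoneId;
+import java.time.ZonedDateTime;
+
+public class CalendarVsFixed {
+    public static void main(String[] args) {
+        ZoneId tz = ZoneId.of("Europe/Berlin");
+        // Midnight before the spring 2016 DST change (02:00 jumped to 03:00).
+        ZonedDateTime start = ZonedDateTime.of(2016, 3, 27, 0, 0, 0, 0, tz);
+
+        // Calendar-style "1d": this exact time tomorrow; only 23 hours away here.
+        System.out.println(Duration.between(start, start.plusDays(1)));     // PT23H
+
+        // Fixed-length "24h": exactly 24 hours, landing at 01:00 the next day.
+        System.out.println(start.plus(Duration.ofHours(24)).toLocalTime()); // 01:00
+    }
+}
+--------------------------------------------------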
+ +Widely distributed applications must also consider vagaries such as countries that +start and stop daylight savings time at 12:01 A.M., and so end up with one minute of +Sunday followed by an additional 59 minutes of Saturday once a year, and countries +that decide to move across the international date line. Situations like +that can make irregular timezone offsets seem easy. + +As always, rigorous testing, especially around time-change events, will ensure +that your time interval specification is +what you intend it to be. + +WARNING: +To avoid unexpected results, all connected servers and clients must sync to a +reliable network time service. + +==== Examples Requesting bucket intervals of a month. @@ -27,13 +144,11 @@ POST /sales/_search?size=0 // CONSOLE // TEST[setup:sales] -Available expressions for interval: `year` (`1y`), `quarter` (`1q`), `month` (`1M`), `week` (`1w`), -`day` (`1d`), `hour` (`1h`), `minute` (`1m`), `second` (`1s`) - -Time values can also be specified via abbreviations supported by <> parsing. -Note that fractional time values are not supported, but you can address this by shifting to another -time unit (e.g., `1.5h` could instead be specified as `90m`). Also note that time intervals larger than -days do not support arbitrary values but can only be one unit large (e.g. `1y` is valid, `2y` is not). +You can also specify time values using abbreviations supported by +<> parsing. +Note that fractional time values are not supported, but you can address this by +shifting to another +time unit (e.g., `1.5h` could instead be specified as `90m`). [source,js] -------------------------------------------------- @@ -52,15 +167,16 @@ POST /sales/_search?size=0 // CONSOLE // TEST[setup:sales] -==== Keys +===== Keys Internally, a date is represented as a 64 bit number representing a timestamp -in milliseconds-since-the-epoch. These timestamps are returned as the bucket -++key++s. The `key_as_string` is the same timestamp converted to a formatted -date string using the format specified with the `format` parameter: +in milliseconds-since-the-epoch (01/01/1970 midnight UTC). These timestamps are +returned as the ++key++ name of the bucket. The `key_as_string` is the same +timestamp converted to a formatted +date string using the `format` parameter specification: -TIP: If no `format` is specified, then it will use the first date -<> specified in the field mapping. +TIP: If you don't specify `format`, the first date +<> specified in the field mapping is used. [source,js] -------------------------------------------------- @@ -113,15 +229,15 @@ Response: -------------------------------------------------- // TESTRESPONSE[s/\.\.\./"took": $body.took,"timed_out": false,"_shards": $body._shards,"hits": $body.hits,/] -==== Time Zone +===== Timezone Date-times are stored in Elasticsearch in UTC. By default, all bucketing and -rounding is also done in UTC. The `time_zone` parameter can be used to indicate -that bucketing should use a different time zone. +rounding is also done in UTC. Use the `time_zone` parameter to indicate +that bucketing should use a different timezone. -Time zones may either be specified as an ISO 8601 UTC offset (e.g. `+01:00` or -`-08:00`) or as a timezone id, an identifier used in the TZ database like -`America/Los_Angeles`. +You can specify timezones as either an ISO 8601 UTC offset (e.g. `+01:00` or +`-08:00`) or as a timezone ID as specified in the IANA timezone database, +such as `America/Los_Angeles`.
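+
+Both notations are ordinary zone identifiers. As a small illustration (again
+plain `java.time`, not Elasticsearch code), either form resolves, and only the
+IANA ID carries daylight savings rules:
+
+[source,java]
+--------------------------------------------------
+import java.time.ZoneId;
+
+public class TimezoneForms {
+    public static void main(String[] args) {
+        // A fixed UTC offset: no daylight savings rules attached.
+        System.out.println(ZoneId.of("-08:00").getRules().isFixedOffset());              // true
+        // An IANA timezone ID: carries the region's daylight savings history.
+        System.out.println(ZoneId.of("America/Los_Angeles").getRules().isFixedOffset()); // false
+    }
+}
+--------------------------------------------------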
Consider the following example: @@ -151,7 +267,7 @@ GET my_index/_search?size=0 --------------------------------- // CONSOLE -UTC is used if no time zone is specified, which would result in both of these +If you don't specify a timezone, UTC is used. This would result in both of these documents being placed into the same day bucket, which starts at midnight UTC on 1 October 2015: @@ -174,8 +290,8 @@ on 1 October 2015: --------------------------------- // TESTRESPONSE[s/\.\.\./"took": $body.took,"timed_out": false,"_shards": $body._shards,"hits": $body.hits,/] -If a `time_zone` of `-01:00` is specified, then midnight starts at one hour before -midnight UTC: +If you specify a `time_zone` of `-01:00`, midnight in that timezone is one hour +before midnight UTC: [source,js] --------------------------------- @@ -223,28 +339,27 @@ second document falls into the bucket for 1 October 2015: // TESTRESPONSE[s/\.\.\./"took": $body.took,"timed_out": false,"_shards": $body._shards,"hits": $body.hits,/] <1> The `key_as_string` value represents midnight on each day - in the specified time zone. + in the specified timezone. WARNING: When using time zones that follow DST (daylight savings time) changes, buckets close to the moment when those changes happen can have slightly different -sizes than would be expected from the used `interval`. +sizes than you would expect from the `interval` used. For example, consider a DST start in the `CET` time zone: on 27 March 2016 at 2am, -clocks were turned forward 1 hour to 3am local time. When using `day` as `interval`, +clocks were turned forward 1 hour to 3am local time. If you use `day` as `interval`, the bucket covering that day will only hold data for 23 hours instead of the usual -24 hours for other buckets. The same is true for shorter intervals like e.g. 12h. -Here, we will have only a 11h bucket on the morning of 27 March when the DST shift +24 hours for other buckets. The same is true for shorter intervals, like 12h, +where you'll have only an 11h bucket on the morning of 27 March when the DST shift happens. +===== Offset -==== Offset - -The `offset` parameter is used to change the start value of each bucket by the +Use the `offset` parameter to change the start value of each bucket by the specified positive (`+`) or negative offset (`-`) duration, such as `1h` for an hour, or `1d` for a day. See <> for more possible time duration options. -For instance, when using an interval of `day`, each bucket runs from midnight -to midnight. Setting the `offset` parameter to `+6h` would change each bucket +For example, when using an interval of `day`, each bucket runs from midnight +to midnight. Setting the `offset` parameter to `+6h` changes each bucket to run from 6am to 6am: [source,js] @@ -301,12 +416,13 @@ documents into buckets starting at 6am: ----------------------------- // TESTRESPONSE[s/\.\.\./"took": $body.took,"timed_out": false,"_shards": $body._shards,"hits": $body.hits,/] -NOTE: The start `offset` of each bucket is calculated after the `time_zone` +NOTE: The start `offset` of each bucket is calculated after `time_zone` adjustments have been made.
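+
+For users of the Java API, the same bucketing can be expressed on the
+aggregation builder. A small sketch (the builder names are those of the
+server's `AggregationBuilders` API and are assumed current for this branch):
+
+[source,java]
+--------------------------------------------------
+import org.elasticsearch.search.aggregations.AggregationBuilders;
+import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder;
+import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval;
+
+public class OffsetByDay {
+    public static void main(String[] args) {
+        // Day-long buckets that run from 6am to 6am instead of midnight to midnight.
+        DateHistogramAggregationBuilder byDay = AggregationBuilders
+                .dateHistogram("sales_over_time")
+                .field("date")
+                .dateHistogramInterval(DateHistogramInterval.DAY)
+                .offset("+6h");
+        System.out.println(byDay.getName());
+    }
+}
+--------------------------------------------------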
-==== Keyed Response +===== Keyed Response -Setting the `keyed` flag to `true` will associate a unique string key with each bucket and return the ranges as a hash rather than an array: +Setting the `keyed` flag to `true` associates a unique string key with each +bucket and returns the ranges as a hash rather than an array: [source,js] -------------------------------------------------- @@ -358,20 +474,25 @@ Response: -------------------------------------------------- // TESTRESPONSE[s/\.\.\./"took": $body.took,"timed_out": false,"_shards": $body._shards,"hits": $body.hits,/] -==== Scripts +===== Scripts -Like with the normal <>, both document level scripts and -value level scripts are supported. It is also possible to control the order of the returned buckets using the `order` -settings and filter the returned buckets based on a `min_doc_count` setting (by default all buckets between the first -bucket that matches documents and the last one are returned). This histogram also supports the `extended_bounds` -setting, which enables extending the bounds of the histogram beyond the data itself (to read more on why you'd want to -do that please refer to the explanation <>). +As with the normal <>, +both document-level scripts and +value-level scripts are supported. You can control the order of the returned +buckets using the `order` +settings and filter the returned buckets based on a `min_doc_count` setting +(by default all buckets between the first +bucket that matches documents and the last one are returned). This histogram +also supports the `extended_bounds` +setting, which enables extending the bounds of the histogram beyond the data +itself. For more information, see +<>. -==== Missing value +===== Missing value -The `missing` parameter defines how documents that are missing a value should be treated. -By default they will be ignored but it is also possible to treat them as if they -had a value. +The `missing` parameter defines how to treat documents that are missing a value. +By default, they are ignored, but it is also possible to treat them as if they +have a value. [source,js] -------------------------------------------------- @@ -391,20 +512,22 @@ POST /sales/_search?size=0 // CONSOLE // TEST[setup:sales] -<1> Documents without a value in the `publish_date` field will fall into the same bucket as documents that have the value `2000-01-01`. +<1> Documents without a value in the `publish_date` field will fall into the +same bucket as documents that have the value `2000-01-01`. -==== Order +===== Order -By default the returned buckets are sorted by their `key` ascending, though the order behaviour can be controlled using -the `order` setting. Supports the same `order` functionality as the <>. +By default the returned buckets are sorted by their `key` ascending, but you can +control the order using +the `order` setting. This setting supports the same `order` functionality as +<>. deprecated[6.0.0, Use `_key` instead of `_time` to order buckets by their dates/keys] -==== Use of a script to aggregate by day of the week +===== Using a script to aggregate by day of the week -There are some cases where date histogram can't help us, like for example, when we need -to aggregate the results by day of the week. 
-In this case to overcome the problem, we can use a script that returns the day of the week: +When you need to aggregate the results by day of the week, use a script that +returns the day of the week: [source,js] @@ -452,5 +575,5 @@ Response: -------------------------------------------------- // TESTRESPONSE[s/\.\.\./"took": $body.took,"timed_out": false,"_shards": $body._shards,"hits": $body.hits,/] -The response will contain all the buckets having as key the relative day of -the week: 1 for Monday, 2 for Tuesday... 7 for Sunday. +The response will contain all the buckets having the relative day of +the week as key: 1 for Monday, 2 for Tuesday... 7 for Sunday. diff --git a/docs/reference/aggregations/metrics.asciidoc b/docs/reference/aggregations/metrics.asciidoc index 96597564dac2d..1d3daa2cecae5 100644 --- a/docs/reference/aggregations/metrics.asciidoc +++ b/docs/reference/aggregations/metrics.asciidoc @@ -41,6 +41,8 @@ include::metrics/tophits-aggregation.asciidoc[] include::metrics/valuecount-aggregation.asciidoc[] +include::metrics/median-absolute-deviation-aggregation.asciidoc[] + diff --git a/docs/reference/aggregations/metrics/median-absolute-deviation-aggregation.asciidoc b/docs/reference/aggregations/metrics/median-absolute-deviation-aggregation.asciidoc new file mode 100644 index 0000000000000..2e88b12d92cd1 --- /dev/null +++ b/docs/reference/aggregations/metrics/median-absolute-deviation-aggregation.asciidoc @@ -0,0 +1,189 @@ +[[search-aggregations-metrics-median-absolute-deviation-aggregation]] +=== Median Absolute Deviation Aggregation + +This `single-value` aggregation approximates the https://en.wikipedia.org/wiki/Median_absolute_deviation[median absolute deviation] +of its search results. + +Median absolute deviation is a measure of variability. It is a robust +statistic, meaning that it is useful for describing data that may have +outliers, or may not be normally distributed. For such data it can be more +descriptive than standard deviation. + +It is calculated as the median of each data point's deviation from the median +of the entire sample. That is, for a random variable X, the median absolute +deviation is median(|median(X) - X~i~|). + +==== Example + +Assume our data represents product reviews on a one to five star scale. +Such reviews are usually summarized as a mean, which is easily understandable +but doesn't describe the reviews' variability. Estimating the median absolute +deviation can provide insight into how much reviews vary from one another. + +In this example we have a product which has an average rating of +3 stars. Let's look at its ratings' median absolute deviation to determine +how much they vary: + +[source,js] +--------------------------------------------------------- +GET reviews/_search +{ + "size": 0, + "aggs": { + "review_average": { + "avg": { + "field": "rating" + } + }, + "review_variability": { + "median_absolute_deviation": { + "field": "rating" <1> + } + } + } +} +--------------------------------------------------------- +// CONSOLE +// TEST[setup:reviews] +<1> `rating` must be a numeric field + +The resulting median absolute deviation of `2` tells us that there is a fair +amount of variability in the ratings. Reviewers must have diverse opinions about +this product. + +[source,js] +--------------------------------------------------------- +{ + ...
+ "aggregations": { + "review_average": { + "value": 3.0 + }, + "review_variability": { + "value": 2.0 + } + } +} +--------------------------------------------------------- +// TESTRESPONSE[s/\.\.\./"took": $body.took,"timed_out": false,"_shards": $body._shards,"hits": $body.hits,/] + +==== Approximation + +The naive implementation of calculating median absolute deviation stores the +entire sample in memory, so this aggregation instead calculates an +approximation. It uses the https://github.com/tdunning/t-digest[TDigest data structure] +to approximate the sample median and the median of deviations from the sample +median. For more about the approximation characteristics of TDigests, see +<>. + +The tradeoff between resource usage and accuracy of a TDigest's quantile +approximation, and therefore the accuracy of this aggregation's approximation +of median absolute deviation, is controlled by the `compression` parameter. A +higher `compression` setting provides a more accurate approximation at the +cost of higher memory usage. For more about the characteristics of the TDigest +`compression` parameter see +<>. + +[source,js] +--------------------------------------------------------- +GET reviews/_search +{ + "size": 0, + "aggs": { + "review_variability": { + "median_absolute_deviation": { + "field": "rating", + "compression": 100 + } + } + } +} +--------------------------------------------------------- +// CONSOLE +// TEST[setup:reviews] + +The default `compression` value for this aggregation is `1000`. At this +compression level this aggregation is usually within 5% of the exact result, +but observed performance will depend on the sample data. + +==== Script + +This metric aggregation supports scripting. In our example above, product +reviews are on a scale of one to five. If we wanted to modify them to a scale +of one to ten, we can using scripting. + +To provide an inline script: + +[source,js] +--------------------------------------------------------- +GET reviews/_search +{ + "size": 0, + "aggs": { + "review_variability": { + "median_absolute_deviation": { + "script": { + "lang": "painless", + "source": "doc['rating'].value * params.scaleFactor", + "params": { + "scaleFactor": 2 + } + } + } + } + } +} +--------------------------------------------------------- +// CONSOLE +// TEST[setup:reviews] + +To provide a stored script: + +[source,js] +--------------------------------------------------------- +GET reviews/_search +{ + "size": 0, + "aggs": { + "review_variability": { + "median_absolute_deviation": { + "script": { + "id": "my_script", + "params": { + "field": "rating" + } + } + } + } + } +} +--------------------------------------------------------- +// CONSOLE +// TEST[setup:reviews,stored_example_script] + +==== Missing value + +The `missing` parameter defines how documents that are missing a value should be +treated. By default they will be ignored but it is also possible to treat them +as if they had a value. + +Let's be optimistic and assume some reviewers loved the product so much that +they forgot to give it a rating. 
We'll assign them five stars:

+[source,js]
+---------------------------------------------------------
+GET reviews/_search
+{
+  "size": 0,
+  "aggs": {
+    "review_variability": {
+      "median_absolute_deviation": {
+        "field": "rating",
+        "missing": 5
+      }
+    }
+  }
+}
+---------------------------------------------------------
+// CONSOLE
+// TEST[setup:reviews] diff --git a/docs/reference/docs/delete-by-query.asciidoc b/docs/reference/docs/delete-by-query.asciidoc index c0ee6c56a2fa4..0a71222a3b3f0 100644 --- a/docs/reference/docs/delete-by-query.asciidoc +++ b/docs/reference/docs/delete-by-query.asciidoc @@ -277,7 +277,7 @@ The number of requests per second effectively executed during the delete by quer `throttled_until_millis`:: -This field should always be equal to zero in a delete by query response. It only +This field should always be equal to zero in a `_delete_by_query` response. It only has meaning when using the <>, where it indicates the next time (in milliseconds since epoch) a throttled request will be executed again in order to conform to `requests_per_second`. diff --git a/docs/reference/docs/reindex.asciidoc b/docs/reference/docs/reindex.asciidoc index 5e236f05607e4..374256b0ad6cc 100644 --- a/docs/reference/docs/reindex.asciidoc +++ b/docs/reference/docs/reindex.asciidoc @@ -164,13 +164,7 @@ POST _reindex `index` and `type` in `source` can both be lists, allowing you to copy from lots of sources in one request. This will copy documents from the `_doc` and -`post` types in the `twitter` and `blog` index. The copied documents would include the -`post` type in the `twitter` index and the `_doc` type in the `blog` index. For more -specific parameters, you can use `query`. - -The Reindex API makes no effort to handle ID collisions. For such issues, the target index -will remain valid, but it's not easy to predict which document will survive because -the iteration order isn't well defined. +`post` types in the `twitter` and `blog` indices. [source,js] -------------------------------------------------- @@ -181,12 +175,19 @@ POST _reindex "type": ["_doc", "post"] }, "dest": { - "index": "all_together" + "index": "all_together", + "type": "_doc" } } -------------------------------------------------- // CONSOLE -// TEST[s/^/PUT twitter\nPUT blog\n/] +// TEST[setup:twitter] +// TEST[s/^/PUT blog\/post\/post1?refresh\n{"test": "foo"}\n/] + +NOTE: The Reindex API makes no effort to handle ID collisions, so the last +document written will "win", but the order isn't usually predictable, so it is +not a good idea to rely on this behavior. Instead, make sure that IDs are unique +by using a script. It's also possible to limit the number of processed documents by setting `size`. This will only copy a single document from `twitter` to @@ -671,7 +672,7 @@ The number of requests per second effectively executed during the reindex. `throttled_until_millis`:: -This field should always be equal to zero in a `_delete_by_query` response. It only +This field should always be equal to zero in a `_reindex` response. It only has meaning when using the <>, where it indicates the next time (in milliseconds since epoch) a throttled request will be executed again in order to conform to `requests_per_second`.
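+
+As a rough sketch of how throttling is requested, `requests_per_second` is
+passed on the URL of the request itself (this example assumes the `twitter`
+index from the snippets above and a hypothetical `new_twitter` destination):
+
+[source,js]
+--------------------------------------------------
+POST _reindex?requests_per_second=500
+{
+  "source": {
+    "index": "twitter"
+  },
+  "dest": {
+    "index": "new_twitter"
+  }
+}
+--------------------------------------------------
+// NOTCONSOLE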
diff --git a/docs/reference/docs/update-by-query.asciidoc b/docs/reference/docs/update-by-query.asciidoc index 84af9ed0d86b8..b30ba75c25bf5 100644 --- a/docs/reference/docs/update-by-query.asciidoc +++ b/docs/reference/docs/update-by-query.asciidoc @@ -237,7 +237,7 @@ batch size is `1000`, so if the `requests_per_second` is set to `500`: [source,txt] -------------------------------------------------- target_time = 1000 / 500 per second = 2 seconds -wait_time = target_time - delete_time = 2 seconds - .5 seconds = 1.5 seconds +wait_time = target_time - write_time = 2 seconds - .5 seconds = 1.5 seconds -------------------------------------------------- Since the batch is issued as a single `_bulk` request large batch sizes will @@ -332,7 +332,7 @@ The number of requests per second effectively executed during the update by quer `throttled_until_millis`:: -This field should always be equal to zero in a delete by query response. It only +This field should always be equal to zero in an `_update_by_query` response. It only has meaning when using the <>, where it indicates the next time (in milliseconds since epoch) a throttled request will be executed again in order to conform to `requests_per_second`. diff --git a/docs/reference/ilm/apis/delete-lifecycle.asciidoc b/docs/reference/ilm/apis/delete-lifecycle.asciidoc new file mode 100644 index 0000000000000..3c8b6c397c07f --- /dev/null +++ b/docs/reference/ilm/apis/delete-lifecycle.asciidoc @@ -0,0 +1,89 @@ +[role="xpack"] +[testenv="basic"] +[[ilm-delete-lifecycle]] +=== Delete Lifecycle Policy API +++++ +Delete Policy +++++ + +Deletes an existing lifecycle policy. + +==== Request + +`DELETE _ilm/policy/` + +==== Description + +Deletes an existing lifecycle policy. + +==== Path Parameters + +`policy` (required):: + (string) Identifier for the policy. + +==== Request Parameters + +`timeout`:: + (time units) Specifies the period of time to wait for the completion of the + DELETE operation. When this period of time elapses, the API fails and returns + an error. The default value is `30s`. For more information about time units, + see <>. + +`master_timeout`:: + (time units) Specifies the period of time to wait for the connection with master. + When this period of time elapses, the API fails and returns an error. + The default value is `30s`. For more information about time units, see <>.
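+
+Both parameters are passed on the query string. A minimal sketch, assuming a
+policy named `my_policy` exists and the default waits need to be raised:
+
+[source,js]
+--------------------------------------------------
+DELETE _ilm/policy/my_policy?timeout=60s&master_timeout=60s
+--------------------------------------------------
+// NOTCONSOLE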
+ + +==== Examples + +The following example deletes an existing policy named `my_policy`: + +////////////////////////// + +[source,js]
+--------------------------------------------------
+PUT _ilm/policy/my_policy
+{
+  "policy": {
+    "phases": {
+      "warm": {
+        "min_age": "10d",
+        "actions": {
+          "forcemerge": {
+            "max_num_segments": 1
+          }
+        }
+      },
+      "delete": {
+        "min_age": "30d",
+        "actions": {
+          "delete": {}
+        }
+      }
+    }
+  }
+}
+--------------------------------------------------
+// CONSOLE
+// TEST

+////////////////////////// + +[source,js]
+--------------------------------------------------
+DELETE _ilm/policy/my_policy
+--------------------------------------------------
+// CONSOLE
+// TEST[continued]

+If the request does not encounter errors, you receive the following result:

+[source,js]
+--------------------------------------------------
+{
+  "acknowledged": true
+}
+--------------------------------------------------
+// CONSOLE
+// TESTRESPONSE diff --git a/docs/reference/ilm/apis/explain.asciidoc b/docs/reference/ilm/apis/explain.asciidoc new file mode 100644 index 0000000000000..95daf0bda1f6b --- /dev/null +++ b/docs/reference/ilm/apis/explain.asciidoc @@ -0,0 +1,296 @@ +[role="xpack"] +[testenv="basic"] +[[ilm-explain]] +=== Explain Lifecycle API +++++ +Explain Lifecycle +++++ + +Shows the current lifecycle status for an index. + +==== Request + +`GET /_ilm/explain` + +==== Description + +This API returns information relating to the current lifecycle state of an +index. This includes information such as the currently executing phase, action, +and step, and the timestamp when the index entered them. It also shows the +definition of the current phase that is being run and, if there has been a +failure, information about that failure. + +==== Path Parameters + +`index` (required):: + (string) Identifier for the index. + +==== Request Parameters + +`timeout`:: + (time units) Specifies the period of time to wait for the completion of the + GET operation. When this period of time elapses, the API fails and returns + an error. The default value is `30s`. For more information about time units, + see <>. + +`master_timeout`:: + (time units) Specifies the period of time to wait for the connection with master. + When this period of time elapses, the API fails and returns an error. + The default value is `30s`. For more information about time units, see <>.
+ + +==== Examples + +The following example retrieves the lifecycle state for the index `my_index`: + +////////////////////////// + +[source,js] +-------------------------------------------------- +PUT _ilm/policy/my_policy +{ + "policy": { + "phases": { + "warm": { + "min_age": "10d", + "actions": { + "forcemerge": { + "max_num_segments": 1 + } + } + }, + "delete": { + "min_age": "30d", + "actions": { + "delete": {} + } + } + } + } +} + +PUT my_index +{ + "settings": { + "index.lifecycle.name": "my_policy", + "index.number_of_replicas": 0 + } +} + +GET /_cluster/health?wait_for_status=green&timeout=10s +-------------------------------------------------- +// CONSOLE +// TEST + +////////////////////////// + +[source,js] +-------------------------------------------------- +GET my_index/_ilm/explain +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +When the index is first taken over by ILM you will see a response like the following: + +[source,js] +-------------------------------------------------- +{ + "indices": { + "my_index": { + "index": "my_index", + "managed": true, <1> + "policy": "my_policy", <2> + "lifecycle_date_millis": 1538475653281, <3> + "phase": "new", <4> + "phase_time_millis": 1538475653317, <5> + "action": "complete", <6> + "action_time_millis": 1538475653317, <7> + "step": "complete", <8> + "step_time_millis": 1538475653317 <9> + } + } +} +-------------------------------------------------- +// CONSOLE +// TESTRESPONSE[s/"lifecycle_date_millis": 1538475653281/"lifecycle_date_millis": $body.indices.my_index.lifecycle_date_millis/] +// TESTRESPONSE[s/"phase_time_millis": 1538475653317/"phase_time_millis": $body.indices.my_index.phase_time_millis/] +// TESTRESPONSE[s/"action_time_millis": 1538475653317/"action_time_millis": $body.indices.my_index.action_time_millis/] +// TESTRESPONSE[s/"step_time_millis": 1538475653317/"step_time_millis": $body.indices.my_index.step_time_millis/] +<1> Shows if the index is being managed by ILM. If the index is not managed by +ILM the other fields will not be shown +<2> The name of the policy which ILM is using for this index +<3> The timestamp used for the `min_age` +<4> The current phase +<5> The timestamp for when the index entered the current phase +<6> The current action +<7> The timestamp for when the index entered the current action +<8> The current step +<9> The timestamp for when the index entered the current step + +When the policy is running on the index the response will contain a +`phase_execution` object that describes the exact phase that is being run. +Changes to the underlying policy will not affect this index until the current +phase definition has been completely executed. 
+ +[source,js]
+--------------------------------------------------
+{
+  "indices": {
+    "test-000069": {
+      "index": "test-000069",
+      "managed": true,
+      "policy": "my_lifecycle3",
+      "lifecycle_date_millis": 1538475653281,
+      "lifecycle_date": "2018-10-15T13:45:21.981Z",
+      "phase": "hot",
+      "phase_time_millis": 1538475653317,
+      "phase_time": "2018-10-15T13:45:22.577Z",
+      "action": "rollover",
+      "action_time_millis": 1538475653317,
+      "action_time": "2018-10-15T13:45:22.577Z",
+      "step": "attempt_rollover",
+      "step_time_millis": 1538475653317,
+      "step_time": "2018-10-15T13:45:22.577Z",
+      "phase_execution": { <1>
+        "policy": "my_lifecycle3", <2>
+        "phase_definition": { <3>
+          "min_age": "0ms",
+          "actions": {
+            "rollover": {
+              "max_age": "30s"
+            }
+          }
+        },
+        "version": 3, <4>
+        "modified_date": "2018-10-15T13:21:41.576Z", <5>
+        "modified_date_in_millis": 1539609701576 <6>
+      }
+    }
+  }
+}
+--------------------------------------------------
+// CONSOLE
+// TESTRESPONSE[skip:not possible to get the cluster into this state in a docs test]
+<1> The phase execution information for this index in its current phase
+<2> The policy that this phase definition was loaded from
+<3> The phase definition itself. This is the JSON for the phase loaded from the
+policy at the time the index entered the current phase
+<4> The version of the policy at the time the phase definition was loaded
+<5> The last modified date of the policy at the time the phase definition was loaded
+<6> The last modified epoch time of the policy at the time the phase definition was loaded

+If the policy is waiting for a step to complete for the index, the response will contain step information such as:

+[source,js]
+--------------------------------------------------
+{
+  "indices": {
+    "test-000020": {
+      "index": "test-000020",
+      "managed": true,
+      "policy": "my_lifecycle3",
+      "lifecycle_date_millis": 1538475653281,
+      "lifecycle_date": "2018-10-15T13:45:21.981Z",
+      "phase": "warm",
+      "phase_time_millis": 1538475653317,
+      "phase_time": "2018-10-15T13:45:22.577Z",
+      "action": "allocate",
+      "action_time_millis": 1538475653317,
+      "action_time": "2018-10-15T13:45:22.577Z",
+      "step": "check-allocation",
+      "step_time_millis": 1538475653317,
+      "step_time": "2018-10-15T13:45:22.577Z",
+      "step_info": { <1>
+        "message": "Waiting for all shard copies to be active",
+        "shards_left_to_allocate": -1,
+        "all_shards_active": false,
+        "actual_replicas": 2
+      },
+      "phase_execution": {
+        "policy": "my_lifecycle3",
+        "phase_definition": {
+          "min_age": "0ms",
+          "actions": {
+            "allocate": {
+              "number_of_replicas": 2,
+              "include": {
+                "box_type": "warm"
+              },
+              "exclude": {},
+              "require": {}
+            },
+            "forcemerge": {
+              "max_num_segments": 1
+            }
+          }
+        },
+        "version": 2,
+        "modified_date": "2018-10-15T13:20:02.489Z",
+        "modified_date_in_millis": 1539609602489
+      }
+    }
+  }
+}
+--------------------------------------------------
+// CONSOLE
+// TESTRESPONSE[skip:not possible to get the cluster into this state in a docs test]
+<1> `step_info` shows information about what ILM is waiting for on this index.
+In this case we are waiting for all shard copies of the index to be active.

+If the index is in the ERROR step, something has gone wrong while executing a
+step in the policy; the failure will need to be investigated and resolved for the index
+to make progress. To help determine how to resolve the error, the explain response
+shows the step that failed in `failed_step` and information on the error
+that occurred in `step_info`.
+ +[source,js] +-------------------------------------------------- +{ + "indices": { + "test-000056": { + "index": "test-000056", + "managed": true, + "policy": "my_lifecycle3", + "lifecycle_date_millis": 1538475653281, + "lifecycle_date": "2018-10-15T13:45:21.981Z", + "phase": "hot", + "phase_time_millis": 1538475653317, + "phase_time": "2018-10-15T13:45:22.577Z", + "action": "rollover", + "action_time_millis": 1538475653317, + "action_time": "2018-10-15T13:45:22.577Z", + "step": "ERROR", + "step_time_millis": 1538475653317, + "step_time": "2018-10-15T13:45:22.577Z", + "failed_step": "attempt_rollover", <1> + "step_info": { <2> + "type": "resource_already_exists_exception", + "reason": "index [test-000057/H7lF9n36Rzqa-KfKcnGQMg] already exists", + "index_uuid": "H7lF9n36Rzqa-KfKcnGQMg", + "index": "test-000057" + }, + "phase_execution": { + "policy": "my_lifecycle3", + "phase_definition": { + "min_age": "0ms", + "actions": { + "rollover": { + "max_age": "30s" + } + } + }, + "version": 3, + "modified_date": "2018-10-15T13:21:41.576Z", + "modified_date_in_millis": 1539609701576 + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TESTRESPONSE[skip:not possible to get the cluster into this state in a docs test] +<1> The step that caused an error +<2> Information on the error that occurred. In this case the next index already +existed when the rollover operation was performed diff --git a/docs/reference/ilm/apis/get-lifecycle.asciidoc b/docs/reference/ilm/apis/get-lifecycle.asciidoc new file mode 100644 index 0000000000000..dbc8a572903b3 --- /dev/null +++ b/docs/reference/ilm/apis/get-lifecycle.asciidoc @@ -0,0 +1,115 @@ +[role="xpack"] +[testenv="basic"] +[[ilm-get-lifecycle]] +=== Get Lifecycle Policy API +++++ +Get Policy +++++ + +Retrieves an existing policy + +==== Request + +`GET _ilm/policy` +`GET _ilm/policy/` + +==== Description + +This API returns a policy definition along with some of its metadata like +its last modified date and version. If no path parameters are provided, then +all the policies defined will be returned. + +==== Path Parameters + +`policy` (optional):: + (string) Identifier for the policy. + +==== Request Parameters + +`timeout`:: + (time units) Specifies the period of time to wait for the completion of the + GET operation. When this period of time elapses, the API fails and returns + an error. The default value is `30s`. For more information about time units, + see <>. + +`master_timeout`:: + (time units) Specifies the period of time to wait for the connection with master. + When this period of time elapses, the API fails and returns an error. + The default value is `30s`. For more information about time units, see <>. 
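+
+A single policy can also be retrieved by name rather than listing all
+policies. A minimal sketch, assuming a policy named `my_policy` already
+exists:
+
+[source,js]
+--------------------------------------------------
+GET _ilm/policy/my_policy
+--------------------------------------------------
+// NOTCONSOLE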
+ + +==== Examples + +The following example retrieves the policy named `my_policy`: + +////////////////////////// + +[source,js]
+--------------------------------------------------
+PUT _ilm/policy/my_policy
+{
+  "policy": {
+    "phases": {
+      "warm": {
+        "min_age": "10d",
+        "actions": {
+          "forcemerge": {
+            "max_num_segments": 1
+          }
+        }
+      },
+      "delete": {
+        "min_age": "30d",
+        "actions": {
+          "delete": {}
+        }
+      }
+    }
+  }
+}
+--------------------------------------------------
+// CONSOLE
+// TEST

+////////////////////////// + +[source,js]
+--------------------------------------------------
+GET _ilm/policy
+--------------------------------------------------
+// CONSOLE
+// TEST[continued]

+If the request does not encounter errors, you receive the following result:

+[source,js]
+--------------------------------------------------
+{
+  "my_policy": {
+    "version": 1, <1>
+    "modified_date": 82392349, <2>
+    "policy": {
+      "phases": {
+        "warm": {
+          "min_age": "10d",
+          "actions": {
+            "forcemerge": {
+              "max_num_segments": 1
+            }
+          }
+        },
+        "delete": {
+          "min_age": "30d",
+          "actions": {
+            "delete": {}
+          }
+        }
+      }
+    }
+  }
+}
+--------------------------------------------------
+// CONSOLE
+// TESTRESPONSE[s/"modified_date": 82392349/"modified_date": $body.my_policy.modified_date/]
+<1> The version of the policy. This is increased whenever the policy is updated
+<2> The timestamp when this policy was last modified diff --git a/docs/reference/ilm/apis/get-status.asciidoc b/docs/reference/ilm/apis/get-status.asciidoc new file mode 100644 index 0000000000000..8f5d2289ff2ea --- /dev/null +++ b/docs/reference/ilm/apis/get-status.asciidoc @@ -0,0 +1,55 @@ +[role="xpack"] +[testenv="basic"] +[[ilm-get-status]] +=== Get ILM Status API +++++ +Get ILM Status +++++ + +Gets the current status for ILM. + +==== Request + +`GET /_ilm/status` + +==== Description + +This API will return the current status of the ILM plugin. The response contains +an `operation_mode` field which shows whether the ILM plugin is `RUNNING`, `STOPPING`, +or `STOPPED`. This `operation_mode` is controlled by the <> +and <> APIs. + +==== Request Parameters + +`timeout`:: + (time units) Specifies the period of time to wait for the completion of the + get operation. When this period of time elapses, the API fails and returns + an error. The default value is `30s`. For more information about time units, + see <>. + +`master_timeout`:: + (time units) Specifies the period of time to wait for the connection with master. + When this period of time elapses, the API fails and returns an error. + The default value is `30s`. For more information about time units, see <>. + + +==== Examples + +The following example retrieves the current ILM status: + +[source,js]
+--------------------------------------------------
+GET _ilm/status
+--------------------------------------------------
+// CONSOLE

+If the request does not encounter errors, you receive the following result:

+[source,js]
+--------------------------------------------------
+{
+  "operation_mode": "RUNNING"
+}
+--------------------------------------------------
+// CONSOLE
+// TESTRESPONSE diff --git a/docs/reference/ilm/apis/ilm-api.asciidoc b/docs/reference/ilm/apis/ilm-api.asciidoc new file mode 100644 index 0000000000000..49c7d2155d516 --- /dev/null +++ b/docs/reference/ilm/apis/ilm-api.asciidoc @@ -0,0 +1,42 @@ +[[index-lifecycle-management-api]] +== Index Lifecycle Management API + +You can use the following APIs to manage policies on indices.
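+
+These endpoints are designed to be used together. The following is a rough
+sketch of how they combine (request bodies are elided; `my_policy` and
+`my_index` are placeholder names, and the calls are shown in a typical order
+rather than as a runnable test):
+
+[source,js]
+--------------------------------------------------
+PUT _ilm/policy/my_policy    <1>
+GET _ilm/policy/my_policy    <2>
+GET my_index/_ilm/explain    <3>
+POST my_index/_ilm/retry     <4>
+--------------------------------------------------
+// NOTCONSOLE
+<1> Create or update a policy
+<2> Retrieve the policy definition and its metadata
+<3> See where an index currently is in its lifecycle
+<4> Re-run a policy for an index that is in the ERROR step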
+ +[float] +[[ilm-api-policy-endpoint]] +=== Policy Management APIs + +* <> +* <> +* <> + +[float] +[[ilm-api-index-endpoint]] +=== Index Management APIs + +* <> +* <> + +[float] +[[ilm-api-management-endpoint]] +=== Operation Management APIs + +* <> +* <> +* <> +* <> + + +include::put-lifecycle.asciidoc[] +include::get-lifecycle.asciidoc[] +include::delete-lifecycle.asciidoc[] + +include::move-to-step.asciidoc[] +include::remove-policy.asciidoc[] +include::retry-policy.asciidoc[] + +include::get-status.asciidoc[] +include::explain.asciidoc[] +include::start.asciidoc[] +include::stop.asciidoc[] diff --git a/docs/reference/ilm/apis/move-to-step.asciidoc b/docs/reference/ilm/apis/move-to-step.asciidoc new file mode 100644 index 0000000000000..c34b800856c10 --- /dev/null +++ b/docs/reference/ilm/apis/move-to-step.asciidoc @@ -0,0 +1,121 @@ +[role="xpack"] +[testenv="basic"] +[[ilm-move-to-step]] +=== Move To Step API +++++ +Move To Step +++++ + +Moves a managed index into a specific execution step of its policy. + +==== Request + +`POST _ilm/move/` + +==== Description + +WARNING: This is an expert API that may lead to unintended data loss. When used, +an index's policy will begin executing at the specified step. It will execute +the specified step even if it has already been executed. Since this is a potentially +dangerous action, specifying both the current step and the next step to move to is +required in the body of the request. + +This API changes the current step for the specified index to the step supplied in the body of the request. + +==== Path Parameters + +`index` (required):: + (string) Identifier for the index. + +==== Request Parameters + +`timeout`:: + (time units) Specifies the period of time to wait for the completion of the + move operation. When this period of time elapses, the API fails and returns + an error. The default value is `30s`. For more information about time units, + see <>. + +`master_timeout`:: + (time units) Specifies the period of time to wait for the connection with master. + When this period of time elapses, the API fails and returns an error. + The default value is `30s`. For more information about time units, see <>.
+ + +==== Examples + +The following example moves the index `my_index` from the initial step to the +forcemerge step: + +////////////////////////// + +[source,js]
+--------------------------------------------------
+PUT _ilm/policy/my_policy
+{
+  "policy": {
+    "phases": {
+      "warm": {
+        "min_age": "10d",
+        "actions": {
+          "forcemerge": {
+            "max_num_segments": 1
+          }
+        }
+      },
+      "delete": {
+        "min_age": "30d",
+        "actions": {
+          "delete": {}
+        }
+      }
+    }
+  }
+}

+PUT my_index
+{
+  "settings": {
+    "index.lifecycle.name": "my_policy"
+  }
+}
+--------------------------------------------------
+// CONSOLE
+// TEST

+////////////////////////// + +[source,js]
+--------------------------------------------------
+POST _ilm/move/my_index
+{
+  "current_step": { <1>
+    "phase": "new",
+    "action": "complete",
+    "name": "complete"
+  },
+  "next_step": { <2>
+    "phase": "warm",
+    "action": "forcemerge",
+    "name": "forcemerge"
+  }
+}
+--------------------------------------------------
+// CONSOLE
+// TEST[continued]
+<1> The step that the index is currently expected to be executing
+<2> The step that the index should move to when executing this request

+If the request does not encounter errors, you receive the following result:

+[source,js]
+--------------------------------------------------
+{
+  "acknowledged": true
+}
+--------------------------------------------------
+// CONSOLE
+// TESTRESPONSE

+NOTE: An error will be returned if the index is no longer executing the step
+specified in `current_step`. This prevents the index from being moved from an
+unexpected step into the `next_step`. diff --git a/docs/reference/ilm/apis/put-lifecycle.asciidoc b/docs/reference/ilm/apis/put-lifecycle.asciidoc new file mode 100644 index 0000000000000..36650078db652 --- /dev/null +++ b/docs/reference/ilm/apis/put-lifecycle.asciidoc @@ -0,0 +1,82 @@ +[role="xpack"] +[testenv="basic"] +[[ilm-put-lifecycle]] +=== Put Lifecycle Policy API +++++ +Put Policy +++++ + +Creates or updates an ILM policy. + +==== Request + +`PUT _ilm/policy/` + +==== Description + +This API creates a new lifecycle policy, or updates an existing one with the same +identifier. Each call will replace the existing policy and increment the `version` +associated with the policy. + +NOTE: The `version` is only for informational purposes. Only the latest version +of the policy is stored. + +==== Path Parameters + +`policy` (required):: + (string) Identifier for the policy. + +==== Request Parameters + +`timeout`:: + (time units) Specifies the period of time to wait for the completion of the + PUT operation. When this period of time elapses, the API fails and returns + an error. The default value is `30s`. For more information about time units, + see <>. + +`master_timeout`:: + (time units) Specifies the period of time to wait for the connection with master. + When this period of time elapses, the API fails and returns an error. + The default value is `30s`. For more information about time units, see <>.
+ + +==== Examples + +The following example creates a new policy named `my_policy`: + +[source,js]
+--------------------------------------------------
+PUT _ilm/policy/my_policy
+{
+  "policy": {
+    "phases": {
+      "warm": {
+        "min_age": "10d",
+        "actions": {
+          "forcemerge": {
+            "max_num_segments": 1
+          }
+        }
+      },
+      "delete": {
+        "min_age": "30d",
+        "actions": {
+          "delete": {}
+        }
+      }
+    }
+  }
+}
+--------------------------------------------------
+// CONSOLE
+// TEST

+If the request does not encounter errors, you receive the following result:
+[source,js]
+----
+{
+  "acknowledged": true
+}
+----
+// CONSOLE
+// TESTRESPONSE diff --git a/docs/reference/ilm/apis/remove-policy.asciidoc b/docs/reference/ilm/apis/remove-policy.asciidoc new file mode 100644 index 0000000000000..f37ac9a715d28 --- /dev/null +++ b/docs/reference/ilm/apis/remove-policy.asciidoc @@ -0,0 +1,98 @@ +[role="xpack"] +[testenv="basic"] +[[ilm-remove-policy]] +=== Remove Policy On Index API +++++ +Remove Policy From Index +++++ + +Unassigns a policy from a specified index pattern. + +==== Request + +`POST /_ilm/remove` + +==== Description + +This action removes a policy from managing an index. It is effectively the same as setting an index's +`index.lifecycle.name` setting to null. + +==== Path Parameters + +`index` (required):: + (string) Identifier for the index. + +==== Request Parameters + +`timeout`:: + (time units) Specifies the period of time to wait for the completion of the + operation. When this period of time elapses, the API fails and returns + an error. The default value is `30s`. For more information about time units, + see <>. + +`master_timeout`:: + (time units) Specifies the period of time to wait for the connection with master. + When this period of time elapses, the API fails and returns an error. + The default value is `30s`. For more information about time units, see <>. + + +==== Examples + +The following example removes the policy `my_policy` from the index `my_index`. + +////////////////////////// + +[source,js]
+--------------------------------------------------
+PUT _ilm/policy/my_policy
+{
+  "policy": {
+    "phases": {
+      "warm": {
+        "min_age": "10d",
+        "actions": {
+          "forcemerge": {
+            "max_num_segments": 1
+          }
+        }
+      },
+      "delete": {
+        "min_age": "30d",
+        "actions": {
+          "delete": {}
+        }
+      }
+    }
+  }
+}

+PUT my_index
+{
+  "settings": {
+    "index.lifecycle.name": "my_policy"
+  }
+}
+--------------------------------------------------
+// CONSOLE
+// TEST

+////////////////////////// + +[source,js]
+--------------------------------------------------
+POST my_index/_ilm/remove
+--------------------------------------------------
+// CONSOLE
+// TEST[continued]

+If the request does not encounter errors, you receive the following result:

+[source,js]
+--------------------------------------------------
+{
+  "has_failures" : false,
+  "failed_indexes" : []
+}
+--------------------------------------------------
+// CONSOLE
+// TESTRESPONSE diff --git a/docs/reference/ilm/apis/retry-policy.asciidoc b/docs/reference/ilm/apis/retry-policy.asciidoc new file mode 100644 index 0000000000000..7c81f9423ef12 --- /dev/null +++ b/docs/reference/ilm/apis/retry-policy.asciidoc @@ -0,0 +1,59 @@ +[role="xpack"] +[testenv="basic"] +[[ilm-retry-policy]] +=== Retry Policy Execution API +++++ +Retry Policy Execution +++++ + +Retry executing the policy for an index which has errored. + +==== Request + +`POST /_ilm/retry` + +==== Description + +This API will re-run a policy that is currently in the ERROR step.
It will set the +policy back to the step where the error occurred and attempt to re-execute it. +Information on whether an index is in the ERROR step can be obtained from the +<> + +==== Path Parameters + +`index` (required):: + (string) Identifier for the indices to retry in comma-separated format. + +==== Request Parameters + +`timeout`:: + (time units) Specifies the period of time to wait for the completion of the + retry operation. When this period of time elapses, the API fails and returns + an error. The default value is `30s`. For more information about time units, + see <>. + +`master_timeout`:: + (time units) Specifies the period of time to wait for the connection with master. + When this period of time elapses, the API fails and returns an error. + The default value is `30s`. For more information about time units, see <>. + + +==== Examples + +The following example retries the policy for index `my_index`. + +[source,js] +-------------------------------------------------- +POST my_index/_ilm/retry +-------------------------------------------------- +// NOTCONSOLE + +If the request does not encounter errors, you receive the following result: + +[source,js] +-------------------------------------------------- +{ + "acknowledged": true +} +-------------------------------------------------- +// NOTCONSOLE diff --git a/docs/reference/ilm/apis/start.asciidoc b/docs/reference/ilm/apis/start.asciidoc new file mode 100644 index 0000000000000..073a584e4d872 --- /dev/null +++ b/docs/reference/ilm/apis/start.asciidoc @@ -0,0 +1,90 @@ +[role="xpack"] +[testenv="basic"] +[[ilm-start]] +=== Start ILM API +++++ +Start ILM +++++ + +Start the ILM plugin + +==== Request + +`POST /_ilm/start` + +==== Description + +This API will start the ILM plugin if it is currently stopped. ILM is started +by default when the cluster is formed so this API is only needed if ILM has +been stopped using the <>. + +==== Request Parameters + +`timeout`:: + (time units) Specifies the period of time to wait for the completion of the + start operation. When this period of time elapses, the API fails and returns + an error. The default value is `30s`. For more information about time units, + see <>. + +`master_timeout`:: + (time units) Specifies the period of time to wait for the connection with master. + When this period of time elapses, the API fails and returns an error. + The default value is `30s`. For more information about time units, see <>. + + +==== Examples + +The following example starts the ILM plugin. 
+ +////////////////////////// + +[source,js]
+--------------------------------------------------
+PUT _ilm/policy/my_policy
+{
+  "policy": {
+    "phases": {
+      "warm": {
+        "min_age": "10d",
+        "actions": {
+          "forcemerge": {
+            "max_num_segments": 1
+          }
+        }
+      },
+      "delete": {
+        "min_age": "30d",
+        "actions": {
+          "delete": {}
+        }
+      }
+    }
+  }
+}

+PUT my_index

+POST _ilm/stop
+--------------------------------------------------
+// CONSOLE
+// TEST

+////////////////////////// + +[source,js]
+--------------------------------------------------
+POST _ilm/start
+--------------------------------------------------
+// CONSOLE
+// TEST[continued]

+If the request does not encounter errors, you receive the following result:

+[source,js]
+--------------------------------------------------
+{
+  "acknowledged": true
+}
+--------------------------------------------------
+// CONSOLE
+// TESTRESPONSE diff --git a/docs/reference/ilm/apis/stop.asciidoc b/docs/reference/ilm/apis/stop.asciidoc new file mode 100644 index 0000000000000..cdc038adabcfc --- /dev/null +++ b/docs/reference/ilm/apis/stop.asciidoc @@ -0,0 +1,101 @@ +[role="xpack"] +[testenv="basic"] +[[ilm-stop]] +=== Stop ILM API +++++ +Stop ILM +++++ + +Stop the ILM plugin. + +==== Request + +`POST /_ilm/stop` + +==== Description + +This API will stop the ILM plugin. This can be used during periods where +maintenance is required and ILM should not perform any actions on any indices. +The API will return as soon as the stop request has been acknowledged, but the +plugin may not stop immediately; it may need to wait for some operations +to finish before it stops. Progress can be seen using the +<> API. + +==== Request Parameters + +`timeout`:: + (time units) Specifies the period of time to wait for the response. When this + period of time elapses, the API fails and returns an error. The default value + is `30s`. For more information about time units, see <>. + +`master_timeout`:: + (time units) Specifies the period of time to wait for the connection with master. + When this period of time elapses, the API fails and returns an error. + The default value is `30s`. For more information about time units, see <>. + + +==== Examples + +The following example stops the ILM plugin.
+ +////////////////////////// + +[source,js]
+--------------------------------------------------
+PUT _ilm/policy/my_policy
+{
+  "policy": {
+    "phases": {
+      "warm": {
+        "min_age": "10d",
+        "actions": {
+          "forcemerge": {
+            "max_num_segments": 1
+          }
+        }
+      },
+      "delete": {
+        "min_age": "30d",
+        "actions": {
+          "delete": {}
+        }
+      }
+    }
+  }
+}

+PUT my_index
+--------------------------------------------------
+// CONSOLE
+// TEST

+////////////////////////// + +[source,js]
+--------------------------------------------------
+POST _ilm/stop
+--------------------------------------------------
+// CONSOLE
+// TEST[continued]

+If the request does not encounter errors, you receive the following result:

+[source,js]
+--------------------------------------------------
+{
+  "acknowledged": true
+}
+--------------------------------------------------
+// CONSOLE
+// TESTRESPONSE

+////////////////////////// + +[source,js]
+--------------------------------------------------
+POST _ilm/start
+--------------------------------------------------
+// CONSOLE
+// TEST[continued]

+////////////////////////// diff --git a/docs/reference/ilm/get-index-lifecycle-information.asciidoc b/docs/reference/ilm/get-index-lifecycle-information.asciidoc new file mode 100644 index 0000000000000..3d5dc8a172010 --- /dev/null +++ b/docs/reference/ilm/get-index-lifecycle-information.asciidoc @@ -0,0 +1,11 @@ +[role="xpack"] +[[get-index-lifecycle-information]] +== Get index lifecycle information + +Execution Model +Discuss how actions are actually split up into discrete steps and how you can see more information about where an index is within a policy (info and all) +Talk about the jump-to-step API +Error Handling +Show error in explain api +Demonstrate the retry API +Show how to get a sense of progress for things like the allocate step diff --git a/docs/reference/ilm/getting-started-ilm.asciidoc b/docs/reference/ilm/getting-started-ilm.asciidoc new file mode 100644 index 0000000000000..ad3596e92a1ad --- /dev/null +++ b/docs/reference/ilm/getting-started-ilm.asciidoc @@ -0,0 +1,15 @@ +[role="xpack"] +[[getting-started-index-lifecycle-management]] +== Getting started with {ilm} + +Create a policy that rolls over after 1 day and deletes an index after 30 days + +Show create policy API req/res + +Show assign policy to index API req/res + +Show both the API and how it is done with `index.lifecycle.name` using the +create-index API + +Show explain API to show current state, but ignore the “step” related info, +only focus on managed/phase/action diff --git a/docs/reference/ilm/index.asciidoc b/docs/reference/ilm/index.asciidoc new file mode 100644 index 0000000000000..d85f92fb1c28a --- /dev/null +++ b/docs/reference/ilm/index.asciidoc @@ -0,0 +1,62 @@ +[role="xpack"] +[testenv="basic"] +[[index-lifecycle-management]] += Managing Indices + +:ilm: index lifecycle management +:ILM: Index lifecycle management +[partintro] +-- +The <> enable you to automate how you +want to manage your indices over time. Rather than simply performing management +actions on your indices on a set schedule, you can base actions on other factors +such as shard size and performance requirements. + +You control how indices are handled as they age by attaching a +lifecycle policy to the index template used to create them. You can update +the policy to modify the lifecycle of both new and existing indices. + +For time series indices, there are four stages in the index lifecycle: + +* Hot--the index is actively being updated and queried.
+* Warm--the index is no longer being updated, but is still being queried. +* Cold--the index is no longer being updated and is seldom queried. The +information still needs to be searchable, but it's okay if those queries are +slower. +* Delete--the index is no longer needed and can safely be deleted. + +The lifecycle policy governs how the index transitions through these stages and +the actions that are performed on the index at each stage. The policy can +specify: + +* The maximum size or age at which you want to roll over to a new index. +* The point at which the index is no longer being updated and the number of +primary shards can be reduced. +* When to force a merge to permanently delete documents marked for deletion. +* The point at which the index can be moved to less performant hardware. +* The point at which the availability is not as critical and the number of +replicas can be reduced. +* When the index can be safely deleted. + +For example, if you are indexing metrics data from a fleet of ATMs into +Elasticsearch, you might define a policy that says: + +. When the index reaches 5GB, roll over to a new index. +. Move the old index into the warm stage, mark it read only, and shrink it down +to a single shard. +. After 7 days, move the index into the cold stage and move it to less expensive +hardware. +. Delete the index once the required 30 day retention period is reached. +-- + +include::getting-started-ilm.asciidoc[] + +include::using-policies-rollover.asciidoc[] + +include::set-up-lifecycle-policy.asciidoc[] + +include::update-lifecycle-policy.asciidoc[] + +include::get-index-lifecycle-information.asciidoc[] + +include::start-stop-ilm.asciidoc[] diff --git a/docs/reference/ilm/set-up-lifecycle-policy.asciidoc b/docs/reference/ilm/set-up-lifecycle-policy.asciidoc new file mode 100644 index 0000000000000..7f5bb84c598a4 --- /dev/null +++ b/docs/reference/ilm/set-up-lifecycle-policy.asciidoc @@ -0,0 +1,112 @@ +[role="xpack"] +[testenv="basic"] +[[set-up-lifecycle-policy]] +== Set up {ilm} policy + +In order for an index to use an {ilm} policy to manage its lifecycle, we must +first define a lifecycle policy for it to use. The following request creates +a policy called `my_policy` in Elasticsearch which we can later use to manage +our indexes. + +[source,js]
+------------------------
+PUT _ilm/policy/my_policy
+{
+  "policy": {
+    "phases": {
+      "hot": {
+        "actions": {
+          "rollover": {
+            "max_size": "25GB" <1>
+          }
+        }
+      },
+      "delete": {
+        "min_age": "30d",
+        "actions": {
+          "delete": {} <2>
+        }
+      }
+    }
+  }
+}
+------------------------
+// CONSOLE
+<1> Rollover the index when it reaches 25GB in size
+<2> Delete the index when it is 30 days old

+{ilm} will manage an index using the policy defined in the +`index.lifecycle.name` index setting. If this setting does not exist in the +settings for a particular index, {ilm} will not manage that index. + +To set the policy for an index there are two options: + +1. Apply the policy to an index template and bootstrap creating the first index +2.
Apply the policy to a new index in a create index request + +=== Applying a policy to an index template + +The `index.lifecycle.name` setting can be set in an index template so that it +is automatically applied to indexes matching the template's index pattern: + +[source,js]
+-----------------------
+PUT _template/my_template
+{
+  "index_patterns": ["test-*"], <1>
+  "settings": {
+    "number_of_shards": 1,
+    "number_of_replicas": 1,
+    "index.lifecycle.name": "my_policy", <2>
+    "index.lifecycle.rollover_alias": "test-alias"
+  }
+}
+-----------------------
+// CONSOLE
+<1> This template will be applied to all indexes which have a name starting
+with `test-`
+<2> The template will set the policy to be used to `my_policy`

+Now that a policy exists and is used in an index template, we can create an +initial index which will be managed by our policy: + +[source,js]
+-----------------------
+PUT test-000001
+{
+  "aliases": {
+    "test-alias":{
+      "is_write_index": true <1>
+    }
+  }
+}
+-----------------------
+// CONSOLE
+<1> Set this initial index to be the write index for this alias.

+We can now write data to the `test-alias` alias. Because we have a rollover +action defined in our policy, when the index grows larger than 25GB {ilm} will +create a new index and roll the alias over to use the new index automatically. + +=== Apply a policy to a create index request + +The `index.lifecycle.name` setting can be set on an individual create index +request so {ilm} immediately starts managing the index: + +[source,js]
+-----------------------
+PUT test-index
+{
+  "settings": {
+    "number_of_shards": 1,
+    "number_of_replicas": 1,
+    "index.lifecycle.name": "my_policy"
+  }
+}
+-----------------------
+// CONSOLE

+IMPORTANT: It is recommended not to use the create index API with a policy that +defines a rollover action. If you do so, the new index created by the +rollover will not carry forward the policy. Always use index templates to +define policies with rollover actions. + diff --git a/docs/reference/ilm/start-stop-ilm.asciidoc b/docs/reference/ilm/start-stop-ilm.asciidoc new file mode 100644 index 0000000000000..938b97d44721f --- /dev/null +++ b/docs/reference/ilm/start-stop-ilm.asciidoc @@ -0,0 +1,168 @@ +[role="xpack"] +[testenv="basic"] +[[start-stop-ilm]] +== Start and stop {ilm} + +All indices that are managed by ILM will continue to execute +their policies. There may be times when this is not desired on certain +indices, or maybe even all the indices in a cluster. For example, +there may be scheduled maintenance windows when cluster topology +changes are desired that may impact running ILM actions. For this reason, +ILM has two ways to disable operations. + +ILM runs by default. +To see the current operating status of ILM, use the <>.
+ +//// +[source,js]
+--------------------------------------------------
+PUT _ilm/policy/my_policy
+{
+  "policy": {
+    "phases": {
+      "warm": {
+        "min_age": "10d",
+        "actions": {
+          "forcemerge": {
+            "max_num_segments": 1
+          }
+        }
+      },
+      "delete": {
+        "min_age": "30d",
+        "actions": {
+          "delete": {}
+        }
+      }
+    }
+  }
+}

+PUT my_index
+{
+  "settings": {
+    "index.lifecycle.name": "my_policy"
+  }
+}
+--------------------------------------------------
+// CONSOLE
+////

+[source,js]
+--------------------------------------------------
+GET _ilm/status
+--------------------------------------------------
+// CONSOLE

+If the request does not encounter errors, you receive the following result:

+[source,js]
+--------------------------------------------------
+{
+  "operation_mode": "RUNNING"
+}
+--------------------------------------------------
+// CONSOLE
+// TESTRESPONSE

+The operating modes of ILM: + + +.ILM Operating Modes +|=== +|Name |Description +|RUNNING |Normal operation where all policies are executed as normal +|STOPPING|ILM has received a request to stop but is still processing some policies +|STOPPED |This represents a state where no policies are executed +|=== + +=== Stopping ILM + +The ILM service can be paused, so that no further steps are executed, by +using the <>. + +[source,js]
+--------------------------------------------------
+POST _ilm/stop
+--------------------------------------------------
+// CONSOLE
+// TEST[continued]

+When stopped, all further policy actions will be halted. This will +be reflected in the Status API: + +//// +[source,js]
+--------------------------------------------------
+GET _ilm/status
+--------------------------------------------------
+// CONSOLE
+// TEST[continued]
+////

+[source,js]
+--------------------------------------------------
+{
+  "operation_mode": "STOPPING"
+}
+--------------------------------------------------
+// CONSOLE
+// TESTRESPONSE

+The ILM service will then, asynchronously, run all policies to a point +where it is safe to stop. After ILM verifies that it is safe, it will +move to the `STOPPED` mode. + +//// +[source,js]
+--------------------------------------------------
+PUT trigger_ilm_cs_action

+GET _ilm/status
+--------------------------------------------------
+// CONSOLE
+// TEST[continued]
+////

+[source,js]
+--------------------------------------------------
+{
+  "operation_mode": "STOPPED"
+}
+--------------------------------------------------
+// CONSOLE
+// TESTRESPONSE

+=== Starting ILM + +To start ILM and continue executing policies, use the <>. + + +[source,js]
+--------------------------------------------------
+POST _ilm/start
+--------------------------------------------------
+// CONSOLE
+// TEST[continued]

+//// +[source,js]
+--------------------------------------------------
+GET _ilm/status
+--------------------------------------------------
+// CONSOLE
+// TEST[continued]
+////

+The Start API will send a request to the ILM service to immediately begin +normal operations.
+ +[source,js]
+--------------------------------------------------
+{
+  "operation_mode": "RUNNING"
+}
+--------------------------------------------------
+// CONSOLE
+// TESTRESPONSE diff --git a/docs/reference/ilm/update-lifecycle-policy.asciidoc b/docs/reference/ilm/update-lifecycle-policy.asciidoc new file mode 100644 index 0000000000000..334b5a953fd0e --- /dev/null +++ b/docs/reference/ilm/update-lifecycle-policy.asciidoc @@ -0,0 +1,432 @@ +[role="xpack"] +[testenv="basic"] +[[update-lifecycle-policy]] +== Update lifecycle policy + +Updating existing ILM policies is useful to fix mistakes or change +strategies for newly created indices. It is possible to update policy definitions +and an index's `index.lifecycle.name` setting independently. To prevent a phase +definition from being modified while it is being executed on an index, each index +will keep the version of the current phase definition it began execution with until it completes. + +There are three scenarios for examining the behavior of updating policies and +their effects on policy execution on indices. + +=== Updates to policies not managing indices + +Indices that do not reference the policy being updated will not be affected. +When an index is assigned to the policy, it will be assigned the latest version of that policy. + +To show this, let's create a policy `my_policy`. + +[source,js]
+------------------------
+PUT _ilm/policy/my_policy
+{
+  "policy": {
+    "phases": {
+      "hot": {
+        "actions": {
+          "rollover": {
+            "max_size": "25GB"
+          }
+        }
+      },
+      "delete": {
+        "min_age": "30d",
+        "actions": {
+          "delete": {}
+        }
+      }
+    }
+  }
+}
+------------------------
+// CONSOLE

+This newly defined policy will be created with a version equal +to 1. Since we haven't assigned any indices to this policy, any updates that +occur will be reflected completely on indices that are newly set to be managed +by this policy. + +Updating the delete phase's minimum age can be done in an update request. + +[source,js]
+------------------------
+PUT _ilm/policy/my_policy
+{
+  "policy": {
+    "phases": {
+      "hot": {
+        "actions": {
+          "rollover": {
+            "max_size": "25GB"
+          }
+        }
+      },
+      "delete": {
+        "min_age": "10d", <1>
+        "actions": {
+          "delete": {}
+        }
+      }
+    }
+  }
+}
+------------------------
+// CONSOLE
+// TEST[continued]
+<1> update `min_age` to 10 days

+////////// +[source,js]
+--------------------------------------------------
+GET _ilm/policy
+--------------------------------------------------
+// CONSOLE
+// TEST[continued]
+//////////

+When we get the policy, we will see it reflect our latest changes, but +with its version bumped to 2. + +[source,js]
+--------------------------------------------------
+{
+  "my_policy": {
+    "version": 2, <1>
+    "modified_date": 82392349, <2>
+    "policy": {
+      "phases": {
+        "hot": {
+          "min_age": "0ms",
+          "actions": {
+            "rollover": {
+              "max_size": "25gb"
+            }
+          }
+        },
+        "delete": {
+          "min_age": "10d",
+          "actions": {
+            "delete": {}
+          }
+        }
+      }
+    }
+  }
+}
+--------------------------------------------------
+// CONSOLE
+// TESTRESPONSE[s/"modified_date": 82392349/"modified_date": $body.my_policy.modified_date/]
+<1> The updated version value
+<2> The timestamp when this policy was updated last.

+Afterwards, any indices set to `my_policy` will execute against version 2 of +the policy. + +=== Updates to executing policies + +Indices preserve the phase definition from the latest policy version that existed +at the time they entered that phase.
Changes to the currently-executing phase within policy updates will +not be reflected during execution. This means that updates to the `hot` phase, for example, will not affect +indices that are currently executing the corresponding `hot` phase. + +Let's say we have an index `my_index` managed by the `my_executing_policy` definition below. + +[source,js]
+------------------------
+PUT _ilm/policy/my_executing_policy
+{
+  "policy": {
+    "phases": {
+      "hot": {
+        "actions": {
+          "rollover": {
+            "max_docs": 1
+          }
+        }
+      },
+      "delete": {
+        "min_age": "10d",
+        "actions": {
+          "delete": {}
+        }
+      }
+    }
+  }
+}
+------------------------
+// CONSOLE

+//// +[source,js]
+------------------------
+PUT my_index
+{
+  "settings": {
+    "index.lifecycle.name": "my_executing_policy"
+  }
+}
+------------------------
+// CONSOLE
+// TEST[continued]
+////

+The <> is useful for introspecting managed indices to see which phase definition they are currently executing. +Using this API, we can find out that `my_index` is currently attempting to be rolled over. + +[source,js]
+--------------------------------------------------
+GET my_index/_ilm/explain
+--------------------------------------------------
+// CONSOLE
+// TEST[continued]

+[source,js]
+--------------------------------------------------
+{
+  "indices": {
+    "my_index": {
+      "index": "my_index",
+      "managed": true,
+      "policy": "my_executing_policy",
+      "lifecycle_date_millis": 1538475653281,
+      "phase": "hot",
+      "phase_time_millis": 1538475653317,
+      "action": "rollover",
+      "action_time_millis": 1538475653317,
+      "step": "attempt_rollover",
+      "step_time_millis": 1538475653317,
+      "phase_execution": {
+        "policy": "my_executing_policy",
+        "modified_date_in_millis": 1538475653317,
+        "version": 1,
+        "phase_definition": {
+          "min_age": "0ms",
+          "actions": {
+            "rollover": {
+              "max_docs": 1
+            }
+          }
+        }
+      }
+    }
+  }
+}
+--------------------------------------------------
+// CONSOLE
+// TESTRESPONSE[s/"lifecycle_date_millis": 1538475653281/"lifecycle_date_millis": $body.indices.my_index.lifecycle_date_millis/]
+// TESTRESPONSE[s/"phase_time_millis": 1538475653317/"phase_time_millis": $body.indices.my_index.phase_time_millis/]
+// TESTRESPONSE[s/"action_time_millis": 1538475653317/"action_time_millis": $body.indices.my_index.action_time_millis/]
+// TESTRESPONSE[s/"step_time_millis": 1538475653317/"step_time_millis": $body.indices.my_index.step_time_millis/]
+// TESTRESPONSE[s/"modified_date_in_millis": 1538475653317/"modified_date_in_millis": $body.indices.my_index.phase_execution.modified_date_in_millis/]

+Next, we update `my_executing_policy` to remove the rollover action and, instead, go directly into a newly introduced `warm` phase. + +[source,js]
+------------------------
+PUT _ilm/policy/my_executing_policy
+{
+  "policy": {
+    "phases": {
+      "warm": {
+        "min_age": "1d",
+        "actions": {
+          "forcemerge": {
+            "max_num_segments": 1
+          }
+        }
+      },
+      "delete": {
+        "min_age": "10d",
+        "actions": {
+          "delete": {}
+        }
+      }
+    }
+  }
+}
+------------------------
+// CONSOLE
+// TEST[continued]

+Now, version 2 of this policy has no `hot` phase, but if we run the Explain API again, we will see that nothing has changed. +The index `my_index` is still executing version 1 of the policy.
+ +//// +[source,js] +-------------------------------------------------- +GET my_index/_ilm/explain +-------------------------------------------------- +// CONSOLE +// TEST[continued] +//// + +[source,js] +-------------------------------------------------- +{ + "indices": { + "my_index": { + "index": "my_index", + "managed": true, + "policy": "my_executing_policy", + "lifecycle_date_millis": 1538475653281, + "phase": "hot", + "phase_time_millis": 1538475653317, + "action": "rollover", + "action_time_millis": 1538475653317, + "step": "attempt_rollover", + "step_time_millis": 1538475653317, + "phase_execution": { + "policy": "my_executing_policy", + "modified_date_in_millis": 1538475653317, + "version": 1, + "phase_definition": { + "min_age": "0ms", + "actions": { + "rollover": { + "max_docs": 1 + } + } + } + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TESTRESPONSE[s/"lifecycle_date_millis": 1538475653281/"lifecycle_date_millis": $body.indices.my_index.lifecycle_date_millis/] +// TESTRESPONSE[s/"phase_time_millis": 1538475653317/"phase_time_millis": $body.indices.my_index.phase_time_millis/] +// TESTRESPONSE[s/"action_time_millis": 1538475653317/"action_time_millis": $body.indices.my_index.action_time_millis/] +// TESTRESPONSE[s/"step_time_millis": 1538475653317/"step_time_millis": $body.indices.my_index.step_time_millis/] +// TESTRESPONSE[s/"modified_date_in_millis": 1538475653317/"modified_date_in_millis": $body.indices.my_index.phase_execution.modified_date_in_millis/] + +After indexing one document into `my_index` so that rollover succeeds and moves onto the next phase, we will notice something new. The +index will move into the next phase in the updated version 2 of its policy. + +//// +[source,js] +-------------------------------------------------- +PUT my_index/_doc/1 +{ + "foo": "bar" +} + +GET my_index/_ilm/explain +-------------------------------------------------- +// CONSOLE +// TEST[continued] +//// + +[source,js] +-------------------------------------------------- +{ + "indices": { + "my_index": { + "index": "my_index", + "managed": true, + "policy": "my_executing_policy", + "lifecycle_date_millis": 1538475653281, + "phase": "warm", + "phase_time_millis": 1538475653317, + "action": "forcemerge", + "action_time_millis": 1538475653317, + "step": "forcemerge", + "step_time_millis": 1538475653317, + "phase_execution": { + "policy": "my_executing_policy", + "modified_date_in_millis": 1538475653317, + "version": 2, <1> + "phase_definition": { + "min_age": "1d", + "actions": { + "forcemerge": { + "max_num_segments": 1 + } + } + } + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TESTRESPONSE[skip:There is no way to force the index to move to the next step in a timely manner] +<1> The index has moved to using version 2 of the policy + +`my_index` will move to the next phase in the latest policy definition, which is the newly added `warm` phase. + +=== Switching policies for an index + +Setting `index.lifecycle.name` to a different policy behaves much like a policy update, but instead of just +switching to a different version, it switches to a different policy. + +After setting a policy for an index, we can switch out `my_policy` with +`my_other_policy` by just updating the index's `index.lifecycle.name` +setting to the new policy. After completing its currently executed phase, +it will move on to the next phase in `my_other_policy`. 
So if it was in the +`hot` phase before, it will move to the `delete` phase once the `hot` phase has concluded. + +//// +[source,js] +------------------------ +PUT _ilm/policy/my_policy +{ + "policy": { + "phases": { + "hot": { + "actions": { + "rollover": { + "max_size": "25GB" + } + } + }, + "delete": { + "min_age": "10d", + "actions": { + "delete": {} + } + } + } + } +} + +PUT _ilm/policy/my_other_policy +{ + "policy": { + "phases": { + "delete": { + "min_age": "1d", + "actions": { + "delete": {} + } + } + } + } +} + +PUT my_index +{ + "settings": { + "index.lifecycle.name": "my_policy" + } +} +------------------------ +// CONSOLE + +//// + +[source,js] +-------------------------------------------------- +PUT my_index/_settings +{ + "lifecycle.name": "my_other_policy" +} +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +The change to the new policy will not happen immediately. The currently executing phase +of the existing policy for `my_index` will continue to execute until it completes. Once +completed, `my_index` will be managed by `my_other_policy`. diff --git a/docs/reference/ilm/using-policies-rollover.asciidoc b/docs/reference/ilm/using-policies-rollover.asciidoc new file mode 100644 index 0000000000000..f7982af4fec81 --- /dev/null +++ b/docs/reference/ilm/using-policies-rollover.asciidoc @@ -0,0 +1,117 @@ +[role="xpack"] +[testenv="basic"] +[[using-policies-rollover]] +== Using policies to manage index rollover + +The rollover action enables you to automatically roll over to a new index based +on the index size, document count, or age. When a rollover is triggered, a new +index is created, the write alias is updated to point to the new index, and all +subsequent updates are written to the new index. + +Rolling over to a new index based on size, document count, or age is preferable +to time-based rollovers. Rolling over at an arbitrary time often results in +many small indices, which can have a negative impact on performance and +resource usage. + +You control when the rollover action is triggered by specifying one or more +rollover parameters. The rollover is performed once any of the criteria are +met. Because the criteria are checked periodically, the index might grow +slightly beyond the specified threshold. To control how often the criteria are +checked, specify the `indices.lifecycle.poll_interval` cluster setting. + +The rollover action takes the following parameters: + +.`rollover` Action Parameters +|=== +|Name |Description +|max_size |The maximum estimated size the index is allowed to grow +to. Defaults to `null`. Optional. +|max_docs |The maximum number of documents the index should +contain. Defaults to `null`. Optional. +|max_age |The maximum age of the index. Defaults to `null`. Optional. +|=== + +These parameters are used to determine when the index is considered "full" and +a rollover should be performed. Where multiple criteria are defined, the +rollover operation will be performed once any of the criteria are met. + +The following request defines a policy with a rollover action that triggers +when the index size reaches 25GB. The old index is subsequently deleted after +30 days. + +NOTE: Once an index rolls over, {ilm} uses the timestamp of the rollover +operation rather than the index creation time to evaluate when to move the +index to the next phase. For indices that have rolled over, the `min_age` +criterion specified for a phase is relative to the rollover time.
In +this example, that means the index will be deleted 30 days after rollover, not +30 days from when the index was created. + +[source,js] +-------------------------------------------------- +PUT /_ilm/policy/my_policy +{ + "policy": { + "phases": { + "hot": { + "actions": { + "rollover": { + "max_size": "25GB" + } + } + }, + "delete": { + "min_age": "30d", + "actions": { + "delete": {} + } + } + } + } +} +-------------------------------------------------- +// CONSOLE + +To use an {ilm} policy, you need to specify it in the index template used to +create the indices. For example, the following template associates `my_policy` +with indices created from the template `my_template`. + +[source,js] +----------------------- +PUT _template/my_template +{ + "index_patterns": ["test-*"], <1> + "settings": { + "number_of_shards": 1, + "number_of_replicas": 1, + "index.lifecycle.name": "my_policy", <2> + "index.lifecycle.rollover_alias": "test-alias" <3> + } +} +----------------------- +// CONSOLE +<1> Template applies to all indices with the prefix `test-` +<2> Associates `my_policy` with all indices created with this template +<3> Rolls over the write alias `test-alias` when the rollover action is triggered + +To start using the policy for these `test-*` indices, we need to +bootstrap the process by creating the first index. + +[source,js] +----------------------- +PUT test-000001 <1> +{ + "aliases": { + "test-alias":{ + "is_write_index": true <2> + } + } +} +----------------------- +// CONSOLE +<1> Creates the index called `test-000001`. The rollover action increments the +suffix number for each subsequent index. +<2> Designates this index as the write index for this alias. + +When the rollover is performed, the newly-created index is set as the write +index for the rolled over alias. Documents sent to the alias are indexed into +the new index, enabling indexing to continue uninterrupted. diff --git a/docs/reference/index.asciidoc b/docs/reference/index.asciidoc index 2905688f7be78..6f80d95079e26 100644 --- a/docs/reference/index.asciidoc +++ b/docs/reference/index.asciidoc @@ -57,7 +57,7 @@ include::index-modules.asciidoc[] include::ingest.asciidoc[] -include::ccr/index.asciidoc[] +include::ilm/index.asciidoc[] include::sql/index.asciidoc[] diff --git a/docs/reference/ingest/ingest-node-common-processor.asciidoc b/docs/reference/ingest/ingest-node-common-processor.asciidoc new file mode 100644 index 0000000000000..dcf8b63630b4b --- /dev/null +++ b/docs/reference/ingest/ingest-node-common-processor.asciidoc @@ -0,0 +1,5 @@ +| `if` | no | - | Conditionally execute this processor. +| `on_failure` | no | - | Handle failures for this processor. See <>. +| `ignore_failure` | no | `false` | Ignore failures for this processor. See <>. +| `tag` | no | - | An identifier for this processor. Useful for debugging and metrics. +// TODO: See <>. <-- for the if description once PR 35044 is merged \ No newline at end of file diff --git a/docs/reference/ingest/ingest-node.asciidoc b/docs/reference/ingest/ingest-node.asciidoc index 2a26b0e5d4279..2e14028b8cf01 100644 --- a/docs/reference/ingest/ingest-node.asciidoc +++ b/docs/reference/ingest/ingest-node.asciidoc @@ -764,6 +764,12 @@ A node will not start if either of these plugins are not available. The <> can be used to fetch ingest usage statistics, globally and on a per pipeline basis. Useful to find out which pipelines are used the most or spent the most time on preprocessing.
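+For example, a minimal sketch of fetching just the ingest section of the node stats (the response layout is abbreviated here and may vary by version): + +[source,js] +-------------------------------------------------- +GET _nodes/stats/ingest +-------------------------------------------------- +// CONSOLE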
+[float] +=== Ingest Processor Plugins + +Additional ingest processors can be implemented and installed as Elasticsearch {plugins}/intro.html[plugins]. +See {plugins}/ingest.html[Ingest plugins] for information about the available ingest plugins. + [[append-processor]] === Append Processor Appends one or more values to an existing array if the field already exists and it is an array. @@ -778,6 +784,7 @@ Accepts a single value or an array of values. | Name | Required | Default | Description | `field` | yes | - | The field to be appended to. Supports <>. | `value` | yes | - | The value to be appended. Supports <>. +include::ingest-node-common-processor.asciidoc[] |====== [source,js] @@ -806,6 +813,7 @@ the field is not a supported format or resultant value exceeds 2^63. | `field` | yes | - | The field to convert | `target_field` | no | `field` | The field to assign the converted value to, by default `field` is updated in-place | `ignore_missing` | no | `false` | If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document +include::ingest-node-common-processor.asciidoc[] |====== [source,js] @@ -844,6 +852,7 @@ still be updated with the unconverted field value. | `target_field` | no | `field` | The field to assign the converted value to, by default `field` is updated in-place | `type` | yes | - | The type to convert the existing value to | `ignore_missing` | no | `false` | If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document +include::ingest-node-common-processor.asciidoc[] |====== [source,js] @@ -876,6 +885,7 @@ in the same order they were defined as part of the processor definition. | `formats` | yes | - | An array of the expected date formats. Can be a Joda pattern or one of the following formats: ISO8601, UNIX, UNIX_MS, or TAI64N. | `timezone` | no | UTC | The timezone to use when parsing the date. Supports <>. | `locale` | no | ENGLISH | The locale to use when parsing the date, relevant when parsing month names or week days. Supports <>. +include::ingest-node-common-processor.asciidoc[] |====== Here is an example that adds the parsed date to the `timestamp` field based on the `initial_date` field: @@ -1065,6 +1075,7 @@ understands this to mean `2016-04-01` as is explained in the <>. +include::ingest-node-common-processor.asciidoc[] |====== [[dissect-processor]] @@ -1134,7 +1145,7 @@ See <> for more information. | `pattern` | yes | - | The pattern to apply to the field | `append_separator`| no | "" (empty string) | The character(s) that separate the appended fields. | `ignore_missing` | no | false | If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document -| ` +include::ingest-node-common-processor.asciidoc[] |====== [source,js] @@ -1162,7 +1173,6 @@ modifiers. | `+` with `/n` | Append with order | left and right | `%{+keyname/2} %{+keyname/1}` | Appends two or more fields together in the order specified | <> | `?` | Named skip key | left | `%{?ignoreme}` | Skips the matched value in the output. Same behavior as `%{}`| <> | `*` and `&` | Reference keys | left | `%{*r1} %{&r1}` | Sets the output key as value of `*` and output value of `&` | <> -| ` |====== [[dissect-modifier-skip-right-padding]] @@ -1265,6 +1275,14 @@ Reference key modifier example Drops the document without raising any errors. This is useful to prevent the document from getting indexed based on some condition. 
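+For example, a minimal sketch of a conditional drop using the common `if` option (the `network_name` field here is just a hypothetical example): + +[source,js] +-------------------------------------------------- +{ + "drop": { + "if": "ctx.network_name == 'Guest'" + } +} +-------------------------------------------------- +// NOTCONSOLE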
+[[drop-options]] +.Drop Options +[options="header"] +|====== +| Name | Required | Default | Description +include::ingest-node-common-processor.asciidoc[] +|====== + [source,js] -------------------------------------------------- { @@ -1289,6 +1307,7 @@ Otherwise these <> can't be accessed by any | Name | Required | Default | Description | `field` | yes | - | The field to expand into an object field | `path` | no | - | The field that contains the field to expand. Only required if the field to expand is part another object field, because the `field` option can only understand leaf fields. +include::ingest-node-common-processor.asciidoc[] |====== [source,js] @@ -1406,6 +1425,7 @@ to the requester. |====== | Name | Required | Default | Description | `message` | yes | - | The error message thrown by the processor. Supports <>. +include::ingest-node-common-processor.asciidoc[] |====== [source,js] @@ -1452,6 +1472,7 @@ then it aborts the execution and leaves the array unmodified. | `field` | yes | - | The array field | `processor` | yes | - | The processor to execute against each field | `ignore_missing` | no | false | If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document +include::ingest-node-common-processor.asciidoc[] |====== Assume the following document: @@ -1645,6 +1666,7 @@ Grok expression. | `pattern_definitions` | no | - | A map of pattern-name and pattern tuples defining custom patterns to be used by the current processor. Patterns matching existing names will override the pre-existing definition. | `trace_match` | no | false | when true, `_ingest._grok_match_index` will be inserted into your matched document's metadata with the index into the pattern found in `patterns` that matched. | `ignore_missing` | no | false | If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document +include::ingest-node-common-processor.asciidoc[] |====== Here is an example of using the provided patterns to extract out and name structured fields from a string field in @@ -1919,6 +1941,7 @@ If the field is not a string, the processor will throw an exception. | `replacement` | yes | - | The string to replace the matching patterns with | `target_field` | no | `field` | The field to assign the converted value to, by default `field` is updated in-place | `ignore_missing` | no | `false` | If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document +include::ingest-node-common-processor.asciidoc[] |====== [source,js] @@ -1946,6 +1969,7 @@ Throws an error when the field is not an array. | `field` | yes | - | The field to be separated | `separator` | yes | - | The separator character | `target_field` | no | `field` | The field to assign the joined value to, by default `field` is updated in-place +include::ingest-node-common-processor.asciidoc[] |====== [source,js] @@ -1971,6 +1995,7 @@ Converts a JSON string into a structured JSON object. | `field` | yes | - | The field to be parsed | `target_field` | no | `field` | The field to insert the converted structured object into | `add_to_root` | no | false | Flag that forces the serialized json to be injected into the top level of the document. `target_field` must not be set when this option is chosen. +include::ingest-node-common-processor.asciidoc[] |====== All JSON-supported types will be parsed (null, boolean, number, array, object, string). 
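+For example, a minimal sketch of a `json` processor that parses the string in a `string_source` field into a structured object stored under `json_target` (both field names are just examples): + +[source,js] +-------------------------------------------------- +{ + "json": { + "field": "string_source", + "target_field": "json_target" + } +} +-------------------------------------------------- +// NOTCONSOLE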
@@ -2082,6 +2107,7 @@ For example, if you have a log message which contains `ip=1.2.3.4 error=REFUSED` | `trim_key` | no | `null` | String of characters to trim from extracted keys | `trim_value` | no | `null` | String of characters to trim from extracted values | `strip_brackets` | no | `false` | If `true` strip brackets `()`, `<>`, `[]` as well as quotes `'` and `"` from extracted values +include::ingest-node-common-processor.asciidoc[] |====== @@ -2097,6 +2123,7 @@ Converts a string to its lowercase equivalent. | `field` | yes | - | The field to make lowercase | `target_field` | no | `field` | The field to assign the converted value to, by default `field` is updated in-place | `ignore_missing` | no | `false` | If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document +include::ingest-node-common-processor.asciidoc[] |====== [source,js] @@ -2119,6 +2146,7 @@ Executes another pipeline. |====== | Name | Required | Default | Description | `name` | yes | - | The name of the pipeline to execute +include::ingest-node-common-processor.asciidoc[] |====== [source,js] @@ -2234,6 +2262,7 @@ Removes existing fields. If one field doesn't exist, an exception will be thrown | Name | Required | Default | Description | `field` | yes | - | Fields to be removed. Supports <>. | `ignore_missing` | no | `false` | If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document +include::ingest-node-common-processor.asciidoc[] |====== Here is an example to remove a single field: @@ -2272,6 +2301,7 @@ Renames an existing field. If the field doesn't exist or the new name is already | `field` | yes | - | The field to be renamed. Supports <>. | `target_field` | yes | - | The new name of the field. Supports <>. | `ignore_missing` | no | `false` | If `true` and `field` does not exist, the processor quietly exits without modifying the document +include::ingest-node-common-processor.asciidoc[] |====== [source,js] @@ -2305,6 +2335,7 @@ caching see <>. | `id` | no | - | The stored script id to refer to | `source` | no | - | An inline script to be executed | `params` | no | - | Script Parameters +include::ingest-node-common-processor.asciidoc[] |====== One of `id` or `source` options must be provided in order to properly reference a script to execute. @@ -2401,6 +2432,7 @@ its value will be replaced with the provided one. | `field` | yes | - | The field to insert, upsert, or update. Supports <>. | `value` | yes | - | The value to be set for the field. Supports <>. | `override` | no | true | If processor will update fields with pre-existing non-null-valued field. When set to `false`, such fields will not be touched. +include::ingest-node-common-processor.asciidoc[] |====== [source,js] @@ -2414,6 +2446,43 @@ its value will be replaced with the provided one. -------------------------------------------------- // NOTCONSOLE + +[[ingest-node-set-security-user-processor]] +=== Set Security User Processor +Sets user-related details (such as `username`, `roles`, `email`, `full_name` +and `metadata`) from the currently +authenticated user to the current document by pre-processing the ingest. + +IMPORTANT: Requires an authenticated user for the index request. + +[[set-security-user-options]] +.Set Security User Options +[options="header"] +|====== +| Name | Required | Default | Description +| `field` | yes | - | The field to store the user information into.
+| `properties` | no | [`username`, `roles`, `email`, `full_name`, `metadata`] | Controls what user-related properties are added to the `field`. +include::ingest-node-common-processor.asciidoc[] +|====== + +The following example adds all user details for the currently authenticated user +to the `user` field for all documents that are processed by this pipeline: + +[source,js] +-------------------------------------------------- +{ + "processors" : [ + { + "set_security_user": { + "field": "user" + } + } + ] +} +-------------------------------------------------- +// NOTCONSOLE + + +[[split-processor]] === Split Processor Splits a field into an array using a separator character. Only works on string fields. @@ -2427,6 +2496,7 @@ Splits a field into an array using a separator character. Only works on string f | `separator` | yes | - | A regex which matches the separator, eg `,` or `\s+` | `target_field` | no | `field` | The field to assign the split value to, by default `field` is updated in-place | `ignore_missing` | no | `false` | If `true` and `field` does not exist, the processor quietly exits without modifying the document +include::ingest-node-common-processor.asciidoc[] |====== [source,js] @@ -2455,6 +2525,7 @@ Throws an error when the field is not an array. | `field` | yes | - | The field to be sorted | `order` | no | `"asc"` | The sort order to use. Accepts `"asc"` or `"desc"`. | `target_field` | no | `field` | The field to assign the sorted value to, by default `field` is updated in-place +include::ingest-node-common-processor.asciidoc[] |====== [source,js] @@ -2482,6 +2553,7 @@ NOTE: This only works on leading and trailing whitespace. | `field` | yes | - | The string-valued field to trim whitespace from | `target_field` | no | `field` | The field to assign the trimmed value to, by default `field` is updated in-place | `ignore_missing` | no | `false` | If `true` and `field` does not exist, the processor quietly exits without modifying the document +include::ingest-node-common-processor.asciidoc[] |====== [source,js] @@ -2506,6 +2578,7 @@ Converts a string to its uppercase equivalent. | `field` | yes | - | The field to make uppercase | `target_field` | no | `field` | The field to assign the converted value to, by default `field` is updated in-place | `ignore_missing` | no | `false` | If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document +include::ingest-node-common-processor.asciidoc[] |====== [source,js] @@ -2530,6 +2603,7 @@ URL-decodes a string | `field` | yes | - | The field to decode | `target_field` | no | `field` | The field to assign the converted value to, by default `field` is updated in-place | `ignore_missing` | no | `false` | If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document +include::ingest-node-common-processor.asciidoc[] |====== [source,js] diff --git a/docs/reference/migration/migrate_6_5.asciidoc b/docs/reference/migration/migrate_6_5.asciidoc index 4f3716b2ae4a7..a4dab4dc109a3 100644 --- a/docs/reference/migration/migrate_6_5.asciidoc +++ b/docs/reference/migration/migrate_6_5.asciidoc @@ -95,11 +95,13 @@ Entries in the previous format contained positional fields. These fields are no longer positional in the new format, as they are part of the structured log entry (the JSON document).
Consequently, the following settings, which toggled some of the prefix -positional fields, have be renamed ("prefix" was spliced out): +positional fields, have been renamed ("prefix" was spliced out): + - `xpack.security.audit.logfile.prefix.emit_node_host_address` to `xpack.security.audit.logfile.emit_node_host_address` - `xpack.security.audit.logfile.prefix.emit_node_host_name` to `xpack.security.audit.logfile.emit_node_host_name` - `xpack.security.audit.logfile.prefix.emit_node_name` to `xpack.security.audit.logfile.emit_node_name` + In 7.0.0 the `prefix` variant will be removed. diff --git a/docs/reference/modules/indices/query_cache.asciidoc b/docs/reference/modules/indices/query_cache.asciidoc index f6cdf71925a94..aaa1ab1742841 100644 --- a/docs/reference/modules/indices/query_cache.asciidoc +++ b/docs/reference/modules/indices/query_cache.asciidoc @@ -5,6 +5,7 @@ The query cache is responsible for caching the results of queries. There is one queries cache per node that is shared by all shards. The cache implements an LRU eviction policy: when a cache becomes full, the least recently used data is evicted to make way for new data. +It is not possible to inspect the contents of the query cache. The query cache only caches queries which are being used in a filter context. diff --git a/docs/reference/modules/remote-clusters.asciidoc b/docs/reference/modules/remote-clusters.asciidoc index 81d882f5f0eb6..4bf3073abd359 100644 --- a/docs/reference/modules/remote-clusters.asciidoc +++ b/docs/reference/modules/remote-clusters.asciidoc @@ -152,6 +152,15 @@ PUT _cluster/settings by default, but they can selectively be made optional by setting this setting to `true`. +`cluster.remote.${cluster_alias}.transport.ping_schedule`:: + + Sets the time interval between regular application-level ping messages that + are sent to ensure that transport connections to nodes belonging to remote + clusters are kept alive. If set to `-1`, application-level ping messages to + this remote cluster are not sent. If unset, application-level ping messages + are sent according to the global `transport.ping_schedule` setting, which + defaults to `-1`, meaning that pings are not sent. + [float] [[retrieve-remote-clusters-info]] === Retrieving remote clusters info diff --git a/docs/reference/modules/scripting/using.asciidoc b/docs/reference/modules/scripting/using.asciidoc index 725808e193640..86202a98dd537 100644 --- a/docs/reference/modules/scripting/using.asciidoc +++ b/docs/reference/modules/scripting/using.asciidoc @@ -204,7 +204,7 @@ you can change this behavior by using the `script.cache.expire` setting. You can configure the size of this cache by using the `script.cache.max_size` setting. By default, the cache size is `100`. -NOTE: The size of stored scripts is limited to 65,535 bytes. This can be +NOTE: The size of scripts is limited to 65,535 bytes. This can be changed by setting `script.max_size_in_bytes` setting to increase that soft limit, but if scripts are really large then a <> should be considered. diff --git a/docs/reference/modules/snapshots.asciidoc b/docs/reference/modules/snapshots.asciidoc index b2edca480117b..0aab3665951aa 100644 --- a/docs/reference/modules/snapshots.asciidoc +++ b/docs/reference/modules/snapshots.asciidoc @@ -112,8 +112,8 @@ which returns: ----------------------------------- // TESTRESPONSE -To retrieve information about multiple repositories, specify a -a comma-delimited list of repositories.
You can also use the * wildcard when +To retrieve information about multiple repositories, specify a comma-delimited +list of repositories. You can also use the * wildcard when specifying repository names. For example, the following request retrieves information about all of the snapshot repositories that start with `repo` or contain `backup`: diff --git a/docs/reference/modules/transport.asciidoc b/docs/reference/modules/transport.asciidoc index 257181f70c507..c1bc83230e597 100644 --- a/docs/reference/modules/transport.asciidoc +++ b/docs/reference/modules/transport.asciidoc @@ -46,9 +46,9 @@ between all nodes. Defaults to `false`. |`transport.ping_schedule` | Schedule a regular application-level ping message to ensure that transport connections between nodes are kept alive. Defaults to -`5s` in the transport client and `-1` (disabled) elsewhere. It is preferable to -correctly configure TCP keep-alives instead of using this feature, because TCP -keep-alives apply to all kinds of long-lived connection and not just to +`5s` in the transport client and `-1` (disabled) elsewhere. It is preferable +to correctly configure TCP keep-alives instead of using this feature, because +TCP keep-alives apply to all kinds of long-lived connections and not just to transport connections. |======================================================================= diff --git a/docs/reference/monitoring/configuring-metricbeat.asciidoc b/docs/reference/monitoring/configuring-metricbeat.asciidoc index 3d4a37861f1ae..34cbee9c1699f 100644 --- a/docs/reference/monitoring/configuring-metricbeat.asciidoc +++ b/docs/reference/monitoring/configuring-metricbeat.asciidoc @@ -3,7 +3,9 @@ [[configuring-metricbeat]] === Monitoring {es} with {metricbeat} -beta[] In 6.5 and later, you can use {metricbeat} to collect data about {es} +beta[] + +In 6.5 and later, you can use {metricbeat} to collect data about {es} and ship it to the monitoring cluster, rather than routing it through exporters as described in <>. diff --git a/docs/reference/monitoring/images/metricbeat.png b/docs/reference/monitoring/images/metricbeat.png index bf6434dc4b40c..f74f856653043 100644 Binary files a/docs/reference/monitoring/images/metricbeat.png and b/docs/reference/monitoring/images/metricbeat.png differ diff --git a/docs/reference/query-dsl/function-score-query.asciidoc b/docs/reference/query-dsl/function-score-query.asciidoc index 4b4a82594a110..7c5ca95623e83 100644 --- a/docs/reference/query-dsl/function-score-query.asciidoc +++ b/docs/reference/query-dsl/function-score-query.asciidoc @@ -208,10 +208,10 @@ not. The number value is of type float. [[function-random]] ==== Random -The `random_score` generates scores that are uniformly distributed in [0, 1[. -By default, it uses the internal Lucene doc ids as a source of randomness, -which is very efficient but unfortunately not reproducible since documents might -be renumbered by merges. +The `random_score` generates scores that are uniformly distributed from 0 up to +but not including 1. By default, it uses the internal Lucene doc ids as a +source of randomness, which is very efficient but unfortunately not +reproducible since documents might be renumbered by merges. In case you want scores to be reproducible, it is possible to provide a `seed` and `field`. 
The final score will then be computed based on this seed, the diff --git a/docs/reference/query-dsl/geo-polygon-query.asciidoc b/docs/reference/query-dsl/geo-polygon-query.asciidoc index 189f135fa3bc7..c33b227824bdf 100644 --- a/docs/reference/query-dsl/geo-polygon-query.asciidoc +++ b/docs/reference/query-dsl/geo-polygon-query.asciidoc @@ -1,7 +1,7 @@ [[query-dsl-geo-polygon-query]] === Geo Polygon Query -A query allowing to include hits that only fall within a polygon of +A query returning only hits that fall within a polygon of points. Here is an example: [source,js] @@ -17,9 +17,9 @@ GET /_search "geo_polygon" : { "person.location" : { "points" : [ - {"lat" : 40, "lon" : -70}, - {"lat" : 30, "lon" : -80}, - {"lat" : 20, "lon" : -90} + {"lat" : 40, "lon" : -70}, + {"lat" : 30, "lon" : -80}, + {"lat" : 20, "lon" : -90} ] } } @@ -49,7 +49,9 @@ or longitude, or `STRICT` (default is `STRICT`). [float] ===== Lat Long as Array -Format in `[lon, lat]`, note, the order of lon/lat here in order to +Format as `[lon, lat]` + +Note: the order of lon/lat here must conform with http://geojson.org/[GeoJSON]. [source,js] diff --git a/docs/reference/query-dsl/percolate-query.asciidoc b/docs/reference/query-dsl/percolate-query.asciidoc index aac57363ffa60..2a9a9de4851d4 100644 --- a/docs/reference/query-dsl/percolate-query.asciidoc +++ b/docs/reference/query-dsl/percolate-query.asciidoc @@ -118,6 +118,10 @@ The above request will yield the following response: <2> The `_percolator_document_slot` field indicates which document has matched with this query. Useful when percolating multiple document simultaneously. +TIP: To provide a simple example, this documentation uses one index `my-index` for both the percolate queries and documents. +This set-up can work well when there are just a few percolate queries registered. However, with heavier usage it is recommended +to store queries and documents in separate indices. Please see <> for more details. + [float] ==== Parameters @@ -643,6 +647,7 @@ The above search request returns a response similar to this: query with `_name` parameter set to `query1`. [float] +[[how-it-works]] ==== How it Works Under the Hood When indexing a document into an index that has the <> mapping configured, the query @@ -679,3 +684,11 @@ GET /_search NOTE: The above example assumes that there is a `query` field of type `percolator` in the mappings. + +Given the design of percolation, it often makes sense to use separate indices for the percolate queries and documents +being percolated, as opposed to a single index as we do in these examples. There are a few benefits to this approach: + +- Because percolate queries contain a different set of fields from the percolated documents, using two separate indices +allows for fields to be stored in a denser, more efficient way. +- Percolate queries do not scale in the same way as other queries, so percolation performance may benefit from using +a different index configuration, like the number of primary shards. diff --git a/docs/reference/query-dsl/range-query.asciidoc b/docs/reference/query-dsl/range-query.asciidoc index 620a175ff39a5..c5087d52f905e 100644 --- a/docs/reference/query-dsl/range-query.asciidoc +++ b/docs/reference/query-dsl/range-query.asciidoc @@ -139,3 +139,26 @@ GET _search // CONSOLE <1> This date will be converted to `2014-12-31T23:00:00 UTC`. <2> `now` is not affected by the `time_zone` parameter (dates must be stored as UTC).
+ +[[querying-range-fields]] +==== Querying range fields + +`range` queries can be used on fields of type <>, allowing you to +match a range specified in the query with a range field value in the document. +The `relation` parameter controls how these two ranges are matched: + +[horizontal] +`WITHIN`:: + + Matches documents whose range field is entirely within the query's range. + +`CONTAINS`:: + + Matches documents whose range field entirely contains the query's range. + +`INTERSECTS`:: + + Matches documents whose range field intersects the query's range. + This is the default value when querying range fields. + +For examples, see <> mapping type. diff --git a/docs/reference/release-notes.asciidoc b/docs/reference/release-notes.asciidoc index 4a40e4dcf440e..9b7c776f8b039 100644 --- a/docs/reference/release-notes.asciidoc +++ b/docs/reference/release-notes.asciidoc @@ -6,6 +6,7 @@ This section summarizes the changes in each release. * <> +* <> * <> * <> * <> diff --git a/docs/reference/release-notes/6.4.asciidoc b/docs/reference/release-notes/6.4.asciidoc index cefb85042347c..0631e9b9567d0 100644 --- a/docs/reference/release-notes/6.4.asciidoc +++ b/docs/reference/release-notes/6.4.asciidoc @@ -35,18 +35,28 @@ //// [[release-notes-6.4.3]] -== 6.4.3 Release Notes +== {es} version 6.4.3 coming[6.4.3] Also see <>. +[float] +[[enhancement-6.4.3]] +=== Enhancements + +Machine learning:: +* Changes linker options on macOS to allow Homebrew installs ({ml-pull}225[#225]) + [[bug-6.4.3]] [float] === Bug fixes +Aggregations:: +* Check self references in metric agg after last doc collection (#33593) {pull}34001[#34001] + Authentication:: -* ListenableFuture should preserve ThreadContext {pull}34394[#34394] +* ListenableFuture should preserve ThreadContext (CVE-2018-17244) {pull}34394[#34394] * Allow an AuthenticationResult to return metadata {pull}34382[#34382] (issues: {issue}34290[#34290], {issue}34332[#34332]) * Preserve thread context during authentication {pull}34290[#34290] @@ -56,6 +66,13 @@ Circuit Breakers:: Java High Level REST Client:: * HLRC: Fixing bug when getting a missing pipeline {pull}34286[#34286] (issue: {issue}34119[#34119]) +Machine learning:: +* Fixes the cause of `hard_limit` memory errors for jobs with bucket spans greater +than one day ({ml-pull}243[#243]) +* Rules that trigger the `skip_model_update` action should also apply to the +anomaly model. This fixes an issue where anomaly scores of results that triggered +the rule would decrease if they occurred frequently.
{ml-pull}222[#222] (issue:{ml-issue}217[#217]) + Network:: * Support PKCS#11 tokens as keystores and truststores {pull}34063[#34063] (issue: {issue}11[#11]) * Correctly handle PKCS#11 tokens for system keystore {pull}33460[#33460] (issues: {issue}11[#11], {issue}33459[#33459]) diff --git a/docs/reference/release-notes/6.5.asciidoc b/docs/reference/release-notes/6.5.asciidoc index 06473d1991f49..b3adc097f6b72 100644 --- a/docs/reference/release-notes/6.5.asciidoc +++ b/docs/reference/release-notes/6.5.asciidoc @@ -95,9 +95,6 @@ Plugins:: Analysis:: * Deprecates the standard filter {pull}33468[#33468] -Geo:: -* Make Geo Context Mapping Parsing More Strict (6.x version) {pull}32862[#32862] (issue: {issue}32821[#32821]) - Scripting:: * Scripting: Conditionally use java time api in scripting {pull}31441[#31441] @@ -105,6 +102,9 @@ Search:: * Deprecate negative query boost (#34486) {pull}34512[#34512] (issues: {issue}33309[#33309], {issue}34486[#34486]) * Deprecate negative `weight` in Function Score Query {pull}33624[#33624] (issue: {issue}31927[#31927]) +Suggesters:: +* Make Geo Context Mapping Parsing More Strict (6.x version) {pull}32862[#32862] (issue: {issue}32821[#32821]) + [[feature-6.5.0]] @@ -124,11 +124,11 @@ Audit:: Authentication:: * Allow User/Password realms to disable authc {pull}34033[#34033] (issue: {issue}33292[#33292]) -* Add support for "authorization_realms" {pull}33262[#33262] Authorization:: * [X-Pack] Beats centralized management: security role + licensing {pull}34305[#34305] (issues: {issue}30493[#30493], {issue}30520[#30520]) * Add get-user-privileges API {pull}33928[#33928] (issue: {issue}32777[#32777]) +* Add support for "authorization_realms" {pull}33262[#33262] CCR:: * [CCR] Added auto follow patterns feature {pull}33118[#33118] (issue: {issue}33007[#33007]) @@ -194,13 +194,9 @@ Authorization:: * [Kerberos] Add authorization realms support to Kerberos realm {pull}32392[#32392] * [X-Pack] Beats centralized management: security role + licensing {pull}30520[#30520] (issue: {issue}30493[#30493]) -Beats:: +Beats Plugin:: * [Monitoring] Update beats template to include apm-server metrics {pull}33286[#33286] -CCR:: -* Fill LocalCheckpointTracker with Lucene commit {pull}34474[#34474] (issues: {issue}0[#0], {issue}2[#2], {issue}33656[#33656]) -* Integrates soft-deletes into Elasticsearch {pull}33222[#33222] (issues: {issue}29530[#29530], {issue}30086[#30086], {issue}30120[#30120], {issue}30335[#30335], {issue}30522[#30522], {issue}31106[#31106]) - CRUD:: * Verify primary mode usage with assertions {pull}32667[#32667] (issues: {issue}10708[#10708], {issue}25692[#25692], {issue}32442[#32442]) * Refactor TransportShardBulkAction to better support retries {pull}31821[#31821] @@ -222,10 +218,11 @@ Distributed:: * Expose whether or not the global checkpoint updated {pull}32659[#32659] (issue: {issue}32651[#32651]) * Include translog path in error message when translog is corrupted {pull}32251[#32251] (issue: {issue}24929[#24929]) -Docs:: +Docs Infrastructure:: * Docs: Allow snippets to have line continuation {pull}32649[#32649] Engine:: +* Fill LocalCheckpointTracker with Lucene commit {pull}34474[#34474] (issues: {issue}0[#0], {issue}2[#2], {issue}33656[#33656]) * Lock down Engine.Searcher {pull}34363[#34363] (issue: {issue}34357[#34357]) * Fold EngineSearcher into Engine.Searcher {pull}34082[#34082] * Build DocStats from SegmentInfos in ReadOnlyEngine {pull}34079[#34079] (issue: {issue}33903[#33903]) @@ -235,7 +232,6 @@ Engine:: * Allow engine to recover from translog upto a 
seqno {pull}33032[#33032] (issue: {issue}32867[#32867]) ILM:: -* HLRC: Add ILM Retry {pull}33990[#33990] (issue: {issue}33100[#33100]) * 6.x - HLRC: Add ILM Status to HLRC (#33283) {pull}33448[#33448] (issue: {issue}33283[#33283]) Index APIs:: @@ -378,12 +374,17 @@ Recovery:: * Restore local history from translog on promotion {pull}33616[#33616] (issues: {issue}32867[#32867], {issue}33473[#33473]) * Reset replica engine to global checkpoint on promotion {pull}33473[#33473] (issue: {issue}32867[#32867]) * Bootstrap a new history_uuid when force allocating a stale primary {pull}33432[#33432] (issue: {issue}26712[#26712]) +* Integrates soft-deletes into Elasticsearch {pull}33222[#33222] (issues: {issue}29530[#29530], {issue}30086[#30086], {issue}30120[#30120], {issue}30335[#30335], {issue}30522[#30522], {issue}31106[#31106]) Rollup:: * [Rollup] Add support for date histo `format` when searching {pull}34537[#34537] (issue: {issue}34391[#34391]) * [Rollup] Only allow aggregating on multiples of configured interval {pull}32052[#32052] SQL:: +* SQL: Optimizer rule for folding nullable expressions {pull}35080[#35080] (issue: {issue}34826[#34826]) +* SQL: Improve painless script generated from `IN` {pull}35055[#35055] (issue: {issue}34750[#34750]) +* SQL: Implement CAST between STRING and IP {pull}34949[#34949] (issue: {issue}34799[#34799]) +* SQL: Fix function args verification and error msgs {pull}34926[#34926] (issues: {issue}33469[#33469], {issue}34752[#34752]) * SQL: Introduce ODBC mode, similar to JDBC {pull}34825[#34825] (issue: {issue}34720[#34720]) * SQL: Introduce support for IP fields {pull}34758[#34758] (issue: {issue}32499[#32499]) * SQL: Implement null handling for `IN(v1, v2, ...)` {pull}34750[#34750] (issue: {issue}34582[#34582]) @@ -430,6 +431,7 @@ Search:: * Ignore script fields when size is 0 {pull}31917[#31917] (issue: {issue}31824[#31824]) Security:: +* Generate non-encrypted license public key {pull}34626[#34626] * Security: don't call prepare index for reads {pull}34568[#34568] (issues: {issue}33205[#33205], {issue}34246[#34246]) * Enable security automaton caching {pull}34028[#34028] * Add Debug/Trace logging to token service {pull}34022[#34022] @@ -474,6 +476,7 @@ ZenDiscovery:: Aggregations:: * Fix handling of empty keyword in terms aggregation {pull}34457[#34457] (issue: {issue}34434[#34434]) +* Check self references in metric agg after last doc collection (#33593) {pull}34001[#34001] * Unmapped aggs should not run pipelines if they delegate reduction {pull}33528[#33528] (issue: {issue}33514[#33514]) * For filters aggregations, make sure that rewrites preserve other_bucket. {pull}32921[#32921] (issue: {issue}32834[#32834]) @@ -529,6 +532,7 @@ Geo:: * Use the determinant formula for calculating the orientation of a polygon {pull}27967[#27967] Index APIs:: +* Make XContentBuilder in AliasActions build `is_write_index` field {pull}35071[#35071] * Do not update number of replicas on no indices {pull}34481[#34481] * [Security] Get Alias API wildcard exclusion with Security {pull}34144[#34144] (issues: {issue}33518[#33518], {issue}33805[#33805]) * Allow to clear the fielddata cache per field {pull}33807[#33807] (issue: {issue}33798[#33798]) @@ -558,9 +562,9 @@ Machine Learning:: * Handle pre-6.x time fields {pull}34373[#34373] * [ML] Get job stats request should filter non-ML job tasks {pull}33516[#33516] (issue: {issue}33515[#33515]) * [ML] Prevent NPE parsing the stop datafeed request. 
{pull}33347[#33347] +* [ML] fix updating opened jobs scheduled events (#31651) {pull}32881[#32881] (issue: {issue}31651[#31651]) * Clear Job#finished_time when it is opened (#32605) {pull}32755[#32755] * [ML] Fix thread leak when waiting for job flush (#32196) {pull}32541[#32541] (issue: {issue}32196[#32196]) -* [ML] Adding an open job to a group does not update autodetect with related scheduled events {pull}31651[#31651] Mapping:: * Fix field mapping updates with similarity {pull}33634[#33634] (issue: {issue}33611[#33611]) @@ -588,11 +592,15 @@ Packaging:: REST API:: * Core: Fix IndicesSegmentResponse.toXcontent() serialization {pull}33414[#33414] (issue: {issue}29120[#29120]) +Ranking:: +* Fix a bug in function_score queries where we use the wrong boost_mode. {pull}35148[#35148] (issue: {issue}35123[#35123]) + Recovery:: * Resync fails to notify on unavaiable exceptions {pull}33615[#33615] (issues: {issue}31179[#31179], {issue}33613[#33613]) * Ensure to generate identical NoOp for the same failure {pull}33141[#33141] (issue: {issue}32986[#32986]) Rollup:: +* [Rollup] Proactively resolve index patterns in RollupSearch endoint {pull}34930[#34930] (issue: {issue}34828[#34828]) * Address BWC bug due to default metrics in (#34764) {pull}34810[#34810] (issue: {issue}34764[#34764]) * Allowing {index}/_xpack/rollup/data to accept comma delimited list {pull}34115[#34115] * [Rollup] Fix Caps Comparator to handle calendar/fixed time {pull}33336[#33336] (issue: {issue}32052[#32052]) @@ -601,6 +609,15 @@ Rollup:: * [Rollup] Improve ID scheme for rollup documents {pull}32558[#32558] (issue: {issue}32372[#32372]) SQL:: +* SQL: handle wildcard expansion on incorrect fields {pull}35134[#35134] (issue: {issue}35092[#35092]) +* SQL: Register missing processors {pull}35121[#35121] (issue: {issue}35119[#35119]) +* SQL: Fix NPE thrown if HAVING filter evals to null {pull}35108[#35108] (issue: {issue}35107[#35107]) +* SQL: Proper handling of nested fields at the beginning of the columns list {pull}35068[#35068] (issue: {issue}32951[#32951]) +* SQL: Fix incorrect AVG data type {pull}34948[#34948] (issue: {issue}33773[#33773]) +* SQL: Add `CAST` and `CONVERT` to `SHOW FUNCTIONS` {pull}34940[#34940] (issue: {issue}34939[#34939]) +* SQL: Handle aggregation for null group {pull}34916[#34916] (issue: {issue}34896[#34896]) +* SQL: Provide null-safe scripts for Not and Neg {pull}34877[#34877] (issue: {issue}34848[#34848]) +* SQL: Return error with ORDER BY on non-grouped. {pull}34855[#34855] (issue: {issue}34590[#34590]) * SQL: Fix queries with filter resulting in NO_MATCH {pull}34812[#34812] (issue: {issue}34613[#34613]) * SQL: Fix edge case: ` IN (null)` {pull}34802[#34802] (issue: {issue}34750[#34750]) * SQL: Verifier allows aliases aggregates for sorting {pull}34773[#34773] (issue: {issue}34607[#34607]) diff --git a/docs/reference/release-notes/highlights-6.5.0.asciidoc b/docs/reference/release-notes/highlights-6.5.0.asciidoc index affe5946c346a..e23a1822aa54f 100644 --- a/docs/reference/release-notes/highlights-6.5.0.asciidoc +++ b/docs/reference/release-notes/highlights-6.5.0.asciidoc @@ -4,4 +4,6 @@ 6.5.0 ++++ -coming[6.5.0] \ No newline at end of file +coming[6.5.0] + +See also <>. 
\ No newline at end of file diff --git a/docs/reference/release-notes/highlights.asciidoc b/docs/reference/release-notes/highlights.asciidoc index 4fe169a1655e0..85fda6152919e 100644 --- a/docs/reference/release-notes/highlights.asciidoc +++ b/docs/reference/release-notes/highlights.asciidoc @@ -9,10 +9,12 @@ This section summarizes the most important changes in each release. For the full list, see <> and <>. +* <> * <> * <> -- +include::highlights-6.5.0.asciidoc[] include::highlights-6.4.0.asciidoc[] include::highlights-6.3.0.asciidoc[] \ No newline at end of file diff --git a/docs/reference/rest-api/index.asciidoc b/docs/reference/rest-api/index.asciidoc index 5aef27e127500..eedc2dfa1f51f 100644 --- a/docs/reference/rest-api/index.asciidoc +++ b/docs/reference/rest-api/index.asciidoc @@ -10,6 +10,7 @@ directly to configure and access {xpack} features. * <> * <> * <> +* <> * <> * <> * <> @@ -22,6 +23,7 @@ directly to configure and access {xpack} features. include::info.asciidoc[] include::{es-repo-dir}/ccr/apis/ccr-apis.asciidoc[] include::{es-repo-dir}/graph/explore.asciidoc[] +include::{es-repo-dir}/ilm/apis/ilm-api.asciidoc[] include::{es-repo-dir}/licensing/index.asciidoc[] include::{es-repo-dir}/migration/migration.asciidoc[] include::{es-repo-dir}/ml/apis/ml-api.asciidoc[] diff --git a/docs/reference/rest-api/info.asciidoc b/docs/reference/rest-api/info.asciidoc index 6d5967d0cdc7d..fc6488010dabd 100644 --- a/docs/reference/rest-api/info.asciidoc +++ b/docs/reference/rest-api/info.asciidoc @@ -68,6 +68,11 @@ Example response: "available" : true, "enabled" : true }, + "ilm" : { + "description" : "Index lifecycle management for the Elastic Stack", + "available" : true, + "enabled" : true + }, "logstash" : { "description" : "Logstash management component for X-Pack", "available" : true, diff --git a/docs/reference/rollup/apis/rollup-search.asciidoc b/docs/reference/rollup/apis/rollup-search.asciidoc index 8e7fc69a00a6b..e2252a772184d 100644 --- a/docs/reference/rollup/apis/rollup-search.asciidoc +++ b/docs/reference/rollup/apis/rollup-search.asciidoc @@ -30,6 +30,7 @@ Rules for the `index` parameter: or using `_all`, is not permitted - Multiple non-rollup indices may be specified - Only one rollup index may be specified. If more than one are supplied an exception will be thrown +- Index patterns may be used, but if they match more than one rollup index an exception will be thrown. ==== Request Body diff --git a/docs/reference/rollup/rollup-search-limitations.asciidoc b/docs/reference/rollup/rollup-search-limitations.asciidoc index b61d1a743880f..c8a736450bde0 100644 --- a/docs/reference/rollup/rollup-search-limitations.asciidoc +++ b/docs/reference/rollup/rollup-search-limitations.asciidoc @@ -21,6 +21,7 @@ follows: or using `_all`, is not permitted - Multiple non-rollup indices may be specified - Only one rollup index may be specified. If more than one are supplied an exception will be thrown +- Index patterns may be used, but if they match more than one rollup index an exception will be thrown. This limitation is driven by the logic that decides which jobs are the "best" for any given query. 
If you have ten jobs stored in a single index, which cover the source data with varying degrees of completeness and different intervals, the query needs to determine which set diff --git a/docs/reference/settings/audit-settings.asciidoc b/docs/reference/settings/audit-settings.asciidoc index ec661a1f30c9f..69045dca0a2db 100644 --- a/docs/reference/settings/audit-settings.asciidoc +++ b/docs/reference/settings/audit-settings.asciidoc @@ -22,14 +22,13 @@ file named `_audit.log` on each node. You can also specify `index`, which puts the auditing events in an {es} index that is prefixed with `.security_audit_log`. The index can reside on the same cluster or a separate cluster. - ++ For backwards compatibility reasons, if you use the logfile output type, a `_access.log` file is also created. It contains the same information, but it uses the older (pre-6.5.0) formatting style. If the backwards compatible format is not required, it should be disabled. To do that, change its logger level to `off` in the `log4j2.properties` file. For more information, see <>. - + -- TIP: If the index is unavailable, it is possible for auditing events to diff --git a/docs/reference/settings/ml-settings.asciidoc b/docs/reference/settings/ml-settings.asciidoc index f75bf3a9f2986..5d4f9519006ae 100644 --- a/docs/reference/settings/ml-settings.asciidoc +++ b/docs/reference/settings/ml-settings.asciidoc @@ -77,11 +77,11 @@ opening spend more time in the `opening` state. Defaults to `2`. These settings are for advanced use cases; the default values are generally sufficient: -`xpack.ml.max_anomaly_records`:: (<>) +`xpack.ml.max_anomaly_records` (<>):: The maximum number of records that are output per bucket. The default value is `500`. -`xpack.ml.max_lazy_ml_nodes`:: (<>) +`xpack.ml.max_lazy_ml_nodes` (<>):: The number of lazily spun up Machine Learning nodes. Useful in situations where ML nodes are not desired until the first Machine Learning Job is opened. It defaults to `0` and has a maximum acceptable value of `3`. diff --git a/docs/reference/settings/monitoring-settings.asciidoc b/docs/reference/settings/monitoring-settings.asciidoc index aac6d15cd1c4e..f2a12c1e4b6c4 100644 --- a/docs/reference/settings/monitoring-settings.asciidoc +++ b/docs/reference/settings/monitoring-settings.asciidoc @@ -43,14 +43,14 @@ to `true`. Its default value is `false`. The `xpack.monitoring.collection` settings control how data is collected from your Elasticsearch nodes. -`xpack.monitoring.collection.enabled`:: (<>) +`xpack.monitoring.collection.enabled` (<>):: added[6.3.0] Set to `true` to enable the collection of monitoring data. When this setting is `false` (default), {es} monitoring data is not collected and all monitoring data from other sources such as {kib}, Beats, and Logstash is ignored. -`xpack.monitoring.collection.interval`:: (<>) +`xpack.monitoring.collection.interval` (<>):: Setting to `-1` to disable data collection has been deprecated. deprecated[6.3.0, Use `xpack.monitoring.collection.enabled` set to `false` instead.] @@ -59,7 +59,7 @@ Controls how often data samples are collected. Defaults to `10s`. If you modify the collection interval, set the `xpack.monitoring.min_interval_seconds` option in `kibana.yml` to the same value. -`xpack.monitoring.elasticsearch.collection.enabled`:: (<>) +`xpack.monitoring.elasticsearch.collection.enabled` (<>):: Controls whether statistics about your {es} cluster should be collected. Defaults to `true`. 
This is different from xpack.monitoring.collection.enabled, which allows you to enable or disable @@ -71,7 +71,7 @@ to pass through this cluster. Sets the timeout for collecting the cluster statistics. Defaults to `10s`. -`xpack.monitoring.collection.indices`:: (<>) +`xpack.monitoring.collection.indices` (<>):: Controls which indices Monitoring collects data from. Defaults to all indices. Specify the index names as a comma-separated list, for example `test1,test2,test3`. Names can include wildcards, for diff --git a/docs/reference/sql/index.asciidoc b/docs/reference/sql/index.asciidoc index aa9eebea7b7f6..99b946746cc60 100644 --- a/docs/reference/sql/index.asciidoc +++ b/docs/reference/sql/index.asciidoc @@ -3,10 +3,10 @@ [[xpack-sql]] = SQL Access -:sql-tests: {xes-repo-dir}/../../qa/sql +:sql-tests: {xes-repo-dir}/../../plugin/sql/qa :sql-specs: {sql-tests}/src/main/resources -:jdbc-tests: {sql-tests}/src/main/java/org/elasticsearch/xpack/qa/sql/jdbc -:security-tests: {sql-tests}/security/src/test/java/org/elasticsearch/xpack/qa/sql/security +:jdbc-tests: {sql-tests}/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc +:security-tests: {sql-tests}/security/src/test/java/org/elasticsearch/xpack/sql/qa/security :es-sql: Elasticsearch SQL [partintro] diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/xcontent/ObjectPath.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/ObjectPath.java similarity index 50% rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/xcontent/ObjectPath.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/ObjectPath.java index 67ef405238aba..8a70f9cb70474 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/xcontent/ObjectPath.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/ObjectPath.java @@ -1,48 +1,61 @@ /* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
*/ -package org.elasticsearch.xpack.core.watcher.support.xcontent; -import org.elasticsearch.common.Strings; +package org.elasticsearch.common.xcontent; import java.lang.reflect.Array; import java.util.List; import java.util.Map; -public class ObjectPath { +/** + * Helper class to navigate nested objects using dot notation + */ +public final class ObjectPath { + + private static final String[] EMPTY_ARRAY = new String[0]; private ObjectPath() { } + /** + * Return the value within a given object at the specified path, or + * {@code null} if the path does not exist + */ + @SuppressWarnings("unchecked") public static T eval(String path, Object object) { return (T) evalContext(path, object); } private static Object evalContext(String path, Object ctx) { final String[] parts; - if (path == null || path.isEmpty()) parts = Strings.EMPTY_ARRAY; + if (path == null || path.isEmpty()) parts = EMPTY_ARRAY; else parts = path.split("\\."); - StringBuilder resolved = new StringBuilder(); for (String part : parts) { if (ctx == null) { return null; } if (ctx instanceof Map) { ctx = ((Map) ctx).get(part); - if (resolved.length() != 0) { - resolved.append("."); - } - resolved.append(part); } else if (ctx instanceof List) { try { int index = Integer.parseInt(part); ctx = ((List) ctx).get(index); - if (resolved.length() != 0) { - resolved.append("."); - } - resolved.append(part); } catch (NumberFormatException nfe) { return null; } @@ -50,10 +63,6 @@ private static Object evalContext(String path, Object ctx) { try { int index = Integer.parseInt(part); ctx = Array.get(ctx, index); - if (resolved.length() != 0) { - resolved.append("."); - } - resolved.append(part); } catch (NumberFormatException nfe) { return null; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/common/xcontent/XContentUtils.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentUtils.java similarity index 60% rename from x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/common/xcontent/XContentUtils.java rename to libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentUtils.java index da8ac3ef9d8f1..14a9f5be24b28 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/common/xcontent/XContentUtils.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentUtils.java @@ -1,20 +1,34 @@ /* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
*/ -package org.elasticsearch.xpack.core.watcher.common.xcontent; -import org.elasticsearch.common.xcontent.XContentParser; +package org.elasticsearch.common.xcontent; import java.io.IOException; -public class XContentUtils { +public final class XContentUtils { private XContentUtils() { } - // TODO open this up in core + /** + * Convert a {@link XContentParser.Token} to a value + */ public static Object readValue(XContentParser parser, XContentParser.Token token) throws IOException { if (token == XContentParser.Token.VALUE_NULL) { return null; diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/xcontent/MapPathTests.java b/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/ObjectPathTests.java similarity index 63% rename from x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/xcontent/MapPathTests.java rename to libs/x-content/src/test/java/org/elasticsearch/common/xcontent/ObjectPathTests.java index f89552a637726..52e9723743b44 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/xcontent/MapPathTests.java +++ b/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/ObjectPathTests.java @@ -1,12 +1,25 @@ /* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. */ -package org.elasticsearch.xpack.watcher.support.xcontent; + +package org.elasticsearch.common.xcontent; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xpack.core.watcher.support.xcontent.ObjectPath; import java.util.ArrayList; import java.util.Arrays; @@ -18,23 +31,23 @@ import static org.hamcrest.CoreMatchers.nullValue; import static org.hamcrest.Matchers.is; -public class MapPathTests extends ESTestCase { - public void testEval() throws Exception { +public class ObjectPathTests extends ESTestCase { + public void testEval() { Map map = singletonMap("key", "value"); assertThat(ObjectPath.eval("key", map), is((Object) "value")); assertThat(ObjectPath.eval("key1", map), nullValue()); } - public void testEvalList() throws Exception { - List list = Arrays.asList(1, 2, 3, 4); + public void testEvalList() { + List list = Arrays.asList(1, 2, 3, 4); Map map = singletonMap("key", list); int index = randomInt(3); assertThat(ObjectPath.eval("key." 
+ index, map), is(list.get(index))); } - public void testEvalArray() throws Exception { + public void testEvalArray() { int[] array = new int[] { 1, 2, 3, 4 }; Map map = singletonMap("key", array); @@ -42,13 +55,13 @@ public void testEvalArray() throws Exception { assertThat(((Number) ObjectPath.eval("key." + index, map)).intValue(), is(array[index])); } - public void testEvalMap() throws Exception { + public void testEvalMap() { Map map = singletonMap("a", singletonMap("b", "val")); assertThat(ObjectPath.eval("a.b", map), is((Object) "val")); } - public void testEvalMixed() throws Exception { + public void testEvalMixed() { Map map = new HashMap<>(); Map mapA = new HashMap<>(); diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/LegacyDelimitedPayloadTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/LegacyDelimitedPayloadTokenFilterFactory.java index 484c9d9b1280b..de99ab530281c 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/LegacyDelimitedPayloadTokenFilterFactory.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/LegacyDelimitedPayloadTokenFilterFactory.java @@ -19,21 +19,21 @@ package org.elasticsearch.analysis.common; +import org.apache.logging.log4j.LogManager; import org.elasticsearch.Version; import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.IndexSettings; public class LegacyDelimitedPayloadTokenFilterFactory extends DelimitedPayloadTokenFilterFactory { - private static final DeprecationLogger DEPRECATION_LOGGER = - new DeprecationLogger(Loggers.getLogger(LegacyDelimitedPayloadTokenFilterFactory.class)); + private static final DeprecationLogger deprecationLogger = + new DeprecationLogger(LogManager.getLogger(LegacyDelimitedPayloadTokenFilterFactory.class)); LegacyDelimitedPayloadTokenFilterFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) { super(indexSettings, env, name, settings); if (indexSettings.getIndexVersionCreated().onOrAfter(Version.V_6_2_0)) { - DEPRECATION_LOGGER.deprecated("Deprecated [delimited_payload_filter] used, replaced by [delimited_payload]"); + deprecationLogger.deprecated("Deprecated [delimited_payload_filter] used, replaced by [delimited_payload]"); } } } diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DotExpanderProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DotExpanderProcessor.java index 0698f6ed0a6c9..f4215af8390dd 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DotExpanderProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/DotExpanderProcessor.java @@ -52,28 +52,30 @@ public IngestDocument execute(IngestDocument ingestDocument) throws Exception { map = ingestDocument.getSourceAndMetadata(); } - if (ingestDocument.hasField(path)) { - Object value = map.remove(field); - ingestDocument.appendFieldValue(path, value); - } else { - // check whether we actually can expand the field in question into an object field. - // part of the path may already exist and if part of it would be a value field (string, integer etc.) - // then we can't override it with an object field and we should fail with a good reason. - // IngestDocument#setFieldValue(...) 
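Returning to the LegacyDelimitedPayloadTokenFilterFactory hunk above: the Loggers-to-LogManager swap there recurs throughout this PR. The resulting deprecation-warning pattern, condensed (the class here is hypothetical):

---------------------------------------------------------------------------
import org.apache.logging.log4j.LogManager;
import org.elasticsearch.common.logging.DeprecationLogger;

class SomeFactory {
    // plain log4j LogManager instead of the removed Loggers helper
    private static final DeprecationLogger deprecationLogger =
        new DeprecationLogger(LogManager.getLogger(SomeFactory.class));

    void warn() {
        // goes to the deprecation log, and typically also to response warning headers
        deprecationLogger.deprecated("Deprecated [delimited_payload_filter] used, replaced by [delimited_payload]");
    }
}
---------------------------------------------------------------------------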
would fail too, but the error isn't very understandable - for (int index = path.indexOf('.'); index != -1; index = path.indexOf('.', index + 1)) { - String partialPath = path.substring(0, index); - if (ingestDocument.hasField(partialPath)) { - Object val = ingestDocument.getFieldValue(partialPath, Object.class); - if ((val instanceof Map) == false) { - throw new IllegalArgumentException("cannot expend [" + path + "], because [" + partialPath + + if (map.containsKey(field)) { + if (ingestDocument.hasField(path)) { + Object value = map.remove(field); + ingestDocument.appendFieldValue(path, value); + } else { + // check whether we actually can expand the field in question into an object field. + // part of the path may already exist and if part of it would be a value field (string, integer etc.) + // then we can't override it with an object field and we should fail with a good reason. + // IngestDocument#setFieldValue(...) would fail too, but the error isn't very understandable + for (int index = path.indexOf('.'); index != -1; index = path.indexOf('.', index + 1)) { + String partialPath = path.substring(0, index); + if (ingestDocument.hasField(partialPath)) { + Object val = ingestDocument.getFieldValue(partialPath, Object.class); + if ((val instanceof Map) == false) { + throw new IllegalArgumentException("cannot expand [" + path + "], because [" + partialPath + "] is not an object field, but a value field"); + } + } else { + break; } - } else { - break; } + Object value = map.remove(field); + ingestDocument.setFieldValue(path, value); } - Object value = map.remove(field); - ingestDocument.setFieldValue(path, value); } return ingestDocument; } diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DotExpanderProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DotExpanderProcessorTests.java index fde7f0c9b8a02..d6a207b859eb0 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DotExpanderProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/DotExpanderProcessorTests.java @@ -143,4 +143,38 @@ public void testEscapeFields_path() throws Exception { assertThat(document.getFieldValue("field.foo.bar.baz", String.class), equalTo("value")); } + + public void testEscapeFields_doNothingIfFieldNotInSourceDoc() throws Exception { + //asking to expand a (literal) field that is not present in the source document + Map source = new HashMap<>(); + source.put("foo.bar", "baz1"); + IngestDocument document = new IngestDocument(source, Collections.emptyMap()); + //abc.def does not exist in source, so don't mutate document + DotExpanderProcessor processor = new DotExpanderProcessor("_tag", null, "abc.def"); + processor.execute(document); + //hasField returns false since it requires the expanded form, which is not expanded since we did not ask for it to be + assertFalse(document.hasField("foo.bar")); + //nothing has changed + assertEquals(document.getSourceAndMetadata().get("foo.bar"), "baz1"); + //abc.def is not found anywhere + assertFalse(document.hasField("abc.def")); + assertFalse(document.getSourceAndMetadata().containsKey("abc")); + assertFalse(document.getSourceAndMetadata().containsKey("abc.def")); + + //asking to expand a (literal) field that does not exist, but the nested field does exist + source = new HashMap<>(); + Map inner = new HashMap<>(); + inner.put("bar", "baz1"); + source.put("foo", inner); + document = new IngestDocument(source, Collections.emptyMap()); + 
//foo.bar, the literal value (as opposed to nested value) does not exist in source, so don't mutate document + processor = new DotExpanderProcessor("_tag", null, "foo.bar"); + processor.execute(document); + //hasField returns true because the nested/expanded form exists in the source document + assertTrue(document.hasField("foo.bar")); + //nothing changed + assertThat(document.getFieldValue("foo", Map.class).size(), equalTo(1)); + assertThat(document.getFieldValue("foo.bar", String.class), equalTo("baz1")); + } + } diff --git a/modules/lang-expression/licenses/lucene-expressions-7.5.0.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-7.5.0.jar.sha1 deleted file mode 100644 index 7c391c8fbd1cf..0000000000000 --- a/modules/lang-expression/licenses/lucene-expressions-7.5.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4566befa0926231cfc86692809bba4f636836a58 \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-7.6.0-snapshot-f9598f335b.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-7.6.0-snapshot-f9598f335b.jar.sha1 new file mode 100644 index 0000000000000..ca7f638b808b0 --- /dev/null +++ b/modules/lang-expression/licenses/lucene-expressions-7.6.0-snapshot-f9598f335b.jar.sha1 @@ -0,0 +1 @@ +717fb4e5f1c1667819465805858731d6683f3ab4 \ No newline at end of file diff --git a/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionPlugin.java b/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionPlugin.java index fd80c56cdbe8a..cd0b09eca8c3a 100644 --- a/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionPlugin.java +++ b/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionPlugin.java @@ -31,6 +31,6 @@ public class ExpressionPlugin extends Plugin implements ScriptPlugin { @Override public ScriptEngine getScriptEngine(Settings settings, Collection> contexts) { - return new ExpressionScriptEngine(settings); + return new ExpressionScriptEngine(); } } diff --git a/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionScriptEngine.java b/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionScriptEngine.java index 6e6d5dc443d3c..b2712a4b19555 100644 --- a/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionScriptEngine.java +++ b/modules/lang-expression/src/main/java/org/elasticsearch/script/expression/ExpressionScriptEngine.java @@ -30,8 +30,6 @@ import org.apache.lucene.search.SortField; import org.elasticsearch.SpecialPermission; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.IndexNumericFieldData; import org.elasticsearch.index.mapper.DateFieldMapper; @@ -63,14 +61,10 @@ * Provides the infrastructure for Lucene expressions as a scripting language for Elasticsearch. Only * {@link SearchScript}s are supported. 
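Stepping back to the DotExpanderProcessor change above: the new containsKey(field) guard means a document without the literal dotted key passes through untouched, which is exactly what the added tests pin down. The expansion itself, reduced to plain maps (IngestDocument and processor plumbing omitted; this sketches the semantics, not the processor's code):

---------------------------------------------------------------------------
import java.util.HashMap;
import java.util.Map;

public class DotExpandSketch {

    @SuppressWarnings("unchecked")
    static void expand(Map<String, Object> source, String field) {
        if (source.containsKey(field) == false) {
            return; // mirrors the new guard: no literal dotted key, no mutation
        }
        Object value = source.remove(field);
        String[] parts = field.split("\\.");
        Map<String, Object> current = source;
        for (int i = 0; i < parts.length - 1; i++) {
            Object next = current.get(parts[i]);
            if (next == null) {
                next = new HashMap<String, Object>();
                current.put(parts[i], next);
            } else if ((next instanceof Map) == false) {
                // a value field along the way blocks expansion, as in the processor
                throw new IllegalArgumentException("cannot expand [" + field + "], because ["
                    + parts[i] + "] is not an object field, but a value field");
            }
            current = (Map<String, Object>) next;
        }
        current.put(parts[parts.length - 1], value);
    }

    public static void main(String[] args) {
        Map<String, Object> doc = new HashMap<>();
        doc.put("foo.bar", "baz");
        expand(doc, "foo.bar");
        System.out.println(doc); // {foo={bar=baz}}
        expand(doc, "abc.def"); // absent literal key: a no-op
        System.out.println(doc); // unchanged
    }
}
---------------------------------------------------------------------------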
*/ -public class ExpressionScriptEngine extends AbstractComponent implements ScriptEngine { +public class ExpressionScriptEngine implements ScriptEngine { public static final String NAME = "expression"; - public ExpressionScriptEngine(Settings settings) { - super(settings); - } - @Override public String getType() { return NAME; diff --git a/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/ExpressionTests.java b/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/ExpressionTests.java index 33e6239002eb1..9894dd9f4919b 100644 --- a/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/ExpressionTests.java +++ b/modules/lang-expression/src/test/java/org/elasticsearch/script/expression/ExpressionTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.script.expression; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.fielddata.AtomicNumericFieldData; import org.elasticsearch.index.fielddata.IndexNumericFieldData; import org.elasticsearch.index.fielddata.SortedNumericDoubleValues; @@ -64,7 +63,7 @@ public void setUp() throws Exception { when(fieldData.getFieldName()).thenReturn("field"); when(fieldData.load(anyObject())).thenReturn(atomicFieldData); - service = new ExpressionScriptEngine(Settings.EMPTY); + service = new ExpressionScriptEngine(); lookup = new SearchLookup(mapperService, ignored -> fieldData, null); } diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateRequest.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateRequest.java index caa9fa4831add..41ac8b720dc03 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateRequest.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateRequest.java @@ -46,7 +46,7 @@ public class MultiSearchTemplateRequest extends ActionRequest implements Composi private int maxConcurrentSearchRequests = 0; private List requests = new ArrayList<>(); - private IndicesOptions indicesOptions = IndicesOptions.strictExpandOpenAndForbidClosed(); + private IndicesOptions indicesOptions = IndicesOptions.strictExpandOpenAndForbidClosedIgnoreThrottled(); /** * Add a search template request to execute. Note, the order is important, the search response will be returned in the diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Compiler.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Compiler.java index 81cc802916d4e..0ea3d4af81f79 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/Compiler.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/Compiler.java @@ -48,11 +48,6 @@ */ final class Compiler { - /** - * The maximum number of characters allowed in the script source. - */ - static final int MAXIMUM_SOURCE_LENGTH = 16384; - /** * Define the class with lowest privileges. */ @@ -212,12 +207,6 @@ private static void addFactoryMethod(Map> additionalClasses, Cl * @return An executable script that implements both a specified interface and is a subclass of {@link PainlessScript} */ Constructor compile(Loader loader, MainMethodReserved reserved, String name, String source, CompilerSettings settings) { - if (source.length() > MAXIMUM_SOURCE_LENGTH) { - throw new IllegalArgumentException("Scripts may be no longer than " + MAXIMUM_SOURCE_LENGTH + - " characters. 
The passed in script is " + source.length() + " characters. Consider using a" + - " plugin if a script longer than this length is a requirement."); - } - ScriptClassInfo scriptClassInfo = new ScriptClassInfo(painlessLookup, scriptClass); SSource root = Walker.buildPainlessTree(scriptClassInfo, reserved, name, source, settings, painlessLookup, null); @@ -248,12 +237,6 @@ Constructor compile(Loader loader, MainMethodReserved reserved, String name, * @return The bytes for compilation. */ byte[] compile(String name, String source, CompilerSettings settings, Printer debugStream) { - if (source.length() > MAXIMUM_SOURCE_LENGTH) { - throw new IllegalArgumentException("Scripts may be no longer than " + MAXIMUM_SOURCE_LENGTH + - " characters. The passed in script is " + source.length() + " characters. Consider using a" + - " plugin if a script longer than this length is a requirement."); - } - ScriptClassInfo scriptClassInfo = new ScriptClassInfo(painlessLookup, scriptClass); SSource root = Walker.buildPainlessTree(scriptClassInfo, new MainMethodReserved(), name, source, settings, painlessLookup, debugStream); diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngine.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngine.java index 5ed305751c8e8..be753de5721b0 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngine.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessScriptEngine.java @@ -21,7 +21,6 @@ import org.apache.lucene.index.LeafReaderContext; import org.elasticsearch.SpecialPermission; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.painless.Compiler.Loader; import org.elasticsearch.painless.lookup.PainlessLookupBuilder; @@ -57,7 +56,7 @@ /** * Implementation of a ScriptEngine for the Painless language. */ -public final class PainlessScriptEngine extends AbstractComponent implements ScriptEngine { +public final class PainlessScriptEngine implements ScriptEngine { /** * Standard name of the Painless language. @@ -93,8 +92,6 @@ public final class PainlessScriptEngine extends AbstractComponent implements Scr * @param settings The settings to initialize the engine with. 
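ExpressionScriptEngine and PainlessScriptEngine dropping AbstractComponent follows a pattern repeated across this PR: the base class existed mostly to hand subclasses a logger and a settings field. The replacement shape, sketched generically (class name hypothetical):

---------------------------------------------------------------------------
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.settings.Settings;

// Before: class MyService extends AbstractComponent { MyService(Settings s) { super(s); } }
class MyService {
    // own your logger instead of inheriting one
    private static final Logger logger = LogManager.getLogger(MyService.class);

    private final Settings settings;

    MyService(Settings settings) {
        // keep a settings field only if it is actually read later,
        // as Netty4HttpServerTransport and TribeDiscovery do further down
        this.settings = settings;
    }
}
---------------------------------------------------------------------------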
*/ public PainlessScriptEngine(Settings settings, Map, List> contexts) { - super(settings); - defaultCompilerSettings.setRegexesEnabled(CompilerSettings.REGEX_ENABLED.get(settings)); Map, Compiler> contextsToCompilers = new HashMap<>(); diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptedMetricAggContextsTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptedMetricAggContextsTests.java index 6ee021c695f99..4f7dab61df4d3 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptedMetricAggContextsTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/ScriptedMetricAggContextsTests.java @@ -21,16 +21,25 @@ import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.Scorer; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.memory.MemoryIndex; import org.elasticsearch.painless.spi.Whitelist; import org.elasticsearch.script.ScriptContext; import org.elasticsearch.script.ScriptedMetricAggContexts; +import org.elasticsearch.search.lookup.LeafSearchLookup; +import org.elasticsearch.search.lookup.SearchLookup; +import org.elasticsearch.search.lookup.SourceLookup; +import java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + public class ScriptedMetricAggContextsTests extends ScriptTestCase { @Override protected Map, List> scriptContexts() { @@ -58,7 +67,7 @@ public void testInitBasic() { assertEquals(10, state.get("testField")); } - public void testMapBasic() { + public void testMapBasic() throws IOException { ScriptedMetricAggContexts.MapScript.Factory factory = scriptEngine.compile("test", "state.testField = 2*_score", ScriptedMetricAggContexts.MapScript.CONTEXT, Collections.emptyMap()); @@ -86,6 +95,32 @@ public void testMapBasic() { assertEquals(1.0, state.get("testField")); } + public void testMapSourceAccess() throws IOException { + ScriptedMetricAggContexts.MapScript.Factory factory = scriptEngine.compile("test", + "state.testField = params._source.three", ScriptedMetricAggContexts.MapScript.CONTEXT, Collections.emptyMap()); + + Map params = new HashMap<>(); + Map state = new HashMap<>(); + + MemoryIndex index = new MemoryIndex(); + // we don't need a real index, just need to construct a LeafReaderContext which cannot be mocked + LeafReaderContext leafReaderContext = index.createSearcher().getIndexReader().leaves().get(0); + + SearchLookup lookup = mock(SearchLookup.class); + LeafSearchLookup leafLookup = mock(LeafSearchLookup.class); + when(lookup.getLeafSearchLookup(leafReaderContext)).thenReturn(leafLookup); + SourceLookup sourceLookup = mock(SourceLookup.class); + when(leafLookup.asMap()).thenReturn(Collections.singletonMap("_source", sourceLookup)); + when(sourceLookup.get("three")).thenReturn(3); + ScriptedMetricAggContexts.MapScript.LeafFactory leafFactory = factory.newFactory(params, state, lookup); + ScriptedMetricAggContexts.MapScript script = leafFactory.newInstance(leafReaderContext); + + script.execute(); + + assert(state.containsKey("testField")); + assertEquals(3, state.get("testField")); + } + public void testCombineBasic() { ScriptedMetricAggContexts.CombineScript.Factory factory = scriptEngine.compile("test", "state.testField = params.initialVal; return state.testField + params.inc", ScriptedMetricAggContexts.CombineScript.CONTEXT, 
diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/WhenThingsGoWrongTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/WhenThingsGoWrongTests.java index 32d74d0837ccd..d1db6606c86aa 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/WhenThingsGoWrongTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/WhenThingsGoWrongTests.java @@ -24,7 +24,6 @@ import org.elasticsearch.script.ScriptException; import java.lang.invoke.WrongMethodTypeException; -import java.util.Arrays; import java.util.Collections; import static java.util.Collections.emptyMap; @@ -200,21 +199,6 @@ public void testLoopLimits() { "The maximum number of statements that can be executed in a loop has been reached.")); } - public void testSourceLimits() { - final char[] tooManyChars = new char[Compiler.MAXIMUM_SOURCE_LENGTH + 1]; - Arrays.fill(tooManyChars, '0'); - - IllegalArgumentException expected = expectScriptThrows(IllegalArgumentException.class, false, () -> { - exec(new String(tooManyChars)); - }); - assertTrue(expected.getMessage().contains("Scripts may be no longer than")); - - final char[] exactlyAtLimit = new char[Compiler.MAXIMUM_SOURCE_LENGTH]; - Arrays.fill(exactlyAtLimit, '0'); - // ok - assertEquals(0, exec(new String(exactlyAtLimit))); - } - public void testIllegalDynamicMethod() { IllegalArgumentException expected = expectScriptThrows(IllegalArgumentException.class, () -> { exec("def x = 'test'; return x.getClass().toString()"); diff --git a/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/30_search.yml b/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/30_search.yml index b7be116b38695..91d539de73c7f 100644 --- a/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/30_search.yml +++ b/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/30_search.yml @@ -23,8 +23,8 @@ indices.refresh: {} - do: - index: test search: + index: test body: query: script: @@ -45,8 +45,8 @@ - match: { hits.hits.1.fields.sNum1.0: 3.0 } - do: - index: test search: + index: test body: query: script: @@ -70,8 +70,8 @@ - match: { hits.hits.1.fields.sNum1.0: 3.0 } - do: - index: test search: + index: test body: query: script: @@ -96,8 +96,8 @@ - match: { hits.hits.2.fields.sNum1.0: 3.0 } - do: - index: test search: + index: test body: query: script: @@ -127,8 +127,8 @@ indices.refresh: {} - do: - index: test search: + index: test body: query: function_score: @@ -149,8 +149,8 @@ - match: { hits.hits.1._id: "1" } - do: - index: test search: + index: test body: query: function_score: @@ -171,8 +171,8 @@ - match: { hits.hits.1._id: "2" } - do: - index: test search: + index: test body: query: function_score: @@ -193,8 +193,8 @@ - match: { hits.hits.1._id: "1" } - do: - index: test search: + index: test body: query: function_score: @@ -215,8 +215,8 @@ - match: { hits.hits.1._id: "1" } - do: - index: test search: + index: test body: query: function_score: @@ -237,8 +237,8 @@ - match: { hits.hits.1._id: "1" } - do: - index: test search: + index: test body: query: function_score: @@ -274,8 +274,8 @@ indices.refresh: {} - do: - index: test search: + index: test body: query: function_score: @@ -325,8 +325,8 @@ - do: - index: test search: + index: test body: query: function_score: @@ -364,8 +364,8 @@ - do: - index: test search: + index: test body: script_fields: foobar: @@ -391,8 +391,8 @@ - do: - index: test search: + index: test body: aggs: value_agg: @@ -428,8 +428,8 @@ - do: 
catch: bad_request - index: test search: + index: test body: aggs: genre: diff --git a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapperTests.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapperTests.java index 70d1c6169d0a6..22971f1484f57 100644 --- a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapperTests.java +++ b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapperTests.java @@ -75,7 +75,7 @@ public void testDefaults() throws Exception { IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(2, fields.length); IndexableField pointField = fields[0]; - assertEquals(1, pointField.fieldType().pointDimensionCount()); + assertEquals(1, pointField.fieldType().pointIndexDimensionCount()); assertFalse(pointField.fieldType().stored()); assertEquals(1230, pointField.numericValue().longValue()); IndexableField dvField = fields[1]; @@ -149,7 +149,7 @@ public void testNoDocValues() throws Exception { IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(1, fields.length); IndexableField pointField = fields[0]; - assertEquals(1, pointField.fieldType().pointDimensionCount()); + assertEquals(1, pointField.fieldType().pointIndexDimensionCount()); assertEquals(1230, pointField.numericValue().longValue()); } @@ -173,7 +173,7 @@ public void testStore() throws Exception { IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(3, fields.length); IndexableField pointField = fields[0]; - assertEquals(1, pointField.fieldType().pointDimensionCount()); + assertEquals(1, pointField.fieldType().pointIndexDimensionCount()); assertEquals(1230, pointField.numericValue().doubleValue(), 0d); IndexableField dvField = fields[1]; assertEquals(DocValuesType.SORTED_NUMERIC, dvField.fieldType().docValuesType()); @@ -202,7 +202,7 @@ public void testCoerce() throws Exception { IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(2, fields.length); IndexableField pointField = fields[0]; - assertEquals(1, pointField.fieldType().pointDimensionCount()); + assertEquals(1, pointField.fieldType().pointIndexDimensionCount()); assertEquals(1230, pointField.numericValue().longValue()); IndexableField dvField = fields[1]; assertEquals(DocValuesType.SORTED_NUMERIC, dvField.fieldType().docValuesType()); @@ -317,7 +317,7 @@ public void testNullValue() throws IOException { IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(2, fields.length); IndexableField pointField = fields[0]; - assertEquals(1, pointField.fieldType().pointDimensionCount()); + assertEquals(1, pointField.fieldType().pointIndexDimensionCount()); assertFalse(pointField.fieldType().stored()); assertEquals(25, pointField.numericValue().longValue()); IndexableField dvField = fields[1]; diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java index 23839a3a635ab..24b333bed6643 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java @@ -341,7 +341,7 @@ Tuple, Map>> extractTermsAndRanges(IndexRead extractedTerms.add(builder.toBytesRef()); } } - if (info.getPointDimensionCount() == 1) { // not != 0 because range fields are not supported + if 
(info.getPointIndexDimensionCount() == 1) { // not != 0 because range fields are not supported PointValues values = reader.getPointValues(info.name); List encodedPointValues = new ArrayList<>(); encodedPointValues.add(values.getMinPackedValue().clone()); diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorHighlightSubFetchPhase.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorHighlightSubFetchPhase.java index 44823f9aa012b..0338e0fba91df 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorHighlightSubFetchPhase.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorHighlightSubFetchPhase.java @@ -31,7 +31,6 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.document.DocumentField; import org.elasticsearch.common.lucene.search.function.FunctionScoreQuery; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.text.Text; import org.elasticsearch.index.query.ParsedQuery; import org.elasticsearch.search.SearchHit; @@ -56,8 +55,8 @@ final class PercolatorHighlightSubFetchPhase implements FetchSubPhase { private final HighlightPhase highlightPhase; - PercolatorHighlightSubFetchPhase(Settings settings, Map highlighters) { - this.highlightPhase = new HighlightPhase(settings, highlighters); + PercolatorHighlightSubFetchPhase(Map highlighters) { + this.highlightPhase = new HighlightPhase(highlighters); } boolean hitsExecutionNeeded(SearchContext context) { // for testing diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorPlugin.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorPlugin.java index 7128060448cf1..659b9c422fc0e 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorPlugin.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorPlugin.java @@ -20,7 +20,6 @@ package org.elasticsearch.percolator; import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.plugins.MapperPlugin; import org.elasticsearch.plugins.Plugin; @@ -35,13 +34,6 @@ import static java.util.Collections.singletonMap; public class PercolatorPlugin extends Plugin implements MapperPlugin, SearchPlugin { - - private final Settings settings; - - public PercolatorPlugin(Settings settings) { - this.settings = settings; - } - @Override public List> getQueries() { return singletonList(new QuerySpec<>(PercolateQueryBuilder.NAME, PercolateQueryBuilder::new, PercolateQueryBuilder::fromXContent)); @@ -51,7 +43,7 @@ public List> getQueries() { public List getFetchSubPhases(FetchPhaseConstructionContext context) { return Arrays.asList( new PercolatorMatchedSlotSubFetchPhase(), - new PercolatorHighlightSubFetchPhase(settings, context.getHighlighters()) + new PercolatorHighlightSubFetchPhase(context.getHighlighters()) ); } diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorHighlightSubFetchPhaseTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorHighlightSubFetchPhaseTests.java index f1b89d92ab11e..e49204506e588 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorHighlightSubFetchPhaseTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolatorHighlightSubFetchPhaseTests.java @@ -28,7 +28,6 @@ 
import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.lucene.search.function.FunctionScoreQuery; import org.elasticsearch.common.lucene.search.function.RandomScoreFunction; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.search.fetch.subphase.highlight.SearchContextHighlight; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.test.ESTestCase; @@ -47,8 +46,7 @@ public class PercolatorHighlightSubFetchPhaseTests extends ESTestCase { public void testHitsExecutionNeeded() { PercolateQuery percolateQuery = new PercolateQuery("_name", ctx -> null, Collections.singletonList(new BytesArray("{}")), new MatchAllDocsQuery(), Mockito.mock(IndexSearcher.class), new MatchAllDocsQuery()); - PercolatorHighlightSubFetchPhase subFetchPhase = new PercolatorHighlightSubFetchPhase(Settings.EMPTY, - emptyMap()); + PercolatorHighlightSubFetchPhase subFetchPhase = new PercolatorHighlightSubFetchPhase(emptyMap()); SearchContext searchContext = Mockito.mock(SearchContext.class); Mockito.when(searchContext.highlight()).thenReturn(new SearchContextHighlight(Collections.emptyList())); Mockito.when(searchContext.query()).thenReturn(new MatchAllDocsQuery()); diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalRequestIT.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalRequestIT.java index 7d594c852da5b..5c98cce9c8e59 100644 --- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalRequestIT.java +++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalRequestIT.java @@ -286,7 +286,7 @@ public void testIndicesOptions() { // test that ignore_unavailable=true works but returns one result less assertTrue(client().admin().indices().prepareClose("test2").get().isAcknowledged()); - request.indicesOptions(IndicesOptions.fromParameters(null, "true", null, SearchRequest.DEFAULT_INDICES_OPTIONS)); + request.indicesOptions(IndicesOptions.fromParameters(null, "true", null, "false", SearchRequest.DEFAULT_INDICES_OPTIONS)); response = client().execute(RankEvalAction.INSTANCE, request).actionGet(); details = (PrecisionAtK.Detail) response.getPartialResults().get("amsterdam_query").getMetricDetails(); assertEquals(6, details.getRetrieved()); @@ -294,37 +294,37 @@ public void testIndicesOptions() { // test that ignore_unavailable=false or default settings throw an IndexClosedException assertTrue(client().admin().indices().prepareClose("test2").get().isAcknowledged()); - request.indicesOptions(IndicesOptions.fromParameters(null, "false", null, SearchRequest.DEFAULT_INDICES_OPTIONS)); + request.indicesOptions(IndicesOptions.fromParameters(null, "false", null, "false", SearchRequest.DEFAULT_INDICES_OPTIONS)); response = client().execute(RankEvalAction.INSTANCE, request).actionGet(); assertEquals(1, response.getFailures().size()); assertThat(response.getFailures().get("amsterdam_query"), instanceOf(IndexClosedException.class)); // test expand_wildcards request = new RankEvalRequest(task, new String[] { "tes*" }); - request.indicesOptions(IndicesOptions.fromParameters("none", null, null, SearchRequest.DEFAULT_INDICES_OPTIONS)); + request.indicesOptions(IndicesOptions.fromParameters("none", null, null, "false", SearchRequest.DEFAULT_INDICES_OPTIONS)); response = client().execute(RankEvalAction.INSTANCE, request).actionGet(); details = (PrecisionAtK.Detail) response.getPartialResults().get("amsterdam_query").getMetricDetails(); assertEquals(0, 
details.getRetrieved()); - request.indicesOptions(IndicesOptions.fromParameters("open", null, null, SearchRequest.DEFAULT_INDICES_OPTIONS)); + request.indicesOptions(IndicesOptions.fromParameters("open", null, null, "false", SearchRequest.DEFAULT_INDICES_OPTIONS)); response = client().execute(RankEvalAction.INSTANCE, request).actionGet(); details = (PrecisionAtK.Detail) response.getPartialResults().get("amsterdam_query").getMetricDetails(); assertEquals(6, details.getRetrieved()); assertEquals(5, details.getRelevantRetrieved()); - request.indicesOptions(IndicesOptions.fromParameters("closed", null, null, SearchRequest.DEFAULT_INDICES_OPTIONS)); + request.indicesOptions(IndicesOptions.fromParameters("closed", null, null, "false", SearchRequest.DEFAULT_INDICES_OPTIONS)); response = client().execute(RankEvalAction.INSTANCE, request).actionGet(); assertEquals(1, response.getFailures().size()); assertThat(response.getFailures().get("amsterdam_query"), instanceOf(IndexClosedException.class)); // test allow_no_indices request = new RankEvalRequest(task, new String[] { "bad*" }); - request.indicesOptions(IndicesOptions.fromParameters(null, null, "true", SearchRequest.DEFAULT_INDICES_OPTIONS)); + request.indicesOptions(IndicesOptions.fromParameters(null, null, "true", "false", SearchRequest.DEFAULT_INDICES_OPTIONS)); response = client().execute(RankEvalAction.INSTANCE, request).actionGet(); details = (PrecisionAtK.Detail) response.getPartialResults().get("amsterdam_query").getMetricDetails(); assertEquals(0, details.getRetrieved()); - request.indicesOptions(IndicesOptions.fromParameters(null, null, "false", SearchRequest.DEFAULT_INDICES_OPTIONS)); + request.indicesOptions(IndicesOptions.fromParameters(null, null, "false", "false", SearchRequest.DEFAULT_INDICES_OPTIONS)); response = client().execute(RankEvalAction.INSTANCE, request).actionGet(); assertEquals(1, response.getFailures().size()); assertThat(response.getFailures().get("amsterdam_query"), instanceOf(IndexNotFoundException.class)); diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalRequestTests.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalRequestTests.java index 10e3611b30d31..1a16c311fcf3a 100644 --- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalRequestTests.java +++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RankEvalRequestTests.java @@ -59,7 +59,8 @@ protected RankEvalRequest createTestInstance() { } RankEvalRequest rankEvalRequest = new RankEvalRequest(RankEvalSpecTests.createTestItem(), indices); IndicesOptions indicesOptions = IndicesOptions.fromOptions( - randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean()); + randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), + randomBoolean()); rankEvalRequest.indicesOptions(indicesOptions); return rankEvalRequest; } diff --git a/modules/reindex/src/test/java/org/elasticsearch/client/documentation/ReindexDocumentationIT.java b/modules/reindex/src/test/java/org/elasticsearch/client/documentation/ReindexDocumentationIT.java index 1a54f53749fb2..e2f935125ecbd 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/client/documentation/ReindexDocumentationIT.java +++ b/modules/reindex/src/test/java/org/elasticsearch/client/documentation/ReindexDocumentationIT.java @@ -23,73 +23,133 @@ import 
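The RankEval call sites above each gain a fourth argument for the new ignore_throttled flag, and fromOptions gains an eighth boolean. A condensed fromParameters call, with the argument order inferred from those call sites (the method's declaration is outside this diff):

---------------------------------------------------------------------------
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.support.IndicesOptions;

class IndicesOptionsExample {
    static IndicesOptions openOnly() {
        // (expand_wildcards, ignore_unavailable, allow_no_indices, ignore_throttled, defaults)
        return IndicesOptions.fromParameters(
            "open", "true", "true", "false", SearchRequest.DEFAULT_INDICES_OPTIONS);
    }
}
---------------------------------------------------------------------------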
org.elasticsearch.action.admin.cluster.node.tasks.get.GetTaskResponse; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.elasticsearch.client.Client; +import org.elasticsearch.index.IndexModule; +import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.index.reindex.AbstractBulkByScrollRequestBuilder; import org.elasticsearch.index.reindex.BulkByScrollResponse; import org.elasticsearch.index.reindex.BulkByScrollTask; +import org.elasticsearch.index.reindex.CancelTests; import org.elasticsearch.index.reindex.DeleteByQueryAction; import org.elasticsearch.index.reindex.ReindexAction; +import org.elasticsearch.index.reindex.ReindexPlugin; +import org.elasticsearch.index.reindex.ReindexRequestBuilder; import org.elasticsearch.index.reindex.RethrottleAction; import org.elasticsearch.index.reindex.UpdateByQueryAction; import org.elasticsearch.index.reindex.UpdateByQueryRequestBuilder; +import org.elasticsearch.index.shard.IndexingOperationListener; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.tasks.TaskInfo; import org.elasticsearch.test.ESIntegTestCase; +import org.hamcrest.Matcher; +import org.junit.Before; +import java.util.Arrays; +import java.util.Collection; import java.util.Collections; +import java.util.concurrent.Semaphore; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.hamcrest.Matchers.equalTo; public class ReindexDocumentationIT extends ESIntegTestCase { - public void reindex() { + // Semaphore used to allow & block indexing operations during the test + private static final Semaphore ALLOWED_OPERATIONS = new Semaphore(0); + private static final String INDEX_NAME = "source_index"; + + @Override + protected boolean ignoreExternalCluster() { + return true; + } + + @Override + protected Collection> nodePlugins() { + return Arrays.asList(ReindexPlugin.class, ReindexCancellationPlugin.class); + } + + @Override + protected Collection> transportClientPlugins() { + return Collections.singletonList(ReindexPlugin.class); + } + + @Before + public void setup() { + client().admin().indices().prepareCreate(INDEX_NAME).get(); + } + + public void testReindex() { Client client = client(); // tag::reindex1 - BulkByScrollResponse response = ReindexAction.INSTANCE.newRequestBuilder(client) + BulkByScrollResponse response = + ReindexAction.INSTANCE.newRequestBuilder(client) + .source("source_index") .destination("target_index") .filter(QueryBuilders.matchQuery("category", "xzy")) // <1> .get(); // end::reindex1 } - public void updateByQuery() { + public void testUpdateByQuery() { Client client = client(); + client.admin().indices().prepareCreate("foo").get(); + client.admin().indices().prepareCreate("bar").get(); + client.admin().indices().preparePutMapping(INDEX_NAME).setType("_doc").setSource("cat", "type=keyword").get(); { // tag::update-by-query - UpdateByQueryRequestBuilder updateByQuery = UpdateByQueryAction.INSTANCE.newRequestBuilder(client); + UpdateByQueryRequestBuilder updateByQuery = + UpdateByQueryAction.INSTANCE.newRequestBuilder(client); 
updateByQuery.source("source_index").abortOnVersionConflict(false); BulkByScrollResponse response = updateByQuery.get(); // end::update-by-query } { // tag::update-by-query-filter - UpdateByQueryRequestBuilder updateByQuery = UpdateByQueryAction.INSTANCE.newRequestBuilder(client); + UpdateByQueryRequestBuilder updateByQuery = + UpdateByQueryAction.INSTANCE.newRequestBuilder(client); updateByQuery.source("source_index") .filter(QueryBuilders.termQuery("level", "awesome")) .size(1000) - .script(new Script(ScriptType.INLINE, "ctx._source.awesome = 'absolutely'", "painless", Collections.emptyMap())); + .script(new Script(ScriptType.INLINE, + "ctx._source.awesome = 'absolutely'", + "painless", + Collections.emptyMap())); BulkByScrollResponse response = updateByQuery.get(); // end::update-by-query-filter } { // tag::update-by-query-size - UpdateByQueryRequestBuilder updateByQuery = UpdateByQueryAction.INSTANCE.newRequestBuilder(client); + UpdateByQueryRequestBuilder updateByQuery = + UpdateByQueryAction.INSTANCE.newRequestBuilder(client); updateByQuery.source("source_index") - .source().setSize(500); + .source() + .setSize(500); BulkByScrollResponse response = updateByQuery.get(); // end::update-by-query-size } { // tag::update-by-query-sort - UpdateByQueryRequestBuilder updateByQuery = UpdateByQueryAction.INSTANCE.newRequestBuilder(client); - updateByQuery.source("source_index").size(100) - .source().addSort("cat", SortOrder.DESC); + UpdateByQueryRequestBuilder updateByQuery = + UpdateByQueryAction.INSTANCE.newRequestBuilder(client); + updateByQuery.source("source_index") + .size(100) + .source() + .addSort("cat", SortOrder.DESC); BulkByScrollResponse response = updateByQuery.get(); // end::update-by-query-sort } { // tag::update-by-query-script - UpdateByQueryRequestBuilder updateByQuery = UpdateByQueryAction.INSTANCE.newRequestBuilder(client); + UpdateByQueryRequestBuilder updateByQuery = + UpdateByQueryAction.INSTANCE.newRequestBuilder(client); updateByQuery.source("source_index") .script(new Script( ScriptType.INLINE, @@ -106,53 +166,67 @@ public void updateByQuery() { } { // tag::update-by-query-multi-index - UpdateByQueryRequestBuilder updateByQuery = UpdateByQueryAction.INSTANCE.newRequestBuilder(client); + UpdateByQueryRequestBuilder updateByQuery = + UpdateByQueryAction.INSTANCE.newRequestBuilder(client); updateByQuery.source("foo", "bar").source().setTypes("a", "b"); BulkByScrollResponse response = updateByQuery.get(); // end::update-by-query-multi-index } { // tag::update-by-query-routing - UpdateByQueryRequestBuilder updateByQuery = UpdateByQueryAction.INSTANCE.newRequestBuilder(client); + UpdateByQueryRequestBuilder updateByQuery = + UpdateByQueryAction.INSTANCE.newRequestBuilder(client); updateByQuery.source().setRouting("cat"); BulkByScrollResponse response = updateByQuery.get(); // end::update-by-query-routing } { // tag::update-by-query-pipeline - UpdateByQueryRequestBuilder updateByQuery = UpdateByQueryAction.INSTANCE.newRequestBuilder(client); + UpdateByQueryRequestBuilder updateByQuery = + UpdateByQueryAction.INSTANCE.newRequestBuilder(client); updateByQuery.setPipeline("hurray"); BulkByScrollResponse response = updateByQuery.get(); // end::update-by-query-pipeline } + } + + public void testTasks() throws Exception { + final Client client = client(); + final ReindexRequestBuilder builder = reindexAndPartiallyBlock(); + { // tag::update-by-query-list-tasks ListTasksResponse tasksList = client.admin().cluster().prepareListTasks() 
.setActions(UpdateByQueryAction.NAME).setDetailed(true).get(); for (TaskInfo info: tasksList.getTasks()) { TaskId taskId = info.getTaskId(); - BulkByScrollTask.Status status = (BulkByScrollTask.Status) info.getStatus(); + BulkByScrollTask.Status status = + (BulkByScrollTask.Status) info.getStatus(); // do stuff } // end::update-by-query-list-tasks } + + TaskInfo mainTask = CancelTests.findTaskToCancel(ReindexAction.NAME, builder.request().getSlices()); + BulkByScrollTask.Status status = (BulkByScrollTask.Status) mainTask.getStatus(); + assertNull(status.getReasonCancelled()); + TaskId taskId = mainTask.getTaskId(); { - TaskId taskId = null; // tag::update-by-query-get-task GetTaskResponse get = client.admin().cluster().prepareGetTask(taskId).get(); // end::update-by-query-get-task } { - TaskId taskId = null; // tag::update-by-query-cancel-task // Cancel all update-by-query requests - client.admin().cluster().prepareCancelTasks().setActions(UpdateByQueryAction.NAME).get().getTasks(); + client.admin().cluster().prepareCancelTasks() + .setActions(UpdateByQueryAction.NAME).get().getTasks(); // Cancel a specific update-by-query request - client.admin().cluster().prepareCancelTasks().setTaskId(taskId).get().getTasks(); + client.admin().cluster().prepareCancelTasks() + .setTaskId(taskId).get().getTasks(); // end::update-by-query-cancel-task } { - TaskId taskId = null; // tag::update-by-query-rethrottle RethrottleAction.INSTANCE.newRequestBuilder(client) .setTaskId(taskId) @@ -160,12 +234,18 @@ public void updateByQuery() { .get(); // end::update-by-query-rethrottle } + + // unblocking the blocked update + ALLOWED_OPERATIONS.release(builder.request().getSlices()); } - public void deleteByQuery() { + public void testDeleteByQuery() { Client client = client(); + client.admin().indices().prepareCreate("persons").get(); + // tag::delete-by-query-sync - BulkByScrollResponse response = DeleteByQueryAction.INSTANCE.newRequestBuilder(client) + BulkByScrollResponse response = + DeleteByQueryAction.INSTANCE.newRequestBuilder(client) .filter(QueryBuilders.matchQuery("gender", "male")) // <1> .source("persons") // <2> .get(); // <3> @@ -189,4 +269,76 @@ public void onFailure(Exception e) { // end::delete-by-query-async } + /** + * Similar to what CancelTests does: blocks some operations to be able to catch some tasks in a running state + * @see CancelTests#testCancel(String, AbstractBulkByScrollRequestBuilder, CancelTests.CancelAssertion, Matcher) + */ + private ReindexRequestBuilder reindexAndPartiallyBlock() throws Exception { + final Client client = client(); + final int numDocs = randomIntBetween(10, 100); + ALLOWED_OPERATIONS.release(numDocs); + + indexRandom(true, false, true, IntStream.range(0, numDocs) + .mapToObj(i -> client().prepareIndex(INDEX_NAME, "_doc", Integer.toString(i)).setSource("n", Integer.toString(i))) + .collect(Collectors.toList())); + + // Checks that all documents have been indexed and correctly counted + assertHitCount(client().prepareSearch(INDEX_NAME).setSize(0).get(), numDocs); + assertThat(ALLOWED_OPERATIONS.drainPermits(), equalTo(0)); + + ReindexRequestBuilder builder = new ReindexRequestBuilder(client, ReindexAction.INSTANCE).source(INDEX_NAME) + .destination("target_index", "_doc"); + // Scroll by 1 so that cancellation is easier to control + builder.source().setSize(1); + + int numModifiedDocs = randomIntBetween(builder.request().getSlices() * 2, numDocs); + // choose to modify some of the docs - the rest is still blocked + ALLOWED_OPERATIONS.release(numModifiedDocs - 
builder.request().getSlices()); + + // Now execute the reindex action... + builder.execute(); + + // 10 seconds is usually fine but on heavily loaded machines this can take a while + assertTrue("updates blocked", awaitBusy( + () -> ALLOWED_OPERATIONS.hasQueuedThreads() && ALLOWED_OPERATIONS.availablePermits() == 0, + 1, TimeUnit.MINUTES)); + return builder; + } + + public static class ReindexCancellationPlugin extends Plugin { + + @Override + public void onIndexModule(IndexModule indexModule) { + indexModule.addIndexOperationListener(new BlockingOperationListener()); + } + } + + public static class BlockingOperationListener implements IndexingOperationListener { + + @Override + public Engine.Index preIndex(ShardId shardId, Engine.Index index) { + return preCheck(index, index.type()); + } + + @Override + public Engine.Delete preDelete(ShardId shardId, Engine.Delete delete) { + return preCheck(delete, delete.type()); + } + + private T preCheck(T operation, String type) { + if (("_doc".equals(type) == false) || (operation.origin() != Engine.Operation.Origin.PRIMARY)) { + return operation; + } + + try { + if (ALLOWED_OPERATIONS.tryAcquire(30, TimeUnit.SECONDS)) { + return operation; + } + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + throw new IllegalStateException("Something went wrong"); + } + } + } diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/CancelTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/CancelTests.java index 6b7b21a55148d..6d6ae01f0626c 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/CancelTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/CancelTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.index.reindex; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest; @@ -26,7 +27,6 @@ import org.elasticsearch.action.ingest.DeletePipelineRequest; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.IndexModule; import org.elasticsearch.index.engine.Engine; @@ -195,7 +195,7 @@ private void testCancel(String action, AbstractBulkByScrollRequestBuilder assertion.assertThat(response, numDocs, numModifiedDocs); } - private TaskInfo findTaskToCancel(String actionName, int workerCount) { + public static TaskInfo findTaskToCancel(String actionName, int workerCount) { ListTasksResponse tasks; long start = System.nanoTime(); do { @@ -298,7 +298,7 @@ public void onIndexModule(IndexModule indexModule) { } public static class BlockingOperationListener implements IndexingOperationListener { - private static final Logger log = Loggers.getLogger(CancelTests.class); + private static final Logger log = LogManager.getLogger(CancelTests.class); @Override public Engine.Index preIndex(ShardId shardId, Engine.Index index) { diff --git a/modules/repository-url/src/main/java/org/elasticsearch/common/blobstore/url/URLBlobStore.java b/modules/repository-url/src/main/java/org/elasticsearch/common/blobstore/url/URLBlobStore.java index 158ecff9b2b4e..a7042b8bfee2b 100644 --- a/modules/repository-url/src/main/java/org/elasticsearch/common/blobstore/url/URLBlobStore.java +++ 
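The semaphore gate driving reindexAndPartiallyBlock and BlockingOperationListener above generalizes to any test that must catch an asynchronous task mid-flight: every operation needs a permit, so controlling the permit count freezes the task at a known point. The same gate stripped down to plain threads (everything in this sketch is illustrative):

---------------------------------------------------------------------------
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;

public class GateSketch {
    private static final Semaphore ALLOWED = new Semaphore(0);

    public static void main(String[] args) throws InterruptedException {
        Thread worker = new Thread(() -> {
            for (int i = 0; i < 5; i++) {
                try {
                    // each "operation" must take a permit, like preIndex/preDelete above
                    if (ALLOWED.tryAcquire(30, TimeUnit.SECONDS) == false) {
                        throw new IllegalStateException("gate never opened");
                    }
                } catch (InterruptedException e) {
                    throw new RuntimeException(e);
                }
                System.out.println("operation " + i + " allowed through");
            }
        });
        worker.start();

        ALLOWED.release(2); // let exactly two operations proceed
        while (ALLOWED.hasQueuedThreads() == false) {
            Thread.sleep(10); // wait until the worker is parked on the gate
        }
        // worker is now frozen mid-run: a real test would inspect task state here
        ALLOWED.release(3); // open the gate for the rest
        worker.join();
    }
}
---------------------------------------------------------------------------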
b/modules/repository-url/src/main/java/org/elasticsearch/common/blobstore/url/URLBlobStore.java @@ -23,7 +23,6 @@ import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.BlobStore; import org.elasticsearch.common.blobstore.BlobStoreException; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; @@ -34,7 +33,7 @@ /** * Read-only URL-based blob store */ -public class URLBlobStore extends AbstractComponent implements BlobStore { +public class URLBlobStore implements BlobStore { private final URL path; @@ -53,7 +52,6 @@ public class URLBlobStore extends AbstractComponent implements BlobStore { * @param path base URL */ public URLBlobStore(Settings settings, URL path) { - super(settings); this.path = path; this.bufferSizeInBytes = (int) settings.getAsBytesSize("repositories.uri.buffer_size", new ByteSizeValue(100, ByteSizeUnit.KB)).getBytes(); diff --git a/modules/repository-url/src/main/java/org/elasticsearch/repositories/url/URLRepository.java b/modules/repository-url/src/main/java/org/elasticsearch/repositories/url/URLRepository.java index 98b8c0a1945a5..8f8ae805fd1e8 100644 --- a/modules/repository-url/src/main/java/org/elasticsearch/repositories/url/URLRepository.java +++ b/modules/repository-url/src/main/java/org/elasticsearch/repositories/url/URLRepository.java @@ -82,21 +82,21 @@ public URLRepository(RepositoryMetaData metadata, Environment environment, NamedXContentRegistry namedXContentRegistry) { super(metadata, environment.settings(), namedXContentRegistry); - if (URL_SETTING.exists(metadata.settings()) == false && REPOSITORIES_URL_SETTING.exists(settings) == false) { + if (URL_SETTING.exists(metadata.settings()) == false && REPOSITORIES_URL_SETTING.exists(environment.settings()) == false) { throw new RepositoryException(metadata.name(), "missing url"); } this.environment = environment; - supportedProtocols = SUPPORTED_PROTOCOLS_SETTING.get(settings); - urlWhiteList = ALLOWED_URLS_SETTING.get(settings).toArray(new URIPattern[]{}); + supportedProtocols = SUPPORTED_PROTOCOLS_SETTING.get(environment.settings()); + urlWhiteList = ALLOWED_URLS_SETTING.get(environment.settings()).toArray(new URIPattern[]{}); basePath = BlobPath.cleanPath(); url = URL_SETTING.exists(metadata.settings()) - ? URL_SETTING.get(metadata.settings()) : REPOSITORIES_URL_SETTING.get(settings); + ? 
URL_SETTING.get(metadata.settings()) : REPOSITORIES_URL_SETTING.get(environment.settings()); } @Override protected BlobStore createBlobStore() { URL normalizedURL = checkURL(url); - return new URLBlobStore(settings, normalizedURL); + return new URLBlobStore(environment.settings(), normalizedURL); } // only use for testing diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java index 71e3b42279ca4..5373f09a48d92 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java @@ -193,7 +193,7 @@ public class Netty4HttpServerTransport extends AbstractLifecycleComponent implem byteSizeSetting("http.netty.receive_predictor_max", SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_SIZE, Property.NodeScope, Property.Deprecated); - + private final Settings settings; protected final NetworkService networkService; protected final BigArrays bigArrays; @@ -254,6 +254,7 @@ public class Netty4HttpServerTransport extends AbstractLifecycleComponent implem public Netty4HttpServerTransport(Settings settings, NetworkService networkService, BigArrays bigArrays, ThreadPool threadPool, NamedXContentRegistry xContentRegistry, Dispatcher dispatcher) { super(settings); + this.settings = settings; Netty4Utils.setAvailableProcessors(EsExecutors.PROCESSORS_SETTING.get(settings)); this.networkService = networkService; this.bigArrays = bigArrays; diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4InternalESLogger.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4InternalESLogger.java index d4b3aead0d2fd..38527151695d8 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4InternalESLogger.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4InternalESLogger.java @@ -20,6 +20,7 @@ package org.elasticsearch.transport.netty4; import io.netty.util.internal.logging.AbstractInternalLogger; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.common.SuppressLoggerChecks; diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java index c8b96b8bf6da2..a6104bed48af8 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java @@ -36,13 +36,11 @@ import io.netty.util.AttributeKey; import io.netty.util.concurrent.Future; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.SuppressForbidden; -import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.network.NetworkService; @@ -59,8 
+57,6 @@ import java.io.IOException; import java.net.InetSocketAddress; -import java.util.ArrayList; -import java.util.List; import java.util.Map; import java.util.concurrent.ThreadFactory; import java.util.concurrent.TimeUnit; @@ -101,8 +97,9 @@ public class Netty4Transport extends TcpTransport { private final int workerCount; private final ByteSizeValue receivePredictorMin; private final ByteSizeValue receivePredictorMax; - private volatile Bootstrap clientBootstrap; private final Map serverBootstraps = newConcurrentMap(); + private volatile Bootstrap clientBootstrap; + private volatile NioEventLoopGroup eventLoopGroup; public Netty4Transport(Settings settings, ThreadPool threadPool, NetworkService networkService, BigArrays bigArrays, NamedWriteableRegistry namedWriteableRegistry, CircuitBreakerService circuitBreakerService) { @@ -125,10 +122,12 @@ public Netty4Transport(Settings settings, ThreadPool threadPool, NetworkService protected void doStart() { boolean success = false; try { - clientBootstrap = createClientBootstrap(); + ThreadFactory threadFactory = daemonThreadFactory(settings, TRANSPORT_WORKER_THREAD_NAME_PREFIX); + eventLoopGroup = new NioEventLoopGroup(workerCount, threadFactory); + clientBootstrap = createClientBootstrap(eventLoopGroup); if (NetworkService.NETWORK_SERVER.get(settings)) { for (ProfileSettings profileSettings : profileSettings) { - createServerBootstrap(profileSettings); + createServerBootstrap(profileSettings, eventLoopGroup); bindServer(profileSettings); } } @@ -141,9 +140,9 @@ protected void doStart() { } } - private Bootstrap createClientBootstrap() { + private Bootstrap createClientBootstrap(NioEventLoopGroup eventLoopGroup) { final Bootstrap bootstrap = new Bootstrap(); - bootstrap.group(new NioEventLoopGroup(workerCount, daemonThreadFactory(settings, TRANSPORT_CLIENT_BOSS_THREAD_NAME_PREFIX))); + bootstrap.group(eventLoopGroup); bootstrap.channel(NioSocketChannel.class); bootstrap.option(ChannelOption.TCP_NODELAY, TCP_NO_DELAY.get(settings)); @@ -167,7 +166,7 @@ private Bootstrap createClientBootstrap() { return bootstrap; } - private void createServerBootstrap(ProfileSettings profileSettings) { + private void createServerBootstrap(ProfileSettings profileSettings, NioEventLoopGroup eventLoopGroup) { String name = profileSettings.profileName; if (logger.isDebugEnabled()) { logger.debug("using profile[{}], worker_count[{}], port[{}], bind_host[{}], publish_host[{}], compress[{}], " @@ -176,12 +175,9 @@ private void createServerBootstrap(ProfileSettings profileSettings) { receivePredictorMin, receivePredictorMax); } - - final ThreadFactory workerFactory = daemonThreadFactory(this.settings, TRANSPORT_SERVER_WORKER_THREAD_NAME_PREFIX, name); - final ServerBootstrap serverBootstrap = new ServerBootstrap(); - serverBootstrap.group(new NioEventLoopGroup(workerCount, workerFactory)); + serverBootstrap.group(eventLoopGroup); serverBootstrap.channel(NioServerSocketChannel.class); serverBootstrap.childHandler(getServerChannelInitializer(name)); @@ -279,25 +275,14 @@ long failedPingCount() { @SuppressForbidden(reason = "debug") protected void stopInternal() { Releasables.close(() -> { - final List>> serverBootstrapCloseFutures = new ArrayList<>(serverBootstraps.size()); - for (final Map.Entry entry : serverBootstraps.entrySet()) { - serverBootstrapCloseFutures.add( - Tuple.tuple(entry.getKey(), entry.getValue().config().group().shutdownGracefully(0, 5, TimeUnit.SECONDS))); + Future shutdownFuture = eventLoopGroup.shutdownGracefully(0, 5, TimeUnit.SECONDS); + 
shutdownFuture.awaitUninterruptibly(); + if (shutdownFuture.isSuccess() == false) { + logger.warn("Error closing netty event loop group", shutdownFuture.cause()); } - for (final Tuple> future : serverBootstrapCloseFutures) { - future.v2().awaitUninterruptibly(); - if (!future.v2().isSuccess()) { - logger.debug( - (Supplier) () -> new ParameterizedMessage( - "Error closing server bootstrap for profile [{}]", future.v1()), future.v2().cause()); - } - } - serverBootstraps.clear(); - if (clientBootstrap != null) { - clientBootstrap.config().group().shutdownGracefully(0, 5, TimeUnit.SECONDS).awaitUninterruptibly(); - clientBootstrap = null; - } + serverBootstraps.clear(); + clientBootstrap = null; }); } diff --git a/modules/tribe/src/main/java/org/elasticsearch/tribe/TribeDiscovery.java b/modules/tribe/src/main/java/org/elasticsearch/tribe/TribeDiscovery.java index d8e88e51e2232..6ffd7a941c385 100644 --- a/modules/tribe/src/main/java/org/elasticsearch/tribe/TribeDiscovery.java +++ b/modules/tribe/src/main/java/org/elasticsearch/tribe/TribeDiscovery.java @@ -45,10 +45,13 @@ */ public class TribeDiscovery extends SingleNodeDiscovery implements Discovery { + private final Settings settings; + @Inject public TribeDiscovery(Settings settings, TransportService transportService, MasterService masterService, ClusterApplier clusterApplier) { super(settings, transportService, masterService, clusterApplier); + this.settings = settings; } @Override diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.5.0.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.5.0.jar.sha1 deleted file mode 100644 index fca8c55d842be..0000000000000 --- a/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.5.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d156361604898e3d9e5c751c9308b7b856eec523 \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.6.0-snapshot-f9598f335b.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.6.0-snapshot-f9598f335b.jar.sha1 new file mode 100644 index 0000000000000..ca19e2c37d9eb --- /dev/null +++ b/plugins/analysis-icu/licenses/lucene-analyzers-icu-7.6.0-snapshot-f9598f335b.jar.sha1 @@ -0,0 +1 @@ +4eaf937aa95487a518d458666da78b68fff35ef4 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.5.0.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.5.0.jar.sha1 deleted file mode 100644 index dd14f717dba43..0000000000000 --- a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.5.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d412659d8b0dd6a579888cab192fe86d468953f3 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.6.0-snapshot-f9598f335b.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.6.0-snapshot-f9598f335b.jar.sha1 new file mode 100644 index 0000000000000..29593925f9050 --- /dev/null +++ b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-7.6.0-snapshot-f9598f335b.jar.sha1 @@ -0,0 +1 @@ +cdb7c83e29aa6c365e2c06c843ddfe16cfba12cc \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analyzers-nori-7.5.0.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analyzers-nori-7.5.0.jar.sha1 deleted file mode 100644 index 0987a33f65af2..0000000000000 --- a/plugins/analysis-nori/licenses/lucene-analyzers-nori-7.5.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9fdfafd182aa18dc42854f595bed9c65319786a5 \ No newline at end of file diff --git 
a/plugins/analysis-nori/licenses/lucene-analyzers-nori-7.6.0-snapshot-f9598f335b.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analyzers-nori-7.6.0-snapshot-f9598f335b.jar.sha1 new file mode 100644 index 0000000000000..c43883bd34046 --- /dev/null +++ b/plugins/analysis-nori/licenses/lucene-analyzers-nori-7.6.0-snapshot-f9598f335b.jar.sha1 @@ -0,0 +1 @@ +00cb9ce94be5f24bf6e35e4a8cfe641b01c37cf3 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.5.0.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.5.0.jar.sha1 deleted file mode 100644 index 5ccd99775fb57..0000000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.5.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -708120bc7ce6d6bde1a4a30b6bb7edeee1fc17ed \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.6.0-snapshot-f9598f335b.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.6.0-snapshot-f9598f335b.jar.sha1 new file mode 100644 index 0000000000000..cb2d33a265c5f --- /dev/null +++ b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-7.6.0-snapshot-f9598f335b.jar.sha1 @@ -0,0 +1 @@ +5fa1fa9c15ff4646dca8bf68696cfb4f85adbe15 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.5.0.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.5.0.jar.sha1 deleted file mode 100644 index 78d18568a8f43..0000000000000 --- a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.5.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ec339902ac6c05440340732b992dd3b73d66d899 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.6.0-snapshot-f9598f335b.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.6.0-snapshot-f9598f335b.jar.sha1 new file mode 100644 index 0000000000000..f5adb601ba30a --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-7.6.0-snapshot-f9598f335b.jar.sha1 @@ -0,0 +1 @@ +54790d0478d6411ae9a237b4d29056030e48b87e \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.5.0.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.5.0.jar.sha1 deleted file mode 100644 index 94fa78a5da2a8..0000000000000 --- a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.5.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a32e24d0781b97b1f2a3e86268d188180f4e41cb \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.6.0-snapshot-f9598f335b.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.6.0-snapshot-f9598f335b.jar.sha1 new file mode 100644 index 0000000000000..cb9b2365db7fb --- /dev/null +++ b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-7.6.0-snapshot-f9598f335b.jar.sha1 @@ -0,0 +1 @@ +d240eff790c236180562f20b697db2915d0100cf \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.5.0.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.5.0.jar.sha1 deleted file mode 100644 index fdbbc34edbba4..0000000000000 --- a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.5.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ceafdf4a0d8cc9d61110c9ac6ba610e8485620b9 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.6.0-snapshot-f9598f335b.jar.sha1 
b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.6.0-snapshot-f9598f335b.jar.sha1 new file mode 100644 index 0000000000000..dfde53098f73b --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-7.6.0-snapshot-f9598f335b.jar.sha1 @@ -0,0 +1 @@ +6bde728277806062a9473626c312a52c52b6f72a \ No newline at end of file diff --git a/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/discovery/azure/classic/AzureUnicastHostsProvider.java b/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/discovery/azure/classic/AzureUnicastHostsProvider.java index 1a9265de2a72f..987942ef4f031 100644 --- a/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/discovery/azure/classic/AzureUnicastHostsProvider.java +++ b/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/discovery/azure/classic/AzureUnicastHostsProvider.java @@ -93,6 +93,7 @@ public static Deployment fromString(String string) { } } + private final Settings settings; private final AzureComputeService azureComputeService; private TransportService transportService; private NetworkService networkService; @@ -107,7 +108,7 @@ public static Deployment fromString(String string) { public AzureUnicastHostsProvider(Settings settings, AzureComputeService azureComputeService, TransportService transportService, NetworkService networkService) { - super(settings); + this.settings = settings; this.azureComputeService = azureComputeService; this.transportService = transportService; this.networkService = networkService; diff --git a/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/plugin/discovery/azure/classic/AzureDiscoveryPlugin.java b/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/plugin/discovery/azure/classic/AzureDiscoveryPlugin.java index 4255fc210fb6c..795db2846cef3 100644 --- a/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/plugin/discovery/azure/classic/AzureDiscoveryPlugin.java +++ b/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/plugin/discovery/azure/classic/AzureDiscoveryPlugin.java @@ -19,11 +19,11 @@ package org.elasticsearch.plugin.discovery.azure.classic; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.cloud.azure.classic.management.AzureComputeService; import org.elasticsearch.cloud.azure.classic.management.AzureComputeServiceImpl; import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; @@ -43,7 +43,7 @@ public class AzureDiscoveryPlugin extends Plugin implements DiscoveryPlugin { public static final String AZURE = "azure"; protected final Settings settings; - private static final Logger logger = Loggers.getLogger(AzureDiscoveryPlugin.class); + private static final Logger logger = LogManager.getLogger(AzureDiscoveryPlugin.class); private static final DeprecationLogger deprecationLogger = new DeprecationLogger(logger); public AzureDiscoveryPlugin(Settings settings) { diff --git a/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic/AzureDiscoveryClusterFormationTests.java b/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic/AzureDiscoveryClusterFormationTests.java index c5a5f1df98ee2..35c2e7336a7d6 100644 --- 
a/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic/AzureDiscoveryClusterFormationTests.java +++ b/plugins/discovery-azure-classic/src/test/java/org/elasticsearch/discovery/azure/classic/AzureDiscoveryClusterFormationTests.java @@ -24,10 +24,10 @@ import com.sun.net.httpserver.Headers; import com.sun.net.httpserver.HttpsConfigurator; import com.sun.net.httpserver.HttpsServer; +import org.apache.logging.log4j.LogManager; import org.elasticsearch.cloud.azure.classic.management.AzureComputeService; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.io.FileSystemUtils; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.discovery.DiscoveryModule; @@ -243,7 +243,7 @@ public static void startHttpd() throws Exception { responseBody.write(responseAsBytes); responseBody.close(); } catch (XMLStreamException e) { - Loggers.getLogger(AzureDiscoveryClusterFormationTests.class).error("Failed serializing XML", e); + LogManager.getLogger(AzureDiscoveryClusterFormationTests.class).error("Failed serializing XML", e); throw new RuntimeException(e); } }); diff --git a/plugins/discovery-ec2/build.gradle b/plugins/discovery-ec2/build.gradle index e32ba6948d62d..b08561ffde0ed 100644 --- a/plugins/discovery-ec2/build.gradle +++ b/plugins/discovery-ec2/build.gradle @@ -48,9 +48,30 @@ bundlePlugin { } } +task writeTestJavaPolicy { + doLast { + final File tmp = file("${buildDir}/tmp") + if (tmp.exists() == false && tmp.mkdirs() == false) { + throw new GradleException("failed to create temporary directory [${tmp}]") + } + final File javaPolicy = file("${tmp}/java.policy") + javaPolicy.write( + [ + "grant {", + " permission java.util.PropertyPermission \"com.amazonaws.sdk.ec2MetadataServiceEndpointOverride\", \"write\";", + "};" + ].join("\n")) + } +} + test { + dependsOn writeTestJavaPolicy // this is needed for insecure plugins, remove if possible! 
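The `writeTestJavaPolicy` task above exists because, with the security manager enabled, the test JVM may only write the `com.amazonaws.sdk.ec2MetadataServiceEndpointOverride` system property if it holds the matching `java.util.PropertyPermission ... "write"`. A minimal sketch of the guarded call the grant enables (class name and endpoint are invented for illustration):

--------------------------------------------------
import java.security.AccessController;
import java.security.PrivilegedAction;

// Sketch only: under a security manager, System.setProperty triggers a
// PropertyPermission "write" check, which is exactly the permission the
// generated java.policy grants for the AWS endpoint override.
public class EndpointOverrideSketch {

    private static final String OVERRIDE = "com.amazonaws.sdk.ec2MetadataServiceEndpointOverride";

    public static void main(String[] args) {
        // the permission check happens here when a security manager is installed
        AccessController.doPrivileged((PrivilegedAction<String>) () ->
                System.setProperty(OVERRIDE, "http://127.0.0.1:8080"));
        System.out.println(System.getProperty(OVERRIDE));
    }
}
--------------------------------------------------

Without that grant, the call fails with an `AccessControlException` under the security manager, which is why the policy file is written before the tests run.
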
systemProperty 'tests.artifact', project.name + + // this is needed to manipulate the com.amazonaws.sdk.ec2MetadataServiceEndpointOverride system property + // alternatively, the security manager could be disabled entirely with `systemProperty 'tests.security.manager', 'false'` + systemProperty 'java.security.policy', "file://${buildDir}/tmp/java.policy" } check { diff --git a/plugins/discovery-ec2/qa/amazon-ec2/build.gradle b/plugins/discovery-ec2/qa/amazon-ec2/build.gradle index 8e0962db6037c..5caf7240a0ffc 100644 --- a/plugins/discovery-ec2/qa/amazon-ec2/build.gradle +++ b/plugins/discovery-ec2/qa/amazon-ec2/build.gradle @@ -55,7 +55,10 @@ integTestCluster { keystoreSetting 'discovery.ec2.access_key', 'ec2_integration_test_access_key' keystoreSetting 'discovery.ec2.secret_key', 'ec2_integration_test_secret_key' setting 'discovery.zen.hosts_provider', 'ec2' + setting 'network.host', '_ec2_' setting 'discovery.ec2.endpoint', "http://${-> ec2Fixture.addressAndPort}" + systemProperty "com.amazonaws.sdk.ec2MetadataServiceEndpointOverride", "http://${-> ec2Fixture.addressAndPort}" + unicastTransportUri = { seedNode, node, ant -> return null } waitCondition = { node, ant -> diff --git a/plugins/discovery-ec2/qa/amazon-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEC2Fixture.java b/plugins/discovery-ec2/qa/amazon-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEC2Fixture.java index 0cf4cbdeadb34..6027bd861590e 100644 --- a/plugins/discovery-ec2/qa/amazon-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEC2Fixture.java +++ b/plugins/discovery-ec2/qa/amazon-ec2/src/test/java/org/elasticsearch/discovery/ec2/AmazonEC2Fixture.java @@ -19,6 +19,8 @@ package org.elasticsearch.discovery.ec2; import org.apache.http.NameValuePair; +import org.apache.http.client.methods.HttpGet; +import org.apache.http.client.methods.HttpPost; import org.apache.http.client.utils.URLEncodedUtils; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.rest.RestStatus; @@ -60,7 +62,7 @@ public static void main(String[] args) throws Exception { @Override protected Response handle(final Request request) throws IOException { - if ("/".equals(request.getPath()) && ("POST".equals(request.getMethod()))) { + if ("/".equals(request.getPath()) && (HttpPost.METHOD_NAME.equals(request.getMethod()))) { final String userAgent = request.getHeader("User-Agent"); if (userAgent != null && userAgent.startsWith("aws-sdk-java")) { // Simulate an EC2 DescribeInstancesResponse @@ -74,6 +76,9 @@ protected Response handle(final Request request) throws IOException { return new Response(RestStatus.OK.getStatus(), contentType("text/xml; charset=UTF-8"), responseBody); } } + if ("/latest/meta-data/local-ipv4".equals(request.getPath()) && (HttpGet.METHOD_NAME.equals(request.getMethod()))) { + return new Response(RestStatus.OK.getStatus(), TEXT_PLAIN_CONTENT_TYPE, "127.0.0.1".getBytes(UTF_8)); + } return null; } diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceImpl.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceImpl.java index a65500d9e2289..6d677d03c3909 100644 --- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceImpl.java +++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceImpl.java @@ -33,7 +33,6 @@ import org.elasticsearch.common.Randomness; import org.elasticsearch.common.Strings; import org.elasticsearch.common.component.AbstractComponent; -import
org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.LazyInitializable; import java.util.Random; @@ -41,15 +40,9 @@ class AwsEc2ServiceImpl extends AbstractComponent implements AwsEc2Service { - public static final String EC2_METADATA_URL = "http://169.254.169.254/latest/meta-data/"; - private final AtomicReference> lazyClientReference = new AtomicReference<>(); - AwsEc2ServiceImpl(Settings settings) { - super(settings); - } - private AmazonEC2 buildClient(Ec2ClientSettings clientSettings) { final AWSCredentialsProvider credentials = buildCredentials(logger, clientSettings); final ClientConfiguration configuration = buildConfiguration(logger, clientSettings); diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2UnicastHostsProvider.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2UnicastHostsProvider.java index 8f5037042986b..2817c1c3b60bf 100644 --- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2UnicastHostsProvider.java +++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2UnicastHostsProvider.java @@ -69,7 +69,6 @@ class AwsEc2UnicastHostsProvider extends AbstractComponent implements UnicastHos private final TransportAddressesCache dynamicHosts; AwsEc2UnicastHostsProvider(Settings settings, TransportService transportService, AwsEc2Service awsEc2Service) { - super(settings); this.transportService = transportService; this.awsEc2Service = awsEc2Service; diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2ClientSettings.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2ClientSettings.java index d76c9e820b8b1..650e2747641e2 100644 --- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2ClientSettings.java +++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2ClientSettings.java @@ -24,9 +24,9 @@ import com.amazonaws.auth.AWSCredentials; import com.amazonaws.auth.BasicAWSCredentials; import com.amazonaws.auth.BasicSessionCredentials; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.SecureSetting; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Setting; @@ -75,7 +75,7 @@ final class Ec2ClientSettings { static final Setting READ_TIMEOUT_SETTING = Setting.timeSetting("discovery.ec2.read_timeout", TimeValue.timeValueMillis(ClientConfiguration.DEFAULT_SOCKET_TIMEOUT), Property.NodeScope); - private static final Logger logger = Loggers.getLogger(Ec2ClientSettings.class); + private static final Logger logger = LogManager.getLogger(Ec2ClientSettings.class); private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(logger); diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPlugin.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPlugin.java index d476d00eef860..c35810774641a 100644 --- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPlugin.java +++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPlugin.java @@ -19,11 +19,12 @@ package org.elasticsearch.discovery.ec2; +import com.amazonaws.util.EC2MetadataUtils; import com.amazonaws.util.json.Jackson; 
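A recurring pattern in this change set, visible in `Ec2ClientSettings` above and continued in the `Ec2DiscoveryPlugin` hunk below, is swapping the Elasticsearch `Loggers` helper for log4j's `LogManager`. A hedged sketch of the resulting idiom, assuming only `log4j-api` on the classpath (class name invented):

--------------------------------------------------
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

// Sketch of the logging pattern this change set converges on: obtain the
// logger statically from log4j's LogManager instead of the Elasticsearch
// Loggers helper, so no Settings or Loggers plumbing is needed.
public class LoggerMigrationSketch {

    private static final Logger logger = LogManager.getLogger(LoggerMigrationSketch.class);

    public static void main(String[] args) {
        logger.info("obtained logger [{}] without any Settings plumbing", logger.getName());
    }
}
--------------------------------------------------
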
+import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.SpecialPermission; import org.elasticsearch.common.SuppressForbidden; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; @@ -51,9 +52,7 @@ import java.util.function.Supplier; public class Ec2DiscoveryPlugin extends Plugin implements DiscoveryPlugin, ReloadablePlugin { - - private static Logger logger = Loggers.getLogger(Ec2DiscoveryPlugin.class); - + private static Logger logger = LogManager.getLogger(Ec2DiscoveryPlugin.class); public static final String EC2 = "ec2"; static { @@ -79,7 +78,7 @@ public class Ec2DiscoveryPlugin extends Plugin implements DiscoveryPlugin, Reloa protected final AwsEc2Service ec2Service; public Ec2DiscoveryPlugin(Settings settings) { - this(settings, new AwsEc2ServiceImpl(settings)); + this(settings, new AwsEc2ServiceImpl()); } protected Ec2DiscoveryPlugin(Settings settings, AwsEc2ServiceImpl ec2Service) { @@ -92,7 +91,7 @@ protected Ec2DiscoveryPlugin(Settings settings, AwsEc2ServiceImpl ec2Service) { @Override public NetworkService.CustomNameResolver getCustomNameResolver(Settings settings) { logger.debug("Register _ec2_, _ec2:xxx_ network names"); - return new Ec2NameResolver(settings); + return new Ec2NameResolver(); } @Override @@ -130,7 +129,8 @@ public Settings additionalSettings() { final Settings.Builder builder = Settings.builder(); // Adds a node attribute for the ec2 availability zone - final String azMetadataUrl = AwsEc2ServiceImpl.EC2_METADATA_URL + "placement/availability-zone"; + final String azMetadataUrl = EC2MetadataUtils.getHostAddressForEC2MetadataService() + + "/latest/meta-data/placement/availability-zone"; builder.put(getAvailabilityZoneNodeAttributes(settings, azMetadataUrl)); return builder.build(); } diff --git a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2NameResolver.java b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2NameResolver.java index 92bd01dd9aec7..e9dd3a10e4cc3 100644 --- a/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2NameResolver.java +++ b/plugins/discovery-ec2/src/main/java/org/elasticsearch/discovery/ec2/Ec2NameResolver.java @@ -19,11 +19,11 @@ package org.elasticsearch.discovery.ec2; +import com.amazonaws.util.EC2MetadataUtils; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.network.NetworkService.CustomNameResolver; -import org.elasticsearch.common.settings.Settings; import java.io.BufferedReader; import java.io.IOException; @@ -79,13 +79,6 @@ private enum Ec2HostnameType { } } - /** - * Construct a {@link CustomNameResolver}. - */ - Ec2NameResolver(Settings settings) { - super(settings); - } - /** * @param type the ec2 hostname type to discover. * @return the appropriate host resolved from ec2 meta-data, or null if it cannot be obtained. 
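The rewritten `Ec2NameResolver` (below) derives its metadata URL from `EC2MetadataUtils.getHostAddressForEC2MetadataService()`, which honors the `com.amazonaws.sdk.ec2MetadataServiceEndpointOverride` system property; that is what lets the tests point the resolver at a local fixture instead of `169.254.169.254`. A sketch, assuming `aws-java-sdk-core` on the classpath (the override port is invented):

--------------------------------------------------
import com.amazonaws.util.EC2MetadataUtils;

// Shows how the resolver's metadata URL is derived and how the override
// property redirects it; requires aws-java-sdk-core on the classpath.
public class MetadataUrlSketch {

    public static void main(String[] args) {
        // default host: http://169.254.169.254
        System.out.println(EC2MetadataUtils.getHostAddressForEC2MetadataService()
                + "/latest/meta-data/local-ipv4");

        // the override mechanism used by the tests in this change (port invented)
        System.setProperty("com.amazonaws.sdk.ec2MetadataServiceEndpointOverride",
                "http://127.0.0.1:8080");
        System.out.println(EC2MetadataUtils.getHostAddressForEC2MetadataService()
                + "/latest/meta-data/local-ipv4");
    }
}
--------------------------------------------------
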
@@ -94,7 +87,7 @@ private enum Ec2HostnameType { @SuppressForbidden(reason = "We call getInputStream in doPrivileged and provide SocketPermission") public InetAddress[] resolve(Ec2HostnameType type) throws IOException { InputStream in = null; - String metadataUrl = AwsEc2ServiceImpl.EC2_METADATA_URL + type.ec2Name; + String metadataUrl = EC2MetadataUtils.getHostAddressForEC2MetadataService() + "/latest/meta-data/" + type.ec2Name; try { URL url = new URL(metadataUrl); logger.debug("obtaining ec2 hostname from ec2 meta-data url {}", url); diff --git a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceMock.java b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceMock.java index 0596dd697b2eb..e44087f941349 100644 --- a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceMock.java +++ b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/AwsEc2ServiceMock.java @@ -24,8 +24,6 @@ import com.amazonaws.services.ec2.AmazonEC2; import com.amazonaws.services.ec2.model.Tag; -import org.elasticsearch.common.settings.Settings; - import java.util.List; public class AwsEc2ServiceMock extends AwsEc2ServiceImpl { @@ -33,8 +31,7 @@ public class AwsEc2ServiceMock extends AwsEc2ServiceImpl { private final int nodes; private final List> tagsList; - public AwsEc2ServiceMock(Settings settings, int nodes, List> tagsList) { - super(settings); + public AwsEc2ServiceMock(int nodes, List> tagsList) { this.nodes = nodes; this.tagsList = tagsList; } diff --git a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPluginMock.java b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPluginMock.java index a92bd243bc9b7..bc45a95c2f309 100644 --- a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPluginMock.java +++ b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryPluginMock.java @@ -32,7 +32,7 @@ public class Ec2DiscoveryPluginMock extends Ec2DiscoveryPlugin { } public Ec2DiscoveryPluginMock(Settings settings, int nodes, List> tagsList) { - super(settings, new AwsEc2ServiceMock(settings, nodes, tagsList)); + super(settings, new AwsEc2ServiceMock(nodes, tagsList)); } } diff --git a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryTests.java b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryTests.java index 295df0c818a91..aa619409c16eb 100644 --- a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryTests.java +++ b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryTests.java @@ -298,7 +298,7 @@ abstract class DummyEc2HostProvider extends AwsEc2UnicastHostsProvider { } public void testGetNodeListEmptyCache() throws Exception { - AwsEc2Service awsEc2Service = new AwsEc2ServiceMock(Settings.EMPTY, 1, null); + AwsEc2Service awsEc2Service = new AwsEc2ServiceMock(1, null); DummyEc2HostProvider provider = new DummyEc2HostProvider(Settings.EMPTY, transportService, awsEc2Service) { @Override protected List fetchDynamicNodes() { diff --git a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2NetworkTests.java b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2NetworkTests.java index 52bf7e67b0d7d..dedf56b836eb3 100644 --- a/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2NetworkTests.java +++ 
b/plugins/discovery-ec2/src/test/java/org/elasticsearch/discovery/ec2/Ec2NetworkTests.java @@ -19,35 +19,98 @@ package org.elasticsearch.discovery.ec2; +import com.sun.net.httpserver.HttpServer; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.mocksocket.MockHttpServer; +import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.ESTestCase; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; import java.io.IOException; +import java.io.OutputStream; import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.security.AccessController; +import java.security.PrivilegedAction; +import java.util.Arrays; import java.util.Collections; +import java.util.function.BiConsumer; +import static com.amazonaws.SDKGlobalConfiguration.EC2_METADATA_SERVICE_OVERRIDE_SYSTEM_PROPERTY; +import static java.nio.charset.StandardCharsets.UTF_8; import static org.hamcrest.Matchers.arrayContaining; -import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; /** * Test for EC2 network.host settings. + *
<p>
+ * Warning: This test doesn't assert that the exceptions are thrown. + * They aren't. */ +@SuppressForbidden(reason = "use http server") public class Ec2NetworkTests extends ESTestCase { + + private static HttpServer httpServer; + + @BeforeClass + public static void startHttp() throws Exception { + httpServer = MockHttpServer.createHttp(new InetSocketAddress(InetAddress.getLoopbackAddress().getHostAddress(), 0), 0); + + BiConsumer registerContext = (path, v) ->{ + final byte[] message = v.getBytes(UTF_8); + httpServer.createContext(path, (s) -> { + s.sendResponseHeaders(RestStatus.OK.getStatus(), message.length); + OutputStream responseBody = s.getResponseBody(); + responseBody.write(message); + responseBody.close(); + }); + }; + registerContext.accept("/latest/meta-data/local-ipv4","127.0.0.1"); + registerContext.accept("/latest/meta-data/public-ipv4","165.168.10.2"); + registerContext.accept("/latest/meta-data/public-hostname","165.168.10.3"); + registerContext.accept("/latest/meta-data/local-hostname","10.10.10.5"); + + httpServer.start(); + } + + @Before + public void setup() { + // redirect EC2 metadata service to httpServer + AccessController.doPrivileged((PrivilegedAction) () -> System.setProperty(EC2_METADATA_SERVICE_OVERRIDE_SYSTEM_PROPERTY, + "http://" + httpServer.getAddress().getHostName() + ":" + httpServer.getAddress().getPort())); + } + + @AfterClass + public static void stopHttp() { + httpServer.stop(0); + httpServer = null; + } + /** * Test for network.host: _ec2_ */ public void testNetworkHostEc2() throws IOException { - Settings nodeSettings = Settings.builder() - .put("network.host", "_ec2_") - .build(); + resolveEc2("_ec2_", InetAddress.getByName("127.0.0.1")); + } + + /** + * Test for network.host: _ec2_ + */ + public void testNetworkHostUnableToResolveEc2() { + // redirect EC2 metadata service to unknown location + AccessController.doPrivileged((PrivilegedAction) () -> System.setProperty(EC2_METADATA_SERVICE_OVERRIDE_SYSTEM_PROPERTY, + "http://127.0.0.1/")); - NetworkService networkService = new NetworkService(Collections.singletonList(new Ec2NameResolver(nodeSettings))); - // TODO we need to replace that with a mock. For now we check the URL we are supposed to reach. try { - networkService.resolveBindHostAddresses(null); + resolveEc2("_ec2_", (InetAddress[]) null); } catch (IOException e) { - assertThat(e.getMessage(), containsString("local-ipv4")); + assertThat(e.getMessage(), + equalTo("IOException caught when fetching InetAddress from [http://127.0.0.1//latest/meta-data/local-ipv4]")); } } @@ -55,102 +118,58 @@ public void testNetworkHostEc2() throws IOException { * Test for network.host: _ec2:publicIp_ */ public void testNetworkHostEc2PublicIp() throws IOException { - Settings nodeSettings = Settings.builder() - .put("network.host", "_ec2:publicIp_") - .build(); - - NetworkService networkService = new NetworkService(Collections.singletonList(new Ec2NameResolver(nodeSettings))); - // TODO we need to replace that with a mock. For now we check the URL we are supposed to reach. 
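In the `startHttp` setup above, the published diff appears to have lost the generic type parameters; `registerContext` is presumably a `BiConsumer<String, String>` mapping a metadata path to a fixed plain-text response. A self-contained sketch of the same mock-endpoint idea using only the JDK's `com.sun.net.httpserver` (class name invented):

--------------------------------------------------
import com.sun.net.httpserver.HttpServer;

import java.io.OutputStream;
import java.net.InetSocketAddress;
import java.nio.charset.StandardCharsets;
import java.util.function.BiConsumer;

// JDK-only sketch of the fixture pattern: register fixed plain-text bodies
// under metadata paths. The BiConsumer<String, String> type is an assumed
// reconstruction of the erased generics in the test above.
public class MetadataFixtureSketch {

    public static void main(String[] args) throws Exception {
        HttpServer httpServer = HttpServer.create(new InetSocketAddress("127.0.0.1", 0), 0);
        BiConsumer<String, String> registerContext = (path, v) -> {
            final byte[] message = v.getBytes(StandardCharsets.UTF_8);
            httpServer.createContext(path, exchange -> {
                exchange.sendResponseHeaders(200, message.length);
                try (OutputStream body = exchange.getResponseBody()) {
                    body.write(message);
                }
            });
        };
        registerContext.accept("/latest/meta-data/local-ipv4", "127.0.0.1");
        httpServer.start();
        System.out.println("fixture listening on " + httpServer.getAddress());
        httpServer.stop(0);
    }
}
--------------------------------------------------
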
- try { - networkService.resolveBindHostAddresses(null); - } catch (IOException e) { - assertThat(e.getMessage(), containsString("public-ipv4")); - } + resolveEc2("_ec2:publicIp_", InetAddress.getByName("165.168.10.2")); } /** * Test for network.host: _ec2:privateIp_ */ public void testNetworkHostEc2PrivateIp() throws IOException { - Settings nodeSettings = Settings.builder() - .put("network.host", "_ec2:privateIp_") - .build(); - - NetworkService networkService = new NetworkService(Collections.singletonList(new Ec2NameResolver(nodeSettings))); - // TODO we need to replace that with a mock. For now we check the URL we are supposed to reach. - try { - networkService.resolveBindHostAddresses(null); - } catch (IOException e) { - assertThat(e.getMessage(), containsString("local-ipv4")); - } + resolveEc2("_ec2:privateIp_", InetAddress.getByName("127.0.0.1")); } /** * Test for network.host: _ec2:privateIpv4_ */ public void testNetworkHostEc2PrivateIpv4() throws IOException { - Settings nodeSettings = Settings.builder() - .put("network.host", "_ec2:privateIpv4_") - .build(); - - NetworkService networkService = new NetworkService(Collections.singletonList(new Ec2NameResolver(nodeSettings))); - // TODO we need to replace that with a mock. For now we check the URL we are supposed to reach. - try { - networkService.resolveBindHostAddresses(null); - } catch (IOException e) { - assertThat(e.getMessage(), containsString("local-ipv4")); - } + resolveEc2("_ec2:privateIpv4_", InetAddress.getByName("127.0.0.1")); } /** * Test for network.host: _ec2:privateDns_ */ public void testNetworkHostEc2PrivateDns() throws IOException { - Settings nodeSettings = Settings.builder() - .put("network.host", "_ec2:privateDns_") - .build(); - - NetworkService networkService = new NetworkService(Collections.singletonList(new Ec2NameResolver(nodeSettings))); - // TODO we need to replace that with a mock. For now we check the URL we are supposed to reach. - try { - networkService.resolveBindHostAddresses(null); - } catch (IOException e) { - assertThat(e.getMessage(), containsString("local-hostname")); - } + resolveEc2("_ec2:privateDns_", InetAddress.getByName("10.10.10.5")); } /** * Test for network.host: _ec2:publicIpv4_ */ public void testNetworkHostEc2PublicIpv4() throws IOException { - Settings nodeSettings = Settings.builder() - .put("network.host", "_ec2:publicIpv4_") - .build(); - - NetworkService networkService = new NetworkService(Collections.singletonList(new Ec2NameResolver(nodeSettings))); - // TODO we need to replace that with a mock. For now we check the URL we are supposed to reach. - try { - networkService.resolveBindHostAddresses(null); - } catch (IOException e) { - assertThat(e.getMessage(), containsString("public-ipv4")); - } + resolveEc2("_ec2:publicIpv4_", InetAddress.getByName("165.168.10.2")); } /** * Test for network.host: _ec2:publicDns_ */ public void testNetworkHostEc2PublicDns() throws IOException { + resolveEc2("_ec2:publicDns_", InetAddress.getByName("165.168.10.3")); + } + + private InetAddress[] resolveEc2(String host, InetAddress ... expected) throws IOException { Settings nodeSettings = Settings.builder() - .put("network.host", "_ec2:publicDns_") - .build(); + .put("network.host", host) + .build(); - NetworkService networkService = new NetworkService(Collections.singletonList(new Ec2NameResolver(nodeSettings))); - // TODO we need to replace that with a mock. For now we check the URL we are supposed to reach. 
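The new `resolveEc2` helper (defined just above) declares a varargs `InetAddress... expected` parameter, which is why the failure-path test passes `(InetAddress[]) null` rather than a bare `null`: the cast makes it explicit that the whole array reference is null, not a single null element. A small illustration of that varargs detail (class and method names invented):

--------------------------------------------------
import java.net.InetAddress;
import java.util.Arrays;

// Invented names, purely to illustrate the varargs detail used by resolveEc2:
// a cast null is passed as a null array, while omitting arguments yields an
// empty array.
public class VarargsNullSketch {

    static String describe(InetAddress... expected) {
        return expected == null ? "null array (failure expected)" : Arrays.toString(expected);
    }

    public static void main(String[] args) {
        System.out.println(describe((InetAddress[]) null)); // null array (failure expected)
        System.out.println(describe());                     // []
    }
}
--------------------------------------------------
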
- try { - networkService.resolveBindHostAddresses(null); - } catch (IOException e) { - assertThat(e.getMessage(), containsString("public-hostname")); + NetworkService networkService = new NetworkService(Collections.singletonList(new Ec2NameResolver())); + + InetAddress[] addresses = networkService.resolveBindHostAddresses( + NetworkService.GLOBAL_NETWORK_BINDHOST_SETTING.get(nodeSettings).toArray(Strings.EMPTY_ARRAY)); + if (expected == null) { + fail("We should get an IOException, resolved addressed:" + Arrays.toString(addresses)); } + assertThat(addresses, arrayContaining(expected)); + return addresses; } /** @@ -158,11 +177,7 @@ public void testNetworkHostEc2PublicDns() throws IOException { * network.host: _local_ */ public void testNetworkHostCoreLocal() throws IOException { - Settings nodeSettings = Settings.builder() - .put("network.host", "_local_") - .build(); - - NetworkService networkService = new NetworkService(Collections.singletonList(new Ec2NameResolver(nodeSettings))); + NetworkService networkService = new NetworkService(Collections.singletonList(new Ec2NameResolver())); InetAddress[] addresses = networkService.resolveBindHostAddresses(null); assertThat(addresses, arrayContaining(networkService.resolveBindHostAddresses(new String[] { "_local_" }))); } diff --git a/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceInstancesServiceImpl.java b/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceInstancesServiceImpl.java index aab6e0c74ecdb..116bf1842d065 100644 --- a/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceInstancesServiceImpl.java +++ b/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceInstancesServiceImpl.java @@ -94,6 +94,7 @@ public Collection instances() { return instances; } + private final Settings settings; private Compute client; private TimeValue refreshInterval = null; private long lastRefresh; @@ -107,7 +108,7 @@ public Collection instances() { private final boolean validateCerts; public GceInstancesServiceImpl(Settings settings) { - super(settings); + this.settings = settings; this.validateCerts = GCE_VALIDATE_CERTIFICATES.get(settings); this.project = resolveProject(); this.zones = resolveZones(); diff --git a/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceMetadataService.java b/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceMetadataService.java index c736862d426de..ca25fde742907 100644 --- a/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceMetadataService.java +++ b/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceMetadataService.java @@ -44,11 +44,14 @@ public class GceMetadataService extends AbstractLifecycleComponent { public static final Setting GCE_HOST = new Setting<>("cloud.gce.host", "http://metadata.google.internal", Function.identity(), Setting.Property.NodeScope); + private final Settings settings; + /** Global instance of the HTTP transport. 
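`GCE_HOST` above is a plain node-scope string setting with a default, which is what allows the GCE tests to redirect the metadata service to a mock. A hedged sketch of how such a setting resolves (the override URL is invented):

--------------------------------------------------
import java.util.function.Function;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;

// Sketch of GCE_HOST resolution: the default metadata host applies unless a
// node setting overrides it.
public class GceHostSettingSketch {

    public static void main(String[] args) {
        Setting<String> gceHost = new Setting<>("cloud.gce.host", "http://metadata.google.internal",
                Function.identity(), Setting.Property.NodeScope);

        System.out.println(gceHost.get(Settings.EMPTY));
        System.out.println(gceHost.get(Settings.builder()
                .put("cloud.gce.host", "http://127.0.0.1:9090").build()));
    }
}
--------------------------------------------------
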
*/ private HttpTransport gceHttpTransport; public GceMetadataService(Settings settings) { super(settings); + this.settings = settings; } protected synchronized HttpTransport getGceHttpTransport() throws GeneralSecurityException, IOException { diff --git a/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceModule.java b/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceModule.java index 71e9fbc7804df..064fe606244ee 100644 --- a/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceModule.java +++ b/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceModule.java @@ -19,9 +19,9 @@ package org.elasticsearch.cloud.gce; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.common.inject.AbstractModule; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; public class GceModule extends AbstractModule { @@ -29,7 +29,7 @@ public class GceModule extends AbstractModule { static Class computeServiceImpl = GceInstancesServiceImpl.class; protected final Settings settings; - protected final Logger logger = Loggers.getLogger(GceModule.class); + protected final Logger logger = LogManager.getLogger(GceModule.class); public GceModule(Settings settings) { this.settings = settings; diff --git a/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/network/GceNameResolver.java b/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/network/GceNameResolver.java index 46c4ac7bac547..e53a1e241bb1d 100644 --- a/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/network/GceNameResolver.java +++ b/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/network/GceNameResolver.java @@ -22,9 +22,7 @@ import org.elasticsearch.cloud.gce.GceMetadataService; import org.elasticsearch.cloud.gce.util.Access; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.network.NetworkService.CustomNameResolver; -import org.elasticsearch.common.settings.Settings; import java.io.IOException; import java.net.InetAddress; @@ -39,7 +37,7 @@ *
<li>_gce:hostname_</li>
 * </ul>
 */
-public class GceNameResolver extends AbstractComponent implements CustomNameResolver { +public class GceNameResolver implements CustomNameResolver { private final GceMetadataService gceMetadataService; @@ -73,8 +71,7 @@ private enum GceAddressResolverType { /** * Construct a {@link CustomNameResolver}. */ - public GceNameResolver(Settings settings, GceMetadataService gceMetadataService) { - super(settings); + public GceNameResolver(GceMetadataService gceMetadataService) { this.gceMetadataService = gceMetadataService; } diff --git a/plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/GceUnicastHostsProvider.java b/plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/GceUnicastHostsProvider.java index 36f8aa36b34d0..2d1bb07b23909 100644 --- a/plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/GceUnicastHostsProvider.java +++ b/plugins/discovery-gce/src/main/java/org/elasticsearch/discovery/gce/GceUnicastHostsProvider.java @@ -58,6 +58,7 @@ static final class Status { private static final String TERMINATED = "TERMINATED"; } + private final Settings settings; private final GceInstancesService gceInstancesService; private TransportService transportService; private NetworkService networkService; @@ -73,7 +74,7 @@ static final class Status { public GceUnicastHostsProvider(Settings settings, GceInstancesService gceInstancesService, TransportService transportService, NetworkService networkService) { - super(settings); + this.settings = settings; this.gceInstancesService = gceInstancesService; this.transportService = transportService; this.networkService = networkService; diff --git a/plugins/discovery-gce/src/main/java/org/elasticsearch/plugin/discovery/gce/GceDiscoveryPlugin.java b/plugins/discovery-gce/src/main/java/org/elasticsearch/plugin/discovery/gce/GceDiscoveryPlugin.java index d95886e349463..af318af312753 100644 --- a/plugins/discovery-gce/src/main/java/org/elasticsearch/plugin/discovery/gce/GceDiscoveryPlugin.java +++ b/plugins/discovery-gce/src/main/java/org/elasticsearch/plugin/discovery/gce/GceDiscoveryPlugin.java @@ -21,19 +21,19 @@ import com.google.api.client.http.HttpHeaders; import com.google.api.client.util.ClassInfo; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.elasticsearch.common.Booleans; -import org.elasticsearch.core.internal.io.IOUtils; import org.apache.lucene.util.SetOnce; import org.elasticsearch.cloud.gce.GceInstancesService; import org.elasticsearch.cloud.gce.GceInstancesServiceImpl; import org.elasticsearch.cloud.gce.GceMetadataService; import org.elasticsearch.cloud.gce.network.GceNameResolver; import org.elasticsearch.cloud.gce.util.Access; -import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.Booleans; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.discovery.gce.GceUnicastHostsProvider; import org.elasticsearch.discovery.zen.UnicastHostsProvider; import org.elasticsearch.plugins.DiscoveryPlugin; @@ -57,7 +57,7 @@ public class GceDiscoveryPlugin extends Plugin implements DiscoveryPlugin, Close public static final String GCE = "gce"; protected final Settings settings; - private static final Logger logger = Loggers.getLogger(GceDiscoveryPlugin.class); + private static final Logger logger = LogManager.getLogger(GceDiscoveryPlugin.class); // stashed
when created in order to properly close private final SetOnce gceInstancesService = new SetOnce<>(); @@ -96,7 +96,7 @@ public Map> getZenHostsProviders(Transpor @Override public NetworkService.CustomNameResolver getCustomNameResolver(Settings settings) { logger.debug("Register _gce_, _gce:xxx network names"); - return new GceNameResolver(settings, new GceMetadataService(settings)); + return new GceNameResolver(new GceMetadataService(settings)); } @Override diff --git a/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceMockUtils.java b/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceMockUtils.java index 3a34e3629db80..03525eb266ba7 100644 --- a/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceMockUtils.java +++ b/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceMockUtils.java @@ -26,18 +26,18 @@ import com.google.api.client.testing.http.MockHttpTransport; import com.google.api.client.testing.http.MockLowLevelHttpRequest; import com.google.api.client.testing.http.MockLowLevelHttpResponse; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.common.io.Streams; -import org.elasticsearch.common.logging.Loggers; import java.io.IOException; import java.io.InputStream; import java.net.URL; public class GceMockUtils { - protected static final Logger logger = Loggers.getLogger(GceMockUtils.class); + protected static final Logger logger = LogManager.getLogger(GceMockUtils.class); public static final String GCE_METADATA_URL = "http://metadata.google.internal/computeMetadata/v1/"; diff --git a/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceNetworkTests.java b/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceNetworkTests.java index 1fe1297904bda..94f2959917d5b 100644 --- a/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceNetworkTests.java +++ b/plugins/discovery-gce/src/test/java/org/elasticsearch/discovery/gce/GceNetworkTests.java @@ -107,7 +107,7 @@ private void resolveGce(String gceNetworkSetting, InetAddress[] expected) throws .build(); GceMetadataServiceMock mock = new GceMetadataServiceMock(nodeSettings); - NetworkService networkService = new NetworkService(Collections.singletonList(new GceNameResolver(nodeSettings, mock))); + NetworkService networkService = new NetworkService(Collections.singletonList(new GceNameResolver(mock))); try { InetAddress[] addresses = networkService.resolveBindHostAddresses( NetworkService.GLOBAL_NETWORK_BINDHOST_SETTING.get(nodeSettings).toArray(Strings.EMPTY_ARRAY)); diff --git a/plugins/examples/painless-whitelist/src/test/resources/rest-api-spec/test/painless_whitelist/20_whitelist.yml b/plugins/examples/painless-whitelist/src/test/resources/rest-api-spec/test/painless_whitelist/20_whitelist.yml index bbb0b44ef1d45..b864edaa2a962 100644 --- a/plugins/examples/painless-whitelist/src/test/resources/rest-api-spec/test/painless_whitelist/20_whitelist.yml +++ b/plugins/examples/painless-whitelist/src/test/resources/rest-api-spec/test/painless_whitelist/20_whitelist.yml @@ -11,8 +11,8 @@ indices.refresh: {} - do: - index: test search: + index: test body: query: match_all: {} diff --git a/plugins/examples/painless-whitelist/src/test/resources/rest-api-spec/test/painless_whitelist/30_static.yml 
b/plugins/examples/painless-whitelist/src/test/resources/rest-api-spec/test/painless_whitelist/30_static.yml index b659263729607..1dbaf655bf501 100644 --- a/plugins/examples/painless-whitelist/src/test/resources/rest-api-spec/test/painless_whitelist/30_static.yml +++ b/plugins/examples/painless-whitelist/src/test/resources/rest-api-spec/test/painless_whitelist/30_static.yml @@ -11,8 +11,8 @@ indices.refresh: {} - do: - index: test search: + index: test body: query: match_all: {} diff --git a/plugins/examples/painless-whitelist/src/test/resources/rest-api-spec/test/painless_whitelist/40_instance.yml b/plugins/examples/painless-whitelist/src/test/resources/rest-api-spec/test/painless_whitelist/40_instance.yml index 6cb7e4f3d401d..87afe9602dc35 100644 --- a/plugins/examples/painless-whitelist/src/test/resources/rest-api-spec/test/painless_whitelist/40_instance.yml +++ b/plugins/examples/painless-whitelist/src/test/resources/rest-api-spec/test/painless_whitelist/40_instance.yml @@ -11,8 +11,8 @@ indices.refresh: {} - do: - index: test search: + index: test body: query: match_all: {} @@ -26,8 +26,8 @@ - match: { hits.hits.0.fields.sNum1.0: 2 } - do: - index: test search: + index: test body: query: match_all: {} diff --git a/plugins/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/DatabaseReaderLazyLoader.java b/plugins/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/DatabaseReaderLazyLoader.java index b420e8d0a1198..a185e038582e0 100644 --- a/plugins/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/DatabaseReaderLazyLoader.java +++ b/plugins/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/DatabaseReaderLazyLoader.java @@ -19,11 +19,11 @@ package org.elasticsearch.ingest.geoip; import com.maxmind.geoip2.DatabaseReader; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.elasticsearch.core.internal.io.IOUtils; import org.apache.lucene.util.SetOnce; import org.elasticsearch.common.CheckedSupplier; -import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.core.internal.io.IOUtils; import java.io.Closeable; import java.io.IOException; @@ -34,7 +34,7 @@ */ final class DatabaseReaderLazyLoader implements Closeable { - private static final Logger LOGGER = Loggers.getLogger(DatabaseReaderLazyLoader.class); + private static final Logger LOGGER = LogManager.getLogger(DatabaseReaderLazyLoader.class); private final String databaseFileName; private final CheckedSupplier loader; diff --git a/plugins/mapper-size/src/test/java/org/elasticsearch/index/mapper/size/SizeMappingTests.java b/plugins/mapper-size/src/test/java/org/elasticsearch/index/mapper/size/SizeMappingTests.java index e710a5971173e..1eae00ea49087 100644 --- a/plugins/mapper-size/src/test/java/org/elasticsearch/index/mapper/size/SizeMappingTests.java +++ b/plugins/mapper-size/src/test/java/org/elasticsearch/index/mapper/size/SizeMappingTests.java @@ -63,7 +63,7 @@ public void testSizeEnabled() throws Exception { boolean points = false; for (IndexableField field : doc.rootDoc().getFields("_size")) { stored |= field.fieldType().stored(); - points |= field.fieldType().pointDimensionCount() > 0; + points |= field.fieldType().pointIndexDimensionCount() > 0; } assertTrue(stored); assertTrue(points); diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobContainer.java b/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobContainer.java index 08041d54cd14a..e7c8478c3ed39 
100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobContainer.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobContainer.java @@ -21,12 +21,12 @@ import com.microsoft.azure.storage.LocationMode; import com.microsoft.azure.storage.StorageException; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.blobstore.BlobMetaData; import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.support.AbstractBlobContainer; -import org.elasticsearch.common.logging.Loggers; import java.io.IOException; import java.io.InputStream; @@ -37,7 +37,7 @@ public class AzureBlobContainer extends AbstractBlobContainer { - private final Logger logger = Loggers.getLogger(AzureBlobContainer.class); + private final Logger logger = LogManager.getLogger(AzureBlobContainer.class); private final AzureBlobStore blobStore; private final String keyPath; diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobStore.java b/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobStore.java index d519af370eb9e..a6eb849aa17ac 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobStore.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/blobstore/AzureBlobStore.java @@ -30,7 +30,6 @@ import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.BlobStore; import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.settings.Settings; import java.io.IOException; import java.io.InputStream; import java.net.URISyntaxException; @@ -49,9 +48,8 @@ public class AzureBlobStore extends AbstractComponent implements BlobStore { private final String container; private final LocationMode locationMode; - public AzureBlobStore(RepositoryMetaData metadata, Settings settings, AzureStorageService service) + public AzureBlobStore(RepositoryMetaData metadata, AzureStorageService service) throws URISyntaxException, StorageException { - super(settings); this.container = Repository.CONTAINER_SETTING.get(metadata.settings()); this.clientName = Repository.CLIENT_NAME.get(metadata.settings()); this.service = service; diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageService.java b/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageService.java index d5542444ac7de..f4b7b132c9bc6 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageService.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageService.java @@ -71,7 +71,6 @@ public class AzureStorageService extends AbstractComponent { volatile Map storageSettings = emptyMap(); public AzureStorageService(Settings settings) { - super(settings); // eagerly load client settings so that secure settings are read final Map clientsSettings = AzureStorageSettings.load(settings); refreshAndClearCache(clientsSettings); diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageSettings.java b/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageSettings.java index f1b3066432a88..1649e4dead8d7 100644 --- 
a/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageSettings.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/cloud/azure/storage/AzureStorageSettings.java @@ -58,7 +58,7 @@ public final class AzureStorageSettings { key -> SecureSetting.secureString(key, null)); /** max_retries: Number of retries in case of Azure errors. Defaults to 3 (RetryPolicy.DEFAULT_CLIENT_RETRY_COUNT). */ - private static final Setting MAX_RETRIES_SETTING = + public static final Setting MAX_RETRIES_SETTING = Setting.affixKeySetting(AZURE_CLIENT_PREFIX_KEY, "max_retries", (key) -> Setting.intSetting(key, RetryPolicy.DEFAULT_CLIENT_RETRY_COUNT, Setting.Property.NodeScope), ACCOUNT_SETTING, KEY_SETTING); diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/plugin/repository/azure/AzureRepositoryPlugin.java b/plugins/repository-azure/src/main/java/org/elasticsearch/plugin/repository/azure/AzureRepositoryPlugin.java index d2eebcff75450..05f07cc374c33 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/plugin/repository/azure/AzureRepositoryPlugin.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/plugin/repository/azure/AzureRepositoryPlugin.java @@ -64,6 +64,7 @@ public List> getSettings() { AzureStorageSettings.KEY_SETTING, AzureStorageSettings.ENDPOINT_SUFFIX_SETTING, AzureStorageSettings.TIMEOUT_SETTING, + AzureStorageSettings.MAX_RETRIES_SETTING, AzureStorageSettings.PROXY_TYPE_SETTING, AzureStorageSettings.PROXY_HOST_SETTING, AzureStorageSettings.PROXY_PORT_SETTING diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java index 4182e8f1f752b..90f536385d523 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java @@ -127,7 +127,7 @@ protected BlobStore getBlobStore() { */ @Override protected AzureBlobStore createBlobStore() throws URISyntaxException, StorageException { - final AzureBlobStore blobStore = new AzureBlobStore(metadata, environment.settings(), storageService); + final AzureBlobStore blobStore = new AzureBlobStore(metadata, storageService); logger.debug((org.apache.logging.log4j.util.Supplier) () -> new ParameterizedMessage( "using container [{}], chunk_size [{}], compress [{}], base_path [{}]", diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceTests.java b/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceTests.java index a92c13e4f85a2..f7860356693cc 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceTests.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceTests.java @@ -26,10 +26,12 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsException; +import org.elasticsearch.common.settings.SettingsModule; import org.elasticsearch.plugin.repository.azure.AzureRepositoryPlugin; import org.elasticsearch.test.ESTestCase; import java.io.IOException; +import java.io.UncheckedIOException; import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.Proxy; @@ -37,6 +39,7 @@ 
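Two related changes meet here: `MAX_RETRIES_SETTING` becomes public, and `AzureRepositoryPlugin#getSettings` registers it, which is what lets `azure.client.*.max_retries` pass node-level settings validation. A simplified sketch of such an affix setting, with the dependencies on the account and key settings omitted and the holder class invented:

-------------------------------------------------
import com.microsoft.azure.storage.RetryPolicy;
import org.elasticsearch.common.settings.Setting;

final class ExampleClientSettings { // illustrative holder class
    // One integer value per configured client, e.g. azure.client.azure1.max_retries,
    // defaulting to the Azure SDK's standard client retry count.
    static final Setting.AffixSetting<Integer> MAX_RETRIES = Setting.affixKeySetting(
        "azure.client.", "max_retries",
        key -> Setting.intSetting(key, RetryPolicy.DEFAULT_CLIENT_RETRY_COUNT, Setting.Property.NodeScope));
}
-------------------------------------------------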
import java.net.URISyntaxException; import java.net.UnknownHostException; import java.nio.charset.StandardCharsets; +import java.util.Collections; import java.util.Map; import static org.elasticsearch.cloud.azure.storage.AzureStorageService.blobNameFromUri; @@ -79,10 +82,24 @@ public void testReadSecuredSettings() { assertThat(loadedSettings.get("azure3").getEndpointSuffix(), equalTo("my_endpoint_suffix")); } + private AzureRepositoryPlugin pluginWithSettingsValidation(Settings settings) { + final AzureRepositoryPlugin plugin = new AzureRepositoryPlugin(settings); + new SettingsModule(settings, plugin.getSettings(), Collections.emptyList(), Collections.emptySet()); + return plugin; + } + + private AzureStorageService storageServiceWithSettingsValidation(Settings settings) { + try (AzureRepositoryPlugin plugin = pluginWithSettingsValidation(settings)) { + return plugin.azureStoreService; + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + public void testCreateClientWithEndpointSuffix() throws IOException { final Settings settings = Settings.builder().setSecureSettings(buildSecureSettings()) .put("azure.client.azure1.endpoint_suffix", "my_endpoint_suffix").build(); - try (AzureRepositoryPlugin plugin = new AzureRepositoryPlugin(settings)) { + try (AzureRepositoryPlugin plugin = pluginWithSettingsValidation(settings)) { final AzureStorageService azureStorageService = plugin.azureStoreService; final CloudBlobClient client1 = azureStorageService.client("azure1").v1(); assertThat(client1.getEndpoint().toString(), equalTo("https://myaccount1.blob.my_endpoint_suffix")); @@ -104,7 +121,7 @@ public void testReinitClientSettings() throws IOException { secureSettings2.setString("azure.client.azure3.account", "myaccount23"); secureSettings2.setString("azure.client.azure3.key", encodeKey("mykey23")); final Settings settings2 = Settings.builder().setSecureSettings(secureSettings2).build(); - try (AzureRepositoryPlugin plugin = new AzureRepositoryPlugin(settings1)) { + try (AzureRepositoryPlugin plugin = pluginWithSettingsValidation(settings1)) { final AzureStorageService azureStorageService = plugin.azureStoreService; final CloudBlobClient client11 = azureStorageService.client("azure1").v1(); assertThat(client11.getEndpoint().toString(), equalTo("https://myaccount11.blob.core.windows.net")); @@ -136,7 +153,7 @@ public void testReinitClientEmptySettings() throws IOException { secureSettings.setString("azure.client.azure1.account", "myaccount1"); secureSettings.setString("azure.client.azure1.key", encodeKey("mykey11")); final Settings settings = Settings.builder().setSecureSettings(secureSettings).build(); - try (AzureRepositoryPlugin plugin = new AzureRepositoryPlugin(settings)) { + try (AzureRepositoryPlugin plugin = pluginWithSettingsValidation(settings)) { final AzureStorageService azureStorageService = plugin.azureStoreService; final CloudBlobClient client11 = azureStorageService.client("azure1").v1(); assertThat(client11.getEndpoint().toString(), equalTo("https://myaccount1.blob.core.windows.net")); @@ -160,7 +177,7 @@ public void testReinitClientWrongSettings() throws IOException { secureSettings2.setString("azure.client.azure1.account", "myaccount1"); // missing key final Settings settings2 = Settings.builder().setSecureSettings(secureSettings2).build(); - try (AzureRepositoryPlugin plugin = new AzureRepositoryPlugin(settings1)) { + try (AzureRepositoryPlugin plugin = pluginWithSettingsValidation(settings1)) { final AzureStorageService azureStorageService = 
plugin.azureStoreService; final CloudBlobClient client11 = azureStorageService.client("azure1").v1(); assertThat(client11.getEndpoint().toString(), equalTo("https://myaccount1.blob.core.windows.net")); @@ -173,7 +190,7 @@ public void testReinitClientWrongSettings() throws IOException { } public void testGetSelectedClientNonExisting() { - final AzureStorageService azureStorageService = new AzureStorageService(buildSettings()); + final AzureStorageService azureStorageService = storageServiceWithSettingsValidation(buildSettings()); final SettingsException e = expectThrows(SettingsException.class, () -> azureStorageService.client("azure4")); assertThat(e.getMessage(), is("Unable to find client with name [azure4]")); } @@ -199,7 +216,7 @@ public void testGetSelectedClientDefaultTimeout() { .setSecureSettings(buildSecureSettings()) .put("azure.client.azure3.timeout", "30s") .build(); - final AzureStorageService azureStorageService = new AzureStorageService(timeoutSettings); + final AzureStorageService azureStorageService = storageServiceWithSettingsValidation(timeoutSettings); final CloudBlobClient client1 = azureStorageService.client("azure1").v1(); assertThat(client1.getDefaultRequestOptions().getTimeoutIntervalInMs(), nullValue()); final CloudBlobClient client3 = azureStorageService.client("azure3").v1(); @@ -207,13 +224,13 @@ public void testGetSelectedClientDefaultTimeout() { } public void testGetSelectedClientNoTimeout() { - final AzureStorageService azureStorageService = new AzureStorageService(buildSettings()); + final AzureStorageService azureStorageService = storageServiceWithSettingsValidation(buildSettings()); final CloudBlobClient client1 = azureStorageService.client("azure1").v1(); assertThat(client1.getDefaultRequestOptions().getTimeoutIntervalInMs(), is(nullValue())); } public void testGetSelectedClientBackoffPolicy() { - final AzureStorageService azureStorageService = new AzureStorageService(buildSettings()); + final AzureStorageService azureStorageService = storageServiceWithSettingsValidation(buildSettings()); final CloudBlobClient client1 = azureStorageService.client("azure1").v1(); assertThat(client1.getDefaultRequestOptions().getRetryPolicyFactory(), is(notNullValue())); assertThat(client1.getDefaultRequestOptions().getRetryPolicyFactory(), instanceOf(RetryExponentialRetry.class)); @@ -225,7 +242,7 @@ public void testGetSelectedClientBackoffPolicyNbRetries() { .put("azure.client.azure1.max_retries", 7) .build(); - final AzureStorageService azureStorageService = new AzureStorageService(timeoutSettings); + final AzureStorageService azureStorageService = storageServiceWithSettingsValidation(timeoutSettings); final CloudBlobClient client1 = azureStorageService.client("azure1").v1(); assertThat(client1.getDefaultRequestOptions().getRetryPolicyFactory(), is(notNullValue())); assertThat(client1.getDefaultRequestOptions().getRetryPolicyFactory(), instanceOf(RetryExponentialRetry.class)); @@ -235,7 +252,7 @@ public void testNoProxy() { final Settings settings = Settings.builder() .setSecureSettings(buildSecureSettings()) .build(); - final AzureStorageService mock = new AzureStorageService(settings); + final AzureStorageService mock = storageServiceWithSettingsValidation(settings); assertThat(mock.storageSettings.get("azure1").getProxy(), nullValue()); assertThat(mock.storageSettings.get("azure2").getProxy(), nullValue()); assertThat(mock.storageSettings.get("azure3").getProxy(), nullValue()); @@ -248,7 +265,7 @@ public void testProxyHttp() throws UnknownHostException { 
.put("azure.client.azure1.proxy.port", 8080) .put("azure.client.azure1.proxy.type", "http") .build(); - final AzureStorageService mock = new AzureStorageService(settings); + final AzureStorageService mock = storageServiceWithSettingsValidation(settings); final Proxy azure1Proxy = mock.storageSettings.get("azure1").getProxy(); assertThat(azure1Proxy, notNullValue()); @@ -268,7 +285,7 @@ public void testMultipleProxies() throws UnknownHostException { .put("azure.client.azure2.proxy.port", 8081) .put("azure.client.azure2.proxy.type", "http") .build(); - final AzureStorageService mock = new AzureStorageService(settings); + final AzureStorageService mock = storageServiceWithSettingsValidation(settings); final Proxy azure1Proxy = mock.storageSettings.get("azure1").getProxy(); assertThat(azure1Proxy, notNullValue()); assertThat(azure1Proxy.type(), is(Proxy.Type.HTTP)); @@ -287,7 +304,7 @@ public void testProxySocks() throws UnknownHostException { .put("azure.client.azure1.proxy.port", 8080) .put("azure.client.azure1.proxy.type", "socks") .build(); - final AzureStorageService mock = new AzureStorageService(settings); + final AzureStorageService mock = storageServiceWithSettingsValidation(settings); final Proxy azure1Proxy = mock.storageSettings.get("azure1").getProxy(); assertThat(azure1Proxy, notNullValue()); assertThat(azure1Proxy.type(), is(Proxy.Type.SOCKS)); @@ -302,7 +319,7 @@ public void testProxyNoHost() { .put("azure.client.azure1.proxy.port", 8080) .put("azure.client.azure1.proxy.type", randomFrom("socks", "http")) .build(); - final SettingsException e = expectThrows(SettingsException.class, () -> new AzureStorageService(settings)); + final SettingsException e = expectThrows(SettingsException.class, () -> storageServiceWithSettingsValidation(settings)); assertEquals("Azure Proxy type has been set but proxy host or port is not defined.", e.getMessage()); } @@ -313,7 +330,7 @@ public void testProxyNoPort() { .put("azure.client.azure1.proxy.type", randomFrom("socks", "http")) .build(); - final SettingsException e = expectThrows(SettingsException.class, () -> new AzureStorageService(settings)); + final SettingsException e = expectThrows(SettingsException.class, () -> storageServiceWithSettingsValidation(settings)); assertEquals("Azure Proxy type has been set but proxy host or port is not defined.", e.getMessage()); } @@ -324,7 +341,7 @@ public void testProxyNoType() { .put("azure.client.azure1.proxy.port", 8080) .build(); - final SettingsException e = expectThrows(SettingsException.class, () -> new AzureStorageService(settings)); + final SettingsException e = expectThrows(SettingsException.class, () -> storageServiceWithSettingsValidation(settings)); assertEquals("Azure Proxy port or host have been set but proxy type is not defined.", e.getMessage()); } @@ -336,7 +353,7 @@ public void testProxyWrongHost() { .put("azure.client.azure1.proxy.port", 8080) .build(); - final SettingsException e = expectThrows(SettingsException.class, () -> new AzureStorageService(settings)); + final SettingsException e = expectThrows(SettingsException.class, () -> storageServiceWithSettingsValidation(settings)); assertEquals("Azure proxy host is unknown.", e.getMessage()); } diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreContainerTests.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreContainerTests.java index 85ca44205aa94..7248254b7ca7b 100644 --- 
a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreContainerTests.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreContainerTests.java @@ -36,7 +36,7 @@ protected BlobStore newBlobStore() throws IOException { try { RepositoryMetaData repositoryMetaData = new RepositoryMetaData("azure", "ittest", Settings.EMPTY); AzureStorageServiceMock client = new AzureStorageServiceMock(); - return new AzureBlobStore(repositoryMetaData, Settings.EMPTY, client); + return new AzureBlobStore(repositoryMetaData, client); } catch (URISyntaxException | StorageException e) { throw new IOException(e); } diff --git a/plugins/repository-gcs/qa/google-cloud-storage/src/test/resources/rest-api-spec/test/repository_gcs/10_repository.yml b/plugins/repository-gcs/qa/google-cloud-storage/src/test/resources/rest-api-spec/test/repository_gcs/10_repository.yml index c7b8949a11335..bb8f148fc8ab1 100644 --- a/plugins/repository-gcs/qa/google-cloud-storage/src/test/resources/rest-api-spec/test/repository_gcs/10_repository.yml +++ b/plugins/repository-gcs/qa/google-cloud-storage/src/test/resources/rest-api-spec/test/repository_gcs/10_repository.yml @@ -11,7 +11,7 @@ setup: settings: bucket: ${bucket} client: "integration_test" - base_path: ${base_path} + base_path: "${base_path}" --- "Snapshot/Restore with repository-gcs": @@ -23,7 +23,7 @@ setup: - match: { repository.settings.bucket : ${bucket} } - match: { repository.settings.client : "integration_test" } - - match: { repository.settings.base_path : ${base_path} } + - match: { repository.settings.base_path : "${base_path}" } # Index documents - do: diff --git a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java index 1e94467f5a57e..7894f9fc7df63 100644 --- a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java +++ b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java @@ -37,7 +37,6 @@ import org.elasticsearch.common.blobstore.support.PlainBlobMetaData; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.internal.io.Streams; import java.io.ByteArrayOutputStream; @@ -69,8 +68,7 @@ class GoogleCloudStorageBlobStore extends AbstractComponent implements BlobStore private final String clientName; private final GoogleCloudStorageService storageService; - GoogleCloudStorageBlobStore(Settings settings, String bucketName, String clientName, GoogleCloudStorageService storageService) { - super(settings); + GoogleCloudStorageBlobStore(String bucketName, String clientName, GoogleCloudStorageService storageService) { this.bucketName = bucketName; this.clientName = clientName; this.storageService = storageService; diff --git a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStoragePlugin.java b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStoragePlugin.java index 12e7fd26ff565..3186d2547a327 100644 --- a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStoragePlugin.java +++ b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStoragePlugin.java @@ -38,14 
+38,14 @@ public class GoogleCloudStoragePlugin extends Plugin implements RepositoryPlugin final GoogleCloudStorageService storageService; public GoogleCloudStoragePlugin(final Settings settings) { - this.storageService = createStorageService(settings); + this.storageService = createStorageService(); // eagerly load client settings so that secure settings are readable (not closed) reload(settings); } // overridable for tests - protected GoogleCloudStorageService createStorageService(Settings settings) { - return new GoogleCloudStorageService(settings); + protected GoogleCloudStorageService createStorageService() { + return new GoogleCloudStorageService(); } @Override diff --git a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java index f3a93c8647cf4..379fc10bc86f0 100644 --- a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java +++ b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java @@ -26,6 +26,7 @@ import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; @@ -78,6 +79,7 @@ class GoogleCloudStorageRepository extends BlobStoreRepository { static final Setting HTTP_CONNECT_TIMEOUT = timeSetting("http.connect_timeout", NO_TIMEOUT, Property.NodeScope, Property.Dynamic); + private final Settings settings; private final GoogleCloudStorageService storageService; private final BlobPath basePath; private final boolean compress; @@ -89,6 +91,7 @@ class GoogleCloudStorageRepository extends BlobStoreRepository { NamedXContentRegistry namedXContentRegistry, GoogleCloudStorageService storageService) { super(metadata, environment.settings(), namedXContentRegistry); + this.settings = environment.settings(); this.storageService = storageService; String basePath = BASE_PATH.get(metadata.settings()); @@ -132,7 +135,7 @@ class GoogleCloudStorageRepository extends BlobStoreRepository { @Override protected GoogleCloudStorageBlobStore createBlobStore() { - return new GoogleCloudStorageBlobStore(settings, bucket, clientName, storageService); + return new GoogleCloudStorageBlobStore(bucket, clientName, storageService); } @Override diff --git a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageService.java b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageService.java index 55c9573ce9cd7..37b40cb2cbf79 100644 --- a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageService.java +++ b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageService.java @@ -32,7 +32,6 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.LazyInitializable; @@ -70,10 +69,6 @@ public class GoogleCloudStorageService extends AbstractComponent { @Deprecated private TimeValue 
readTimeout; - public GoogleCloudStorageService(final Settings settings) { - super(settings); - } - /** * Refreshes the client settings and clears the client cache. Subsequent calls to * {@code GoogleCloudStorageService#client} will return new clients constructed diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreContainerTests.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreContainerTests.java index 0cc1243f28311..2f23011d4d9b7 100644 --- a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreContainerTests.java +++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreContainerTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.repositories.gcs; import org.elasticsearch.common.blobstore.BlobStore; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.repositories.ESBlobStoreContainerTestCase; import java.util.Locale; @@ -42,6 +41,6 @@ protected BlobStore newBlobStore() { } catch (final Exception e) { throw new RuntimeException(e); } - return new GoogleCloudStorageBlobStore(Settings.EMPTY, bucketName, clientName, storageService); + return new GoogleCloudStorageBlobStore(bucketName, clientName, storageService); } } diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java index 6d5c1bbf85310..db166a228b576 100644 --- a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java +++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java @@ -79,17 +79,12 @@ public MockGoogleCloudStoragePlugin(final Settings settings) { } @Override - protected GoogleCloudStorageService createStorageService(Settings settings) { - return new MockGoogleCloudStorageService(settings); + protected GoogleCloudStorageService createStorageService() { + return new MockGoogleCloudStorageService(); } } public static class MockGoogleCloudStorageService extends GoogleCloudStorageService { - - MockGoogleCloudStorageService(Settings settings) { - super(settings); - } - @Override public Storage client(String clientName) { return new MockStorage(BUCKET, blobs); diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreTests.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreTests.java index 4634bd3274a70..e2adfed94bbc9 100644 --- a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreTests.java +++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.repositories.gcs; import org.elasticsearch.common.blobstore.BlobStore; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.repositories.ESBlobStoreTestCase; import java.util.Locale; @@ -42,6 +41,6 @@ protected BlobStore newBlobStore() { } catch (final Exception e) { throw new RuntimeException(e); } - return new GoogleCloudStorageBlobStore(Settings.EMPTY, bucketName, clientName, storageService); + return new GoogleCloudStorageBlobStore(bucketName, 
clientName, storageService); } } diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepositoryDeprecationTests.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepositoryDeprecationTests.java index 0e11286181e9a..c45947bc7feb5 100644 --- a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepositoryDeprecationTests.java +++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepositoryDeprecationTests.java @@ -43,7 +43,7 @@ public void testDeprecatedSettings() throws Exception { final RepositoryMetaData repositoryMetaData = new RepositoryMetaData("test", "gcs", repositorySettings); final Environment environment = TestEnvironment.newEnvironment(Settings.builder().put("path.home", createTempDir()).build()); new GoogleCloudStorageRepository(repositoryMetaData, environment, NamedXContentRegistry.EMPTY, - new GoogleCloudStorageService(Settings.EMPTY) { + new GoogleCloudStorageService() { @Override public Storage client(String clientName) throws IOException { return new MockStorage("test", new ConcurrentHashMap<>()); diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageServiceTests.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageServiceTests.java index 5fa2f3c83a2f2..e99310645999e 100644 --- a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageServiceTests.java +++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageServiceTests.java @@ -63,7 +63,7 @@ public void testClientInitializer() throws Exception { .put(GoogleCloudStorageClientSettings.ENDPOINT_SETTING.getConcreteSettingForNamespace(clientName).getKey(), endpoint) .put(GoogleCloudStorageClientSettings.PROJECT_ID_SETTING.getConcreteSettingForNamespace(clientName).getKey(), projectIdName) .build(); - final GoogleCloudStorageService service = new GoogleCloudStorageService(settings); + final GoogleCloudStorageService service = new GoogleCloudStorageService(); service.refreshAndClearCache(GoogleCloudStorageClientSettings.load(settings)); final String deprecatedApplicationName = randomBoolean() ? 
null : "deprecated_" + randomAlphaOfLength(4); service.setOverrideApplicationName(deprecatedApplicationName); diff --git a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsRepository.java b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsRepository.java index 75ac06dcfaa93..55641041a7051 100644 --- a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsRepository.java +++ b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsRepository.java @@ -18,15 +18,6 @@ */ package org.elasticsearch.repositories.hdfs; -import java.io.IOException; -import java.io.UncheckedIOException; -import java.net.InetAddress; -import java.net.URI; -import java.net.UnknownHostException; -import java.security.AccessController; -import java.security.PrivilegedAction; -import java.util.Locale; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.AbstractFileSystem; import org.apache.hadoop.fs.FileContext; @@ -36,13 +27,13 @@ import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.SpecialPermission; import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.Strings; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.blobstore.BlobPath; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; @@ -50,9 +41,18 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.repositories.blobstore.BlobStoreRepository; +import java.io.IOException; +import java.io.UncheckedIOException; +import java.net.InetAddress; +import java.net.URI; +import java.net.UnknownHostException; +import java.security.AccessController; +import java.security.PrivilegedAction; +import java.util.Locale; + public final class HdfsRepository extends BlobStoreRepository { - private static final Logger LOGGER = Loggers.getLogger(HdfsRepository.class); + private static final Logger LOGGER = LogManager.getLogger(HdfsRepository.class); private static final String CONF_SECURITY_PRINCIPAL = "security.principal"; diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java index 7715c7086a67b..e1ffc7a22d44c 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java @@ -32,7 +32,6 @@ import org.elasticsearch.common.blobstore.BlobStore; import org.elasticsearch.common.blobstore.BlobStoreException; import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import java.io.IOException; @@ -55,9 +54,8 @@ class S3BlobStore extends AbstractComponent implements BlobStore { private final StorageClass storageClass; - S3BlobStore(Settings settings, S3Service service, String clientName, String bucket, boolean serverSideEncryption, + S3BlobStore(S3Service service, String clientName, String bucket, boolean serverSideEncryption, 
ByteSizeValue bufferSize, String cannedACL, String storageClass) { - super(settings); this.service = service; this.clientName = clientName; this.bucket = bucket; diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java index a2f600a34ae2c..e0e34e40f3cf8 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java @@ -148,6 +148,8 @@ class S3Repository extends BlobStoreRepository { */ static final Setting BASE_PATH_SETTING = Setting.simpleString("base_path"); + private final Settings settings; + private final S3Service service; private final String bucket; @@ -178,6 +180,7 @@ class S3Repository extends BlobStoreRepository { final NamedXContentRegistry namedXContentRegistry, final S3Service service) { super(metadata, settings, namedXContentRegistry); + this.settings = settings; this.service = service; // Parse and validate the user's S3 Storage Class setting @@ -242,7 +245,7 @@ class S3Repository extends BlobStoreRepository { protected S3BlobStore createBlobStore() { if (reference != null) { assert S3ClientSettings.checkDeprecatedCredentials(metadata.settings()) : metadata.name(); - return new S3BlobStore(settings, service, clientName, bucket, serverSideEncryption, bufferSize, cannedACL, storageClass) { + return new S3BlobStore(service, clientName, bucket, serverSideEncryption, bufferSize, cannedACL, storageClass) { @Override public AmazonS3Reference clientReference() { if (reference.tryIncRef()) { @@ -253,7 +256,7 @@ public AmazonS3Reference clientReference() { } }; } else { - return new S3BlobStore(settings, service, clientName, bucket, serverSideEncryption, bufferSize, cannedACL, storageClass); + return new S3BlobStore(service, clientName, bucket, serverSideEncryption, bufferSize, cannedACL, storageClass); } } diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java index da3219f2aef08..a2f9da5f846ef 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java @@ -64,7 +64,7 @@ public class S3RepositoryPlugin extends Plugin implements RepositoryPlugin, Relo protected final S3Service service; public S3RepositoryPlugin(final Settings settings) { - this(settings, new S3Service(settings)); + this(settings, new S3Service()); } S3RepositoryPlugin(final Settings settings, final S3Service service) { diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java index 7fd5659c8e1c4..e4d8abf65f836 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Service.java @@ -46,10 +46,6 @@ class S3Service extends AbstractComponent implements Closeable { private volatile Map clientsCache = emptyMap(); private volatile Map clientsSettings = emptyMap(); - S3Service(Settings settings) { - super(settings); - } - /** * Refreshes the settings for the AmazonS3 clients and clears the cache of * existing clients. 
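Classes that still need node settings after `AbstractComponent` stopped carrying them now hold their own copy, as `S3Repository` does here and `GoogleCloudStorageRepository` did earlier. A condensed sketch of that shape, with the repository's abstract methods omitted and the subclass name invented:

-------------------------------------------------
import org.elasticsearch.cluster.metadata.RepositoryMetaData;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.repositories.blobstore.BlobStoreRepository;

class ExampleRepository extends BlobStoreRepository { // illustrative subclass
    private final Settings settings; // kept locally, no longer inherited

    ExampleRepository(RepositoryMetaData metadata, Settings settings, NamedXContentRegistry registry) {
        super(metadata, settings, registry);
        this.settings = settings; // available later, e.g. in createBlobStore()
    }
}
-------------------------------------------------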
New clients will be built using these new settings. Old diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java index 1c3c47943a06e..ec5d5578a03a2 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/RepositoryCredentialsTests.java @@ -68,11 +68,6 @@ public HeadBucketResult headBucket(HeadBucketRequest headBucketRequest) throws A } static final class ProxyS3Service extends S3Service { - - ProxyS3Service(Settings settings) { - super(settings); - } - @Override AmazonS3 buildClient(final S3ClientSettings clientSettings) { final AmazonS3 client = super.buildClient(clientSettings); @@ -82,7 +77,7 @@ AmazonS3 buildClient(final S3ClientSettings clientSettings) { } ProxyS3RepositoryPlugin(Settings settings) { - super(settings, new ProxyS3Service(settings)); + super(settings, new ProxyS3Service()); } @Override diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java index bdb5f8468cee2..49c76e289d243 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java @@ -122,7 +122,7 @@ public TestS3RepositoryPlugin(final Settings settings) { @Override public Map getRepositories(final Environment env, final NamedXContentRegistry registry) { return Collections.singletonMap(S3Repository.TYPE, - (metadata) -> new S3Repository(metadata, env.settings(), registry, new S3Service(env.settings()) { + (metadata) -> new S3Repository(metadata, env.settings(), registry, new S3Service() { @Override AmazonS3 buildClient(S3ClientSettings clientSettings) { return new MockAmazonS3(blobs, bucket, serverSideEncryption, cannedACL, storageClass); diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreTests.java index 55df03ff34a3f..a44ad706b2361 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreTests.java @@ -24,7 +24,6 @@ import com.amazonaws.services.s3.model.StorageClass; import org.elasticsearch.common.blobstore.BlobStore; import org.elasticsearch.common.blobstore.BlobStoreException; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.repositories.ESBlobStoreTestCase; @@ -117,13 +116,13 @@ public static S3BlobStore randomMockS3BlobStore() { final String theClientName = randomAlphaOfLength(4); final AmazonS3 client = new MockAmazonS3(new ConcurrentHashMap<>(), bucket, serverSideEncryption, cannedACL, storageClass); - final S3Service service = new S3Service(Settings.EMPTY) { + final S3Service service = new S3Service() { @Override public synchronized AmazonS3Reference client(String clientName) { assert theClientName.equals(clientName); return new AmazonS3Reference(client); } }; - return new S3BlobStore(Settings.EMPTY,
service, theClientName, bucket, serverSideEncryption, bufferSize, cannedACL, storageClass); + return new S3BlobStore(service, theClientName, bucket, serverSideEncryption, bufferSize, cannedACL, storageClass); } } diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java index b76af23402c05..ecfa8e8d97dc1 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryTests.java @@ -57,10 +57,6 @@ public void shutdown() { } private static class DummyS3Service extends S3Service { - DummyS3Service() { - super(Settings.EMPTY); - } - @Override public AmazonS3Reference client(String clientName) { return new AmazonS3Reference(new DummyS3Client()); diff --git a/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/20_repository_permanent_credentials.yml b/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/20_repository_permanent_credentials.yml index 39ce992b7a58e..66f38ca043776 100644 --- a/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/20_repository_permanent_credentials.yml +++ b/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/20_repository_permanent_credentials.yml @@ -12,7 +12,7 @@ setup: settings: bucket: ${permanent_bucket} client: integration_test_permanent - base_path: ${permanent_base_path} + base_path: "${permanent_base_path}" canned_acl: private storage_class: standard @@ -26,7 +26,7 @@ setup: - match: { repository_permanent.settings.bucket : ${permanent_bucket} } - match: { repository_permanent.settings.client : "integration_test_permanent" } - - match: { repository_permanent.settings.base_path : ${permanent_base_path} } + - match: { repository_permanent.settings.base_path : "${permanent_base_path}" } - match: { repository_permanent.settings.canned_acl : "private" } - match: { repository_permanent.settings.storage_class : "standard" } - is_false: repository_permanent.settings.access_key diff --git a/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/30_repository_temporary_credentials.yml b/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/30_repository_temporary_credentials.yml index 497d85db752db..3f5685aa561fe 100644 --- a/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/30_repository_temporary_credentials.yml +++ b/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/30_repository_temporary_credentials.yml @@ -12,7 +12,7 @@ setup: settings: bucket: ${temporary_bucket} client: integration_test_temporary - base_path: ${temporary_base_path} + base_path: "${temporary_base_path}" canned_acl: private storage_class: standard @@ -26,7 +26,7 @@ setup: - match: { repository_temporary.settings.bucket : ${temporary_bucket} } - match: { repository_temporary.settings.client : "integration_test_temporary" } - - match: { repository_temporary.settings.base_path : ${temporary_base_path} } + - match: { repository_temporary.settings.base_path : "${temporary_base_path}" } - match: { repository_temporary.settings.canned_acl : "private" } - match: { repository_temporary.settings.storage_class : "standard" } - is_false: repository_temporary.settings.access_key diff --git 
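`randomMockS3BlobStore()` above and the `DummyS3Service` just below rely on the same test-double idiom: subclass the now settings-free `S3Service` and override `client` so no real AWS connection is ever opened. A sketch of that pattern, where `stub` stands in for a `MockAmazonS3`-style fake and the helper is invented:

-------------------------------------------------
import com.amazonaws.services.s3.AmazonS3;

final class S3TestDoubles { // illustrative holder class
    static S3Service fixedClientService(final AmazonS3 stub) {
        return new S3Service() {
            @Override
            public AmazonS3Reference client(String clientName) {
                // always hand back the canned client, whatever name is requested
                return new AmazonS3Reference(stub);
            }
        };
    }
}
-------------------------------------------------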
a/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/40_repository_ec2_credentials.yml b/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/40_repository_ec2_credentials.yml index 2df3b8290a19b..d021df267934d 100644 --- a/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/40_repository_ec2_credentials.yml +++ b/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/40_repository_ec2_credentials.yml @@ -12,7 +12,7 @@ setup: settings: bucket: ${ec2_bucket} client: integration_test_ec2 - base_path: ${ec2_base_path} + base_path: "${ec2_base_path}" canned_acl: private storage_class: standard @@ -26,7 +26,7 @@ setup: - match: { repository_ec2.settings.bucket : ${ec2_bucket} } - match: { repository_ec2.settings.client : "integration_test_ec2" } - - match: { repository_ec2.settings.base_path : ${ec2_base_path} } + - match: { repository_ec2.settings.base_path : "${ec2_base_path}" } - match: { repository_ec2.settings.canned_acl : "private" } - match: { repository_ec2.settings.storage_class : "standard" } - is_false: repository_ec2.settings.access_key diff --git a/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/50_repository_ecs_credentials.yml b/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/50_repository_ecs_credentials.yml index 54929e6e3ad82..dec0476edc713 100644 --- a/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/50_repository_ecs_credentials.yml +++ b/plugins/repository-s3/src/test/resources/rest-api-spec/test/repository_s3/50_repository_ecs_credentials.yml @@ -12,7 +12,7 @@ setup: settings: bucket: ${ecs_bucket} client: integration_test_ecs - base_path: ${ecs_base_path} + base_path: "${ecs_base_path}" canned_acl: private storage_class: standard @@ -26,7 +26,7 @@ setup: - match: { repository_ecs.settings.bucket : ${ecs_bucket} } - match: { repository_ecs.settings.client : "integration_test_ecs" } - - match: { repository_ecs.settings.base_path : ${ecs_base_path} } + - match: { repository_ecs.settings.base_path : "${ecs_base_path}" } - match: { repository_ecs.settings.canned_acl : "private" } - match: { repository_ecs.settings.storage_class : "standard" } - is_false: repository_ecs.settings.access_key diff --git a/qa/full-cluster-restart/build.gradle b/qa/full-cluster-restart/build.gradle index adb6cad288930..b1f5aff86b096 100644 --- a/qa/full-cluster-restart/build.gradle +++ b/qa/full-cluster-restart/build.gradle @@ -101,7 +101,7 @@ test.enabled = false // no unit tests for rolling upgrades, only the rest integr // basic integ tests includes testing bwc against the most recent version task integTest { if (project.bwc_tests_enabled) { - for (final def version : bwcVersions.snapshotsIndexCompatible) { + for (final def version : bwcVersions.unreleasedIndexCompatible) { dependsOn "v${version}#bwcTest" } } diff --git a/qa/mixed-cluster/build.gradle b/qa/mixed-cluster/build.gradle index 576601166a035..d926317e43962 100644 --- a/qa/mixed-cluster/build.gradle +++ b/qa/mixed-cluster/build.gradle @@ -68,7 +68,7 @@ test.enabled = false // no unit tests for rolling upgrades, only the rest integr // basic integ tests includes testing bwc against the most recent version task integTest { if (project.bwc_tests_enabled) { - for (final def version : bwcVersions.snapshotsWireCompatible) { + for (final def version : bwcVersions.unreleasedWireCompatible) { dependsOn "v${version}#bwcTest" } } diff --git a/qa/rolling-upgrade/build.gradle 
b/qa/rolling-upgrade/build.gradle index 83440ceeeb602..be8269ae941da 100644 --- a/qa/rolling-upgrade/build.gradle +++ b/qa/rolling-upgrade/build.gradle @@ -152,7 +152,7 @@ test.enabled = false // no unit tests for rolling upgrades, only the rest integr // basic integ tests includes testing bwc against the most recent version task integTest { if (project.bwc_tests_enabled) { - for (final def version : bwcVersions.snapshotsWireCompatible) { + for (final def version : bwcVersions.unreleasedWireCompatible) { dependsOn "v${version}#bwcTest" } } diff --git a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yml b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yml index ab33d60c66b0a..91f2cabb73305 100644 --- a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yml +++ b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/upgraded_cluster/10_basic.yml @@ -107,6 +107,9 @@ --- "Find a task result record from the old cluster": + - skip: + features: headers + - do: search: index: .tasks diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ContextAndHeaderTransportIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ContextAndHeaderTransportIT.java index 6236a4a4d6e31..6878f3b5f34ee 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ContextAndHeaderTransportIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ContextAndHeaderTransportIT.java @@ -300,7 +300,7 @@ public Collection createComponents(Client client, ClusterService cluster ResourceWatcherService resourceWatcherService, ScriptService scriptService, NamedXContentRegistry xContentRegistry, Environment environment, NodeEnvironment nodeEnvironment, NamedWriteableRegistry namedWriteableRegistry) { - loggingFilter.set(new LoggingFilter(clusterService.getSettings(), threadPool)); + loggingFilter.set(new LoggingFilter(threadPool)); return Collections.emptyList(); } @@ -315,8 +315,7 @@ public static class LoggingFilter extends ActionFilter.Simple { private final ThreadPool threadPool; - public LoggingFilter(Settings settings, ThreadPool pool) { - super(settings); + public LoggingFilter(ThreadPool pool) { this.threadPool = pool; } diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/TestDeprecationHeaderRestAction.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/TestDeprecationHeaderRestAction.java index ad38d89654a21..27b2f18b0919f 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/TestDeprecationHeaderRestAction.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/TestDeprecationHeaderRestAction.java @@ -58,7 +58,7 @@ public class TestDeprecationHeaderRestAction extends BaseRestHandler { Setting.boolSetting("test.setting.not_deprecated", false, Setting.Property.NodeScope, Setting.Property.Dynamic); - private static final Map> SETTINGS; + private static final Map> SETTINGS_MAP; static { Map> settingsMap = new HashMap<>(3); @@ -67,14 +67,17 @@ public class TestDeprecationHeaderRestAction extends BaseRestHandler { settingsMap.put(TEST_DEPRECATED_SETTING_TRUE2.getKey(), TEST_DEPRECATED_SETTING_TRUE2); settingsMap.put(TEST_NOT_DEPRECATED_SETTING.getKey(), TEST_NOT_DEPRECATED_SETTING); - SETTINGS = Collections.unmodifiableMap(settingsMap); + SETTINGS_MAP = Collections.unmodifiableMap(settingsMap); } public static final String DEPRECATED_ENDPOINT = "[/_test_cluster/deprecated_settings] exists for deprecated tests"; public static 
final String DEPRECATED_USAGE = "[deprecated_settings] usage is deprecated. use [settings] instead"; + private final Settings settings; + public TestDeprecationHeaderRestAction(Settings settings, RestController controller) { super(settings); + this.settings = settings; controller.registerAsDeprecatedHandler(RestRequest.Method.GET, "/_test_cluster/deprecated_settings", this, DEPRECATED_ENDPOINT, deprecationLogger); @@ -107,7 +110,7 @@ public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client builder.startObject().startArray("settings"); for (String setting : settings) { - builder.startObject().field(setting, SETTINGS.get(setting).getRaw(this.settings)).endObject(); + builder.startObject().field(setting, SETTINGS_MAP.get(setting).getRaw(this.settings)).endObject(); } builder.endArray().endObject(); channel.sendResponse(new BytesRestResponse(RestStatus.OK, builder)); diff --git a/qa/verify-version-constants/build.gradle b/qa/verify-version-constants/build.gradle index 0e4f29fa38ad0..0878934979978 100644 --- a/qa/verify-version-constants/build.gradle +++ b/qa/verify-version-constants/build.gradle @@ -61,7 +61,7 @@ test.enabled = false task integTest { if (project.bwc_tests_enabled) { - final def version = bwcVersions.snapshotsIndexCompatible.first() + final def version = bwcVersions.unreleasedIndexCompatible.first() dependsOn "v${version}#bwcTest" } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/count.json b/rest-api-spec/src/main/resources/rest-api-spec/api/count.json index 96fa4daf12b95..d14f4ab784a57 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/count.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/count.json @@ -20,6 +20,10 @@ "type" : "boolean", "description" : "Whether specified concrete indices should be ignored when unavailable (missing or closed)" }, + "ignore_throttled": { + "type" : "boolean", + "description" : "Whether specified concrete, expanded or aliased indices should be ignored when throttled" + }, "allow_no_indices": { "type" : "boolean", "description" : "Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/search.json b/rest-api-spec/src/main/resources/rest-api-spec/api/search.json index 28f8a07db4cc0..12e13aace4ed0 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/search.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/search.json @@ -54,6 +54,10 @@ "type" : "boolean", "description" : "Whether specified concrete indices should be ignored when unavailable (missing or closed)" }, + "ignore_throttled": { + "type" : "boolean", + "description" : "Whether specified concrete, expanded or aliased indices should be ignored when throttled" + }, "allow_no_indices": { "type" : "boolean", "description" : "Whether to ignore if a wildcard indices expression resolves into no concrete indices. 
(This includes `_all` string or when no indices have been specified)" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/search_template.json b/rest-api-spec/src/main/resources/rest-api-spec/api/search_template.json index a78295dd4f5a3..df1fc1b079923 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/search_template.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/search_template.json @@ -20,6 +20,10 @@ "type" : "boolean", "description" : "Whether specified concrete indices should be ignored when unavailable (missing or closed)" }, + "ignore_throttled": { + "type" : "boolean", + "description" : "Whether specified concrete, expanded or aliased indices should be ignored when throttled" + }, "allow_no_indices": { "type" : "boolean", "description" : "Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/get/70_source_filtering.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/get/70_source_filtering.yml index 05bc3fceef701..a65fb4677c8ff 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/get/70_source_filtering.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/get/70_source_filtering.yml @@ -71,7 +71,7 @@ "Deprecated _source_include and _source_exclude": - skip: - version: " - 6.99.99" + version: " - 6.5.99" reason: _source_include and _source_exclude are deprecated from 6.6.0 features: "warnings" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/msearch/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/msearch/10_basic.yml index 5c30bbc214387..c5d389e56bae3 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/msearch/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/msearch/10_basic.yml @@ -66,8 +66,8 @@ setup: "Least impact smoke test": # only passing these parameters to make sure they are consumed - skip: - version: " - 6.99.99" - reason: "AwaitsFix'ing, see https://github.com/elastic/elasticsearch/issues/34827" + version: " - 6.4.99" + reason: "max_concurrent_shard_requests was added in 6.5.0" - do: msearch: max_concurrent_shard_requests: 1 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/270_median_absolute_deviation_metric.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/270_median_absolute_deviation_metric.yml new file mode 100644 index 0000000000000..b0d44a8a096b0 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/270_median_absolute_deviation_metric.yml @@ -0,0 +1,143 @@ +setup: + - skip: + version: " - 6.5.99" + reason: "added in 6.6.0" + - do: + indices.create: + index: test + body: + settings: + number_of_replicas: 0 + mappings: + _doc: + properties: + int_field: + type: integer + double_field: + type: double + incomplete_field: + type: integer + - do: + bulk: + refresh: true + body: + - index: + _index: test + _type: _doc + - int_field: 100 + double_field: 100.0 + incomplete_field: 1000 + - index: + _index: test + _type: _doc + - int_field: 200 + double_field: 200.0 + incomplete_field: 2000 + - index: + _index: test + _type: _doc + - int_field: 300 + double_field: 300.0 + +--- +"basic test": + + - do: + search: + body: + aggs: + mad_int: + median_absolute_deviation: + field: int_field + mad_double: + median_absolute_deviation: + field: double_field + + - match: { hits.total: 3 
} + - length: { hits.hits: 3 } + + - match: { aggregations.mad_int.value: 100 } + - match: { aggregations.mad_double.value: 100 } + +--- +"with setting compression": + + - do: + search: + body: + aggs: + mad_int: + median_absolute_deviation: + field: int_field + compression: 500 + mad_double: + median_absolute_deviation: + field: double_field + compression: 500 + + - match: { hits.total: 3 } + - length: { hits.hits: 3 } + + - match: { aggregations.mad_int.value: 100 } + - match: { aggregations.mad_double.value: 100 } + +--- +"no documents": + + - do: + search: + body: + query: + bool: + filter: + term: + non_existent_field: non_existent_value + aggs: + mad_no_docs: + median_absolute_deviation: + field: non_existent_field + + - match: { hits.total: 0 } + - length: { hits.hits: 0 } + + - is_false: aggregations.mad_no_docs.value + +--- +"missing value": + + - do: + search: + body: + aggs: + mad_missing: + median_absolute_deviation: + field: incomplete_field + missing: 3000 + + - match: { hits.total: 3 } + - length: { hits.hits: 3 } + + - match: { aggregations.mad_missing.value: 1000 } + +--- +"bad arguments": + + - do: + catch: /\[compression\] must be greater than 0. Found \[0.0\] in \[mad\]/ + search: + body: + aggs: + mad: + median_absolute_deviation: + field: int_field + compression: 0 + + - do: + catch: /\[compression\] must be greater than 0. Found \[-1.0\] in \[mad\]/ + search: + body: + aggs: + mad: + median_absolute_deviation: + field: int_field + compression: -1 diff --git a/server/licenses/lucene-analyzers-common-7.5.0.jar.sha1 b/server/licenses/lucene-analyzers-common-7.5.0.jar.sha1 deleted file mode 100644 index 644e74b25e959..0000000000000 --- a/server/licenses/lucene-analyzers-common-7.5.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e7fbdfc2297c3ff5d194d7bef95810504e52710e \ No newline at end of file diff --git a/server/licenses/lucene-analyzers-common-7.6.0-snapshot-f9598f335b.jar.sha1 b/server/licenses/lucene-analyzers-common-7.6.0-snapshot-f9598f335b.jar.sha1 new file mode 100644 index 0000000000000..36e0c85b2b588 --- /dev/null +++ b/server/licenses/lucene-analyzers-common-7.6.0-snapshot-f9598f335b.jar.sha1 @@ -0,0 +1 @@ +57a504b6795f22a25b8a1f054d9943039a85a18d \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-7.5.0.jar.sha1 b/server/licenses/lucene-backward-codecs-7.5.0.jar.sha1 deleted file mode 100644 index 40b47aea708cd..0000000000000 --- a/server/licenses/lucene-backward-codecs-7.5.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3ebd10f4a1fe71a92b69c0d653136bcf9790a165 \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-7.6.0-snapshot-f9598f335b.jar.sha1 b/server/licenses/lucene-backward-codecs-7.6.0-snapshot-f9598f335b.jar.sha1 new file mode 100644 index 0000000000000..146a816d44c76 --- /dev/null +++ b/server/licenses/lucene-backward-codecs-7.6.0-snapshot-f9598f335b.jar.sha1 @@ -0,0 +1 @@ +77e50b66ce562a20b55a7580c2d5be6ee37ff23c \ No newline at end of file diff --git a/server/licenses/lucene-core-7.5.0.jar.sha1 b/server/licenses/lucene-core-7.5.0.jar.sha1 deleted file mode 100644 index d3e7ba985dfed..0000000000000 --- a/server/licenses/lucene-core-7.5.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -736e94305e2a0e803563c5b184877df5c7d4cb69 \ No newline at end of file diff --git a/server/licenses/lucene-core-7.6.0-snapshot-f9598f335b.jar.sha1 b/server/licenses/lucene-core-7.6.0-snapshot-f9598f335b.jar.sha1 new file mode 100644 index 0000000000000..d8d15caa0b4c3 --- /dev/null +++ 
b/server/licenses/lucene-core-7.6.0-snapshot-f9598f335b.jar.sha1 @@ -0,0 +1 @@ +ec080318251807172626c0ba8dc931276d52b15e \ No newline at end of file diff --git a/server/licenses/lucene-grouping-7.5.0.jar.sha1 b/server/licenses/lucene-grouping-7.5.0.jar.sha1 deleted file mode 100644 index e801ae199c658..0000000000000 --- a/server/licenses/lucene-grouping-7.5.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f326a63640a288c302c713fcf2965fdc827b3cca \ No newline at end of file diff --git a/server/licenses/lucene-grouping-7.6.0-snapshot-f9598f335b.jar.sha1 b/server/licenses/lucene-grouping-7.6.0-snapshot-f9598f335b.jar.sha1 new file mode 100644 index 0000000000000..7a8008878c2dc --- /dev/null +++ b/server/licenses/lucene-grouping-7.6.0-snapshot-f9598f335b.jar.sha1 @@ -0,0 +1 @@ +3855c79ab2ca091e84ba46d8326a149f1d67bb51 \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-7.5.0.jar.sha1 b/server/licenses/lucene-highlighter-7.5.0.jar.sha1 deleted file mode 100644 index bd71986c8c24d..0000000000000 --- a/server/licenses/lucene-highlighter-7.5.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -59420a5e30f12885f160e49bb975ab6b5985bd3d \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-7.6.0-snapshot-f9598f335b.jar.sha1 b/server/licenses/lucene-highlighter-7.6.0-snapshot-f9598f335b.jar.sha1 new file mode 100644 index 0000000000000..46d63211696f0 --- /dev/null +++ b/server/licenses/lucene-highlighter-7.6.0-snapshot-f9598f335b.jar.sha1 @@ -0,0 +1 @@ +236febdc0f3211640b265d4a6a682f98481d0e20 \ No newline at end of file diff --git a/server/licenses/lucene-join-7.5.0.jar.sha1 b/server/licenses/lucene-join-7.5.0.jar.sha1 deleted file mode 100644 index f80a5f6fe678f..0000000000000 --- a/server/licenses/lucene-join-7.5.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -065d3c275b094383640d393579cdb7aff22bcd1d \ No newline at end of file diff --git a/server/licenses/lucene-join-7.6.0-snapshot-f9598f335b.jar.sha1 b/server/licenses/lucene-join-7.6.0-snapshot-f9598f335b.jar.sha1 new file mode 100644 index 0000000000000..19f31755df727 --- /dev/null +++ b/server/licenses/lucene-join-7.6.0-snapshot-f9598f335b.jar.sha1 @@ -0,0 +1 @@ +b5ce0f6d8a6730bbcfd187401168052bd2c8450b \ No newline at end of file diff --git a/server/licenses/lucene-memory-7.5.0.jar.sha1 b/server/licenses/lucene-memory-7.5.0.jar.sha1 deleted file mode 100644 index d535e0bae6459..0000000000000 --- a/server/licenses/lucene-memory-7.5.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b8cc0c311a823287264653fbd05f866e059d303c \ No newline at end of file diff --git a/server/licenses/lucene-memory-7.6.0-snapshot-f9598f335b.jar.sha1 b/server/licenses/lucene-memory-7.6.0-snapshot-f9598f335b.jar.sha1 new file mode 100644 index 0000000000000..69f88a20d3e96 --- /dev/null +++ b/server/licenses/lucene-memory-7.6.0-snapshot-f9598f335b.jar.sha1 @@ -0,0 +1 @@ +3794e1321c23187399c0b246548827a7613ea15c \ No newline at end of file diff --git a/server/licenses/lucene-misc-7.5.0.jar.sha1 b/server/licenses/lucene-misc-7.5.0.jar.sha1 deleted file mode 100644 index a3bd848e17905..0000000000000 --- a/server/licenses/lucene-misc-7.5.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -60d0a02b9297b5423d5400a6b0aa114915232b60 \ No newline at end of file diff --git a/server/licenses/lucene-misc-7.6.0-snapshot-f9598f335b.jar.sha1 b/server/licenses/lucene-misc-7.6.0-snapshot-f9598f335b.jar.sha1 new file mode 100644 index 0000000000000..6f3e0b71cc3a5 --- /dev/null +++ b/server/licenses/lucene-misc-7.6.0-snapshot-f9598f335b.jar.sha1 @@ -0,0 +1 @@ 
+605faa9cdb9ba356e289cb5be02fa687d5de1efb \ No newline at end of file diff --git a/server/licenses/lucene-queries-7.5.0.jar.sha1 b/server/licenses/lucene-queries-7.5.0.jar.sha1 deleted file mode 100644 index 1c6c12f5b563d..0000000000000 --- a/server/licenses/lucene-queries-7.5.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -bfbf786b75c60a25daf33d389efd6458e17218d9 \ No newline at end of file diff --git a/server/licenses/lucene-queries-7.6.0-snapshot-f9598f335b.jar.sha1 b/server/licenses/lucene-queries-7.6.0-snapshot-f9598f335b.jar.sha1 new file mode 100644 index 0000000000000..221e60a9fd14a --- /dev/null +++ b/server/licenses/lucene-queries-7.6.0-snapshot-f9598f335b.jar.sha1 @@ -0,0 +1 @@ +12ecd339212886c2b37934ef2a3af4c85d89b4e3 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-7.5.0.jar.sha1 b/server/licenses/lucene-queryparser-7.5.0.jar.sha1 deleted file mode 100644 index d5fb221371a2a..0000000000000 --- a/server/licenses/lucene-queryparser-7.5.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3eefb522e150f2ba0009df20f3bcfa91d60e7090 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-7.6.0-snapshot-f9598f335b.jar.sha1 b/server/licenses/lucene-queryparser-7.6.0-snapshot-f9598f335b.jar.sha1 new file mode 100644 index 0000000000000..000f3fbf36352 --- /dev/null +++ b/server/licenses/lucene-queryparser-7.6.0-snapshot-f9598f335b.jar.sha1 @@ -0,0 +1 @@ +a74e9bac2cd10c878cb6aa005753267958a52101 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-7.5.0.jar.sha1 b/server/licenses/lucene-sandbox-7.5.0.jar.sha1 deleted file mode 100644 index 85730c40bed38..0000000000000 --- a/server/licenses/lucene-sandbox-7.5.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9fa3bb1c81179e112a62c0db6749767127c616bd \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-7.6.0-snapshot-f9598f335b.jar.sha1 b/server/licenses/lucene-sandbox-7.6.0-snapshot-f9598f335b.jar.sha1 new file mode 100644 index 0000000000000..605843f5fd9c2 --- /dev/null +++ b/server/licenses/lucene-sandbox-7.6.0-snapshot-f9598f335b.jar.sha1 @@ -0,0 +1 @@ +a1bb75649b0252e4ae8f9140eb9a457b68e3f8aa \ No newline at end of file diff --git a/server/licenses/lucene-spatial-7.5.0.jar.sha1 b/server/licenses/lucene-spatial-7.5.0.jar.sha1 deleted file mode 100644 index ad600d067e3de..0000000000000 --- a/server/licenses/lucene-spatial-7.5.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8de64a6a8ad22b5849f2ab5f824917723de7a349 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-7.6.0-snapshot-f9598f335b.jar.sha1 b/server/licenses/lucene-spatial-7.6.0-snapshot-f9598f335b.jar.sha1 new file mode 100644 index 0000000000000..a11485f5d86c7 --- /dev/null +++ b/server/licenses/lucene-spatial-7.6.0-snapshot-f9598f335b.jar.sha1 @@ -0,0 +1 @@ +38ef3c295d4d8b79490ff8e9451e4e65f3e6212e \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-7.5.0.jar.sha1 b/server/licenses/lucene-spatial-extras-7.5.0.jar.sha1 deleted file mode 100644 index 0c3b0dd3a6b44..0000000000000 --- a/server/licenses/lucene-spatial-extras-7.5.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b0892bbc0dc16ca07cf98897df3b3e9ed3069615 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-7.6.0-snapshot-f9598f335b.jar.sha1 b/server/licenses/lucene-spatial-extras-7.6.0-snapshot-f9598f335b.jar.sha1 new file mode 100644 index 0000000000000..76518e44c5466 --- /dev/null +++ b/server/licenses/lucene-spatial-extras-7.6.0-snapshot-f9598f335b.jar.sha1 @@ -0,0 +1 @@ 
+ba81a0520b8e085a7890d91c9a90343d21bbbc65 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-7.5.0.jar.sha1 b/server/licenses/lucene-spatial3d-7.5.0.jar.sha1 deleted file mode 100644 index 8ea67d1bde5f9..0000000000000 --- a/server/licenses/lucene-spatial3d-7.5.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -672920a7fb48624bcb84ce0ee67dd0c7bc080c94 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-7.6.0-snapshot-f9598f335b.jar.sha1 b/server/licenses/lucene-spatial3d-7.6.0-snapshot-f9598f335b.jar.sha1 new file mode 100644 index 0000000000000..4598466e048f7 --- /dev/null +++ b/server/licenses/lucene-spatial3d-7.6.0-snapshot-f9598f335b.jar.sha1 @@ -0,0 +1 @@ +edb2e853646d236154050fa8f1e68f908e73d10a \ No newline at end of file diff --git a/server/licenses/lucene-suggest-7.5.0.jar.sha1 b/server/licenses/lucene-suggest-7.5.0.jar.sha1 deleted file mode 100644 index 8de76642e9996..0000000000000 --- a/server/licenses/lucene-suggest-7.5.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -aa7d170871711ebd4dbd367d7b8ee0f1eb5e88b7 \ No newline at end of file diff --git a/server/licenses/lucene-suggest-7.6.0-snapshot-f9598f335b.jar.sha1 b/server/licenses/lucene-suggest-7.6.0-snapshot-f9598f335b.jar.sha1 new file mode 100644 index 0000000000000..4013d790cfb1f --- /dev/null +++ b/server/licenses/lucene-suggest-7.6.0-snapshot-f9598f335b.jar.sha1 @@ -0,0 +1 @@ +ee715c9d8dc562b98572eb036b2aee356c23ae23 \ No newline at end of file diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index 3d82b51b7a633..996de8e2d6b35 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -128,6 +128,8 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_5_6_12 = new Version(V_5_6_12_ID, org.apache.lucene.util.Version.LUCENE_6_6_1); public static final int V_5_6_13_ID = 5061399; public static final Version V_5_6_13 = new Version(V_5_6_13_ID, org.apache.lucene.util.Version.LUCENE_6_6_1); + public static final int V_5_6_14_ID = 5061499; + public static final Version V_5_6_14 = new Version(V_5_6_14_ID, org.apache.lucene.util.Version.LUCENE_6_6_1); public static final int V_6_0_0_alpha1_ID = 6000001; public static final Version V_6_0_0_alpha1 = new Version(V_6_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_7_0_0); public static final int V_6_0_0_alpha2_ID = 6000002; @@ -184,10 +186,12 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_6_4_2 = new Version(V_6_4_2_ID, org.apache.lucene.util.Version.LUCENE_7_4_0); public static final int V_6_4_3_ID = 6040399; public static final Version V_6_4_3 = new Version(V_6_4_3_ID, org.apache.lucene.util.Version.LUCENE_7_4_0); + public static final int V_6_4_4_ID = 6040499; + public static final Version V_6_4_4 = new Version(V_6_4_4_ID, org.apache.lucene.util.Version.LUCENE_7_4_0); public static final int V_6_5_0_ID = 6050099; public static final Version V_6_5_0 = new Version(V_6_5_0_ID, org.apache.lucene.util.Version.LUCENE_7_5_0); public static final int V_6_6_0_ID = 6060099; - public static final Version V_6_6_0 = new Version(V_6_6_0_ID, org.apache.lucene.util.Version.LUCENE_7_5_0); + public static final Version V_6_6_0 = new Version(V_6_6_0_ID, org.apache.lucene.util.Version.LUCENE_7_6_0); public static final Version CURRENT = V_6_6_0; @@ -206,6 +210,8 @@ public static Version fromId(int id) { return V_6_6_0; case V_6_5_0_ID: return 
V_6_5_0; + case V_6_4_4_ID: + return V_6_4_4; case V_6_4_3_ID: return V_6_4_3; case V_6_4_2_ID: @@ -256,6 +262,8 @@ public static Version fromId(int id) { return V_6_0_0_alpha2; case V_6_0_0_alpha1_ID: return V_6_0_0_alpha1; + case V_5_6_14_ID: + return V_5_6_14; case V_5_6_13_ID: return V_5_6_13; case V_5_6_12_ID: diff --git a/server/src/main/java/org/elasticsearch/action/ActionModule.java b/server/src/main/java/org/elasticsearch/action/ActionModule.java index 1e25233ff51ea..ef2814c087343 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionModule.java +++ b/server/src/main/java/org/elasticsearch/action/ActionModule.java @@ -387,7 +387,7 @@ public ActionModule(boolean transportClient, Settings settings, IndexNameExpress if (transportClient) { restController = null; } else { - restController = new RestController(settings, headers, restWrapper, nodeClient, circuitBreakerService, usageService); + restController = new RestController(headers, restWrapper, nodeClient, circuitBreakerService, usageService); } } diff --git a/server/src/main/java/org/elasticsearch/action/TransportActionNodeProxy.java b/server/src/main/java/org/elasticsearch/action/TransportActionNodeProxy.java index dc42da765c6ee..d91e962b961ec 100644 --- a/server/src/main/java/org/elasticsearch/action/TransportActionNodeProxy.java +++ b/server/src/main/java/org/elasticsearch/action/TransportActionNodeProxy.java @@ -20,7 +20,6 @@ package org.elasticsearch.action; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.transport.TransportRequestOptions; import org.elasticsearch.transport.TransportService; @@ -28,14 +27,13 @@ /** * A generic proxy that will execute the given action against a specific node. 
*/ -public class TransportActionNodeProxy extends AbstractComponent { +public class TransportActionNodeProxy { private final TransportService transportService; private final GenericAction action; private final TransportRequestOptions transportOptions; public TransportActionNodeProxy(Settings settings, GenericAction action, TransportService transportService) { - super(settings); this.action = action; this.transportService = transportService; this.transportOptions = action.transportOptions(settings); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequest.java index 9249550871c12..5bc1dfa33b28f 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequest.java @@ -67,7 +67,7 @@ public class IndicesAliasesRequest extends AcknowledgedRequest { + private final Settings settings; private final IndicesService indicesService; private final Environment environment; @@ -89,6 +90,7 @@ public TransportAnalyzeAction(Settings settings, ThreadPool threadPool, ClusterS IndexNameExpressionResolver indexNameExpressionResolver, Environment environment) { super(settings, AnalyzeAction.NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver, AnalyzeRequest::new, ThreadPool.Names.ANALYZE); + this.settings = settings; this.indicesService = indicesService; this.environment = environment; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexRequest.java index 249d22e7c5bd7..59300cfcecb3a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexRequest.java @@ -38,7 +38,7 @@ public class DeleteIndexRequest extends AcknowledgedRequest private String[] indices; // Delete index should work by default on both open and closed indices. 
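    // note: the extra trailing "false" added below is presumably the new ignore_throttled option
    // introduced elsewhere in this change, so deletes continue to apply to search-throttled indices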
- private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, true, true, true, false, false, true); + private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, true, true, true, false, false, true, false); public DeleteIndexRequest() { } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/Condition.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/Condition.java index afbc9a554ed5e..1b385ed9d0dbc 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/Condition.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/Condition.java @@ -71,6 +71,10 @@ public final String toString() { return "[" + name + ": " + value + "]"; } + public T value() { + return value; + } + /** * Holder for index stats used to evaluate conditions */ diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java index fe5ad65c4799b..f36636594a4d6 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java @@ -196,7 +196,7 @@ public boolean isDryRun() { return dryRun; } - Map getConditions() { + public Map getConditions() { return conditions; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponse.java index b7e4294a5635c..356f805c24bd4 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponse.java @@ -68,8 +68,8 @@ public final class RolloverResponse extends ShardsAcknowledgedResponse implement RolloverResponse() { } - RolloverResponse(String oldIndex, String newIndex, Map conditionResults, - boolean dryRun, boolean rolledOver, boolean acknowledged, boolean shardsAcknowledged) { + public RolloverResponse(String oldIndex, String newIndex, Map conditionResults, + boolean dryRun, boolean rolledOver, boolean acknowledged, boolean shardsAcknowledged) { super(acknowledged, shardsAcknowledged); this.oldIndex = oldIndex; this.newIndex = newIndex; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java index 1734ad0514160..25b6ec0b88376 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java @@ -82,7 +82,7 @@ public TransportRolloverAction(Settings settings, TransportService transportServ this.createIndexService = createIndexService; this.indexAliasesService = indexAliasesService; this.client = client; - this.activeShardsObserver = new ActiveShardsObserver(settings, clusterService, threadPool); + this.activeShardsObserver = new ActiveShardsObserver(clusterService, threadPool); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java index 18c7d506c7275..96804248f62d9 100644 --- 
a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java @@ -88,7 +88,7 @@ public String[] indices() { return indices; } - Settings settings() { + public Settings settings() { return settings; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeResponse.java index c116bd896c81f..ffe59f1e3ae18 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeResponse.java @@ -38,7 +38,7 @@ public final class ResizeResponse extends CreateIndexResponse { ResizeResponse() { } - ResizeResponse(boolean acknowledged, boolean shardsAcknowledged, String index) { + public ResizeResponse(boolean acknowledged, boolean shardsAcknowledged, String index) { super(acknowledged, shardsAcknowledged, index); } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BackoffPolicy.java b/server/src/main/java/org/elasticsearch/action/bulk/BackoffPolicy.java index 81084e22377e5..00462df4d895c 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BackoffPolicy.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BackoffPolicy.java @@ -34,7 +34,8 @@ * semantics: * *
 * <ul>
- * <li>#hasNext() determines whether the progression has more elements. Return true for infinite progressions</li>
+ * <li>#hasNext() determines whether the progression has more elements. Return true for infinite progressions
+ * </li>
 * <li>#next() determines the next element in the progression, i.e. the next wait time period</li>
 * </ul>
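 *
 * For illustration only (not part of this change), the existing {@code BackoffPolicy#constantBackoff}
 * factory satisfies these semantics with a finite progression:
 * <pre>
 * Iterator<TimeValue> it = BackoffPolicy.constantBackoff(TimeValue.timeValueMillis(50), 3).iterator();
 * while (it.hasNext()) {            // true exactly three times
 *     TimeValue wait = it.next();   // a constant 50ms wait period each iteration
 * }
 * </pre>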
    * diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java index f11444dd02288..34c18029ead5d 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java @@ -219,12 +219,13 @@ public void close() { * Closes the processor. If flushing by time is enabled, then it's shutdown. Any remaining bulk actions are flushed. *

    * If concurrent requests are not enabled, returns {@code true} immediately. - * If concurrent requests are enabled, waits for up to the specified timeout for all bulk requests to complete then returns {@code true}, + * If concurrent requests are enabled, waits for up to the specified timeout for all bulk requests to complete then returns {@code true} * If the specified waiting time elapses before all bulk requests complete, {@code false} is returned. * * @param timeout The maximum time to wait for the bulk requests to complete * @param unit The time unit of the {@code timeout} argument - * @return {@code true} if all bulk requests completed and {@code false} if the waiting time elapsed before all the bulk requests completed + * @return {@code true} if all bulk requests completed and {@code false} if the waiting time elapsed before all the bulk requests + * completed * @throws InterruptedException If the current thread is interrupted */ public synchronized boolean awaitClose(long timeout, TimeUnit unit) throws InterruptedException { @@ -300,7 +301,8 @@ public BulkProcessor add(BytesReference data, @Nullable String defaultIndex, @Nu * Adds the data from the bytes to be processed by the bulk processor */ public synchronized BulkProcessor add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType, - @Nullable String defaultPipeline, @Nullable Object payload, XContentType xContentType) throws Exception { + @Nullable String defaultPipeline, @Nullable Object payload, + XContentType xContentType) throws Exception { bulkRequest.add(data, defaultIndex, defaultType, null, null, null, defaultPipeline, payload, true, xContentType); executeIfNeeded(); return this; diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java index 9c6bcfc59ea36..a8d4e01a24924 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java @@ -376,11 +376,13 @@ public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Null } else if (PIPELINE.match(currentFieldName, parser.getDeprecationHandler())) { pipeline = parser.text(); } else if (FIELDS.match(currentFieldName, parser.getDeprecationHandler())) { - throw new IllegalArgumentException("Action/metadata line [" + line + "] contains a simple value for parameter [fields] while a list is expected"); + throw new IllegalArgumentException("Action/metadata line [" + line + "] contains a simple value for" + + " parameter [fields] while a list is expected"); } else if (SOURCE.match(currentFieldName, parser.getDeprecationHandler())) { fetchSourceContext = FetchSourceContext.fromXContent(parser); } else { - throw new IllegalArgumentException("Action/metadata line [" + line + "] contains an unknown parameter [" + currentFieldName + "]"); + throw new IllegalArgumentException("Action/metadata line [" + line + "] contains an unknown parameter [" + + currentFieldName + "]"); } } else if (token == XContentParser.Token.START_ARRAY) { if (FIELDS.match(currentFieldName, parser.getDeprecationHandler())) { @@ -388,21 +390,25 @@ public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Null List values = parser.list(); fields = values.toArray(new String[values.size()]); } else { - throw new IllegalArgumentException("Malformed action/metadata line [" + line + "], expected a simple value for field [" + currentFieldName + "] but found [" + token + "]"); 
+ throw new IllegalArgumentException("Malformed action/metadata line [" + line + "], expected a simple " + + "value for field [" + currentFieldName + "] but found [" + token + "]"); } - } else if (token == XContentParser.Token.START_OBJECT && SOURCE.match(currentFieldName, parser.getDeprecationHandler())) { + } else if (token == XContentParser.Token.START_OBJECT && SOURCE.match(currentFieldName, + parser.getDeprecationHandler())) { fetchSourceContext = FetchSourceContext.fromXContent(parser); } else if (token != XContentParser.Token.VALUE_NULL) { - throw new IllegalArgumentException("Malformed action/metadata line [" + line + "], expected a simple value for field [" + currentFieldName + "] but found [" + token + "]"); + throw new IllegalArgumentException("Malformed action/metadata line [" + line + + "], expected a simple value for field [" + currentFieldName + "] but found [" + token + "]"); } } } else if (token != XContentParser.Token.END_OBJECT) { - throw new IllegalArgumentException("Malformed action/metadata line [" + line + "], expected " + XContentParser.Token.START_OBJECT - + " or " + XContentParser.Token.END_OBJECT + " but found [" + token + "]"); + throw new IllegalArgumentException("Malformed action/metadata line [" + line + "], expected " + + XContentParser.Token.START_OBJECT + " or " + XContentParser.Token.END_OBJECT + " but found [" + token + "]"); } if ("delete".equals(action)) { - add(new DeleteRequest(index, type, id).routing(routing).parent(parent).version(version).versionType(versionType), payload); + add(new DeleteRequest(index, type, id).routing(routing).parent(parent).version(version) + .versionType(versionType), payload); } else { nextMarker = findNextMarker(marker, from, data, length); if (nextMarker == -1) { @@ -414,21 +420,21 @@ public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Null // of index request. 
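                    // dispatch on the action name: "index" and "create" both build an IndexRequest
                    // ("create" forces the create flag), while "update" turns the following source
                    // line into an UpdateRequest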
if ("index".equals(action)) { if (opType == null) { - internalAdd(new IndexRequest(index, type, id).routing(routing).parent(parent).version(version).versionType(versionType) - .setPipeline(pipeline) + internalAdd(new IndexRequest(index, type, id).routing(routing).parent(parent).version(version) + .versionType(versionType).setPipeline(pipeline) .source(sliceTrimmingCarriageReturn(data, from, nextMarker,xContentType), xContentType), payload); } else { - internalAdd(new IndexRequest(index, type, id).routing(routing).parent(parent).version(version).versionType(versionType) - .create("create".equals(opType)).setPipeline(pipeline) + internalAdd(new IndexRequest(index, type, id).routing(routing).parent(parent).version(version) + .versionType(versionType).create("create".equals(opType)).setPipeline(pipeline) .source(sliceTrimmingCarriageReturn(data, from, nextMarker, xContentType), xContentType), payload); } } else if ("create".equals(action)) { - internalAdd(new IndexRequest(index, type, id).routing(routing).parent(parent).version(version).versionType(versionType) - .create(true).setPipeline(pipeline) + internalAdd(new IndexRequest(index, type, id).routing(routing).parent(parent).version(version) + .versionType(versionType).create(true).setPipeline(pipeline) .source(sliceTrimmingCarriageReturn(data, from, nextMarker, xContentType), xContentType), payload); } else if ("update".equals(action)) { - UpdateRequest updateRequest = new UpdateRequest(index, type, id).routing(routing).parent(parent).retryOnConflict(retryOnConflict) - .version(version).versionType(versionType) + UpdateRequest updateRequest = new UpdateRequest(index, type, id).routing(routing).parent(parent) + .retryOnConflict(retryOnConflict).version(version).versionType(versionType) .routing(routing) .parent(parent); // EMPTY is safe here because we never call namedObject diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkResponse.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkResponse.java index 30bf2dc14773b..d474dcee3639b 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkResponse.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkResponse.java @@ -109,7 +109,8 @@ public String buildFailureMessage() { BulkItemResponse response = responses[i]; if (response.isFailed()) { sb.append("\n[").append(i) - .append("]: index [").append(response.getIndex()).append("], type [").append(response.getType()).append("], id [").append(response.getId()) + .append("]: index [").append(response.getIndex()).append("], type [") + .append(response.getType()).append("], id [").append(response.getId()) .append("], message [").append(response.getFailureMessage()).append("]"); } } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java index 0d9daff392e2d..4ef96acf6809f 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java @@ -260,9 +260,11 @@ void createIndex(String index, TimeValue timeout, ActionListener responses, int idx, DocWriteRequest request, String index, Exception e) { + private boolean setResponseFailureIfIndexMatches(AtomicArray responses, int idx, DocWriteRequest request, + String index, Exception e) { if (index.equals(request.index())) { - responses.set(idx, new BulkItemResponse(idx, request.opType(), new BulkItemResponse.Failure(request.index(), request.type(), 
request.id(), e))); + responses.set(idx, new BulkItemResponse(idx, request.opType(), new BulkItemResponse.Failure(request.index(), request.type(), + request.id(), e))); return true; } return false; @@ -331,19 +333,23 @@ protected void doRun() throws Exception { indexRequest.process(indexCreated, mappingMd, concreteIndex.getName()); break; case UPDATE: - TransportUpdateAction.resolveAndValidateRouting(metaData, concreteIndex.getName(), (UpdateRequest) docWriteRequest); + TransportUpdateAction.resolveAndValidateRouting(metaData, concreteIndex.getName(), + (UpdateRequest) docWriteRequest); break; case DELETE: - docWriteRequest.routing(metaData.resolveWriteIndexRouting(docWriteRequest.parent(), docWriteRequest.routing(), docWriteRequest.index())); + docWriteRequest.routing(metaData.resolveWriteIndexRouting(docWriteRequest.parent(), docWriteRequest.routing(), + docWriteRequest.index())); // check if routing is required, if so, throw error if routing wasn't specified - if (docWriteRequest.routing() == null && metaData.routingRequired(concreteIndex.getName(), docWriteRequest.type())) { + if (docWriteRequest.routing() == null && metaData.routingRequired(concreteIndex.getName(), + docWriteRequest.type())) { throw new RoutingMissingException(concreteIndex.getName(), docWriteRequest.type(), docWriteRequest.id()); } break; default: throw new AssertionError("request type not supported: [" + docWriteRequest.opType() + "]"); } } catch (ElasticsearchParseException | IllegalArgumentException | RoutingMissingException e) { - BulkItemResponse.Failure failure = new BulkItemResponse.Failure(concreteIndex.getName(), docWriteRequest.type(), docWriteRequest.id(), e); + BulkItemResponse.Failure failure = new BulkItemResponse.Failure(concreteIndex.getName(), docWriteRequest.type(), + docWriteRequest.id(), e); BulkItemResponse bulkItemResponse = new BulkItemResponse(i, docWriteRequest.opType(), failure); responses.set(i, bulkItemResponse); // make sure the request gets never processed again @@ -359,13 +365,15 @@ protected void doRun() throws Exception { continue; } String concreteIndex = concreteIndices.getConcreteIndex(request.index()).getName(); - ShardId shardId = clusterService.operationRouting().indexShards(clusterState, concreteIndex, request.id(), request.routing()).shardId(); + ShardId shardId = clusterService.operationRouting().indexShards(clusterState, concreteIndex, request.id(), + request.routing()).shardId(); List shardRequests = requestsByShard.computeIfAbsent(shardId, shard -> new ArrayList<>()); shardRequests.add(new BulkItemRequest(i, request)); } if (requestsByShard.isEmpty()) { - listener.onResponse(new BulkResponse(responses.toArray(new BulkItemResponse[responses.length()]), buildTookInMillis(startTimeNanos))); + listener.onResponse(new BulkResponse(responses.toArray(new BulkItemResponse[responses.length()]), + buildTookInMillis(startTimeNanos))); return; } @@ -411,7 +419,8 @@ public void onFailure(Exception e) { } private void finishHim() { - listener.onResponse(new BulkResponse(responses.toArray(new BulkItemResponse[responses.length()]), buildTookInMillis(startTimeNanos))); + listener.onResponse(new BulkResponse(responses.toArray(new BulkItemResponse[responses.length()]), + buildTookInMillis(startTimeNanos))); } }); } @@ -539,7 +548,8 @@ void processBulkIndexIngestRequest(Task task, BulkRequest original, ActionListen } else { long ingestTookInMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - ingestStartTimeInNanos); BulkRequest bulkRequest = bulkRequestModifier.getBulkRequest(); - 
ActionListener actionListener = bulkRequestModifier.wrapActionListenerIfNeeded(ingestTookInMillis, listener); + ActionListener actionListener = bulkRequestModifier.wrapActionListenerIfNeeded(ingestTookInMillis, + listener); if (bulkRequest.requests().isEmpty()) { // at this stage, the transport bulk action can't deal with a bulk request with no requests, // so we stop and send an empty response back to the client. @@ -632,7 +642,8 @@ void markCurrentItemAsFailed(Exception e) { // 2) Add a bulk item failure for this request // 3) Continue with the next request in the bulk. failedSlots.set(currentSlot); - BulkItemResponse.Failure failure = new BulkItemResponse.Failure(indexRequest.index(), indexRequest.type(), indexRequest.id(), e); + BulkItemResponse.Failure failure = new BulkItemResponse.Failure(indexRequest.index(), indexRequest.type(), + indexRequest.id(), e); itemResponses.add(new BulkItemResponse(currentSlot, indexRequest.opType(), failure)); } @@ -645,7 +656,8 @@ static final class IngestBulkResponseListener implements ActionListener itemResponses; private final ActionListener actionListener; - IngestBulkResponseListener(long ingestTookInMillis, int[] originalSlots, List itemResponses, ActionListener actionListener) { + IngestBulkResponseListener(long ingestTookInMillis, int[] originalSlots, List itemResponses, + ActionListener actionListener) { this.ingestTookInMillis = ingestTookInMillis; this.itemResponses = itemResponses; this.actionListener = actionListener; diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java index 32a2d5f056af9..dc07caf63923e 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java @@ -97,7 +97,7 @@ public TransportShardBulkAction(Settings settings, TransportService transportSer } @Override - protected TransportRequestOptions transportOptions() { + protected TransportRequestOptions transportOptions(Settings settings) { return BulkAction.INSTANCE.transportOptions(settings); } diff --git a/server/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java b/server/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java index 50d1f4cb8e9c7..ef4adaae4b0dd 100644 --- a/server/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java +++ b/server/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java @@ -24,6 +24,7 @@ import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.support.replication.ReplicatedWriteRequest; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lucene.uid.Versions; @@ -45,7 +46,8 @@ * @see org.elasticsearch.client.Client#delete(DeleteRequest) * @see org.elasticsearch.client.Requests#deleteRequest(String) */ -public class DeleteRequest extends ReplicatedWriteRequest implements DocWriteRequest, CompositeIndicesRequest { +public class DeleteRequest extends ReplicatedWriteRequest + implements DocWriteRequest, CompositeIndicesRequest { private String type; private String id; @@ -83,14 +85,15 @@ public DeleteRequest(String index, String type, String id) { @Override public ActionRequestValidationException validate() { ActionRequestValidationException validationException = 
super.validate(); - if (type == null) { + if (Strings.isEmpty(type)) { validationException = addValidationError("type is missing", validationException); } - if (id == null) { + if (Strings.isEmpty(id)) { validationException = addValidationError("id is missing", validationException); } - if (!versionType.validateVersionForWrites(version)) { - validationException = addValidationError("illegal version value [" + version + "] for version type [" + versionType.name() + "]", validationException); + if (versionType.validateVersionForWrites(version) == false) { + validationException = addValidationError("illegal version value [" + version + "] for version type [" + + versionType.name() + "]", validationException); } if (versionType == VersionType.FORCE) { validationException = addValidationError("version type [force] may no longer be used", validationException); diff --git a/server/src/main/java/org/elasticsearch/action/explain/ExplainRequest.java b/server/src/main/java/org/elasticsearch/action/explain/ExplainRequest.java index 6fdf355c0670c..ee18dfd8d2140 100644 --- a/server/src/main/java/org/elasticsearch/action/explain/ExplainRequest.java +++ b/server/src/main/java/org/elasticsearch/action/explain/ExplainRequest.java @@ -34,6 +34,8 @@ import java.io.IOException; +import static org.elasticsearch.action.ValidateActions.addValidationError; + /** * Explain request encapsulating the explain query and document identifier to get an explanation for. */ @@ -152,11 +154,11 @@ public ExplainRequest filteringAlias(AliasFilter filteringAlias) { @Override public ActionRequestValidationException validate() { ActionRequestValidationException validationException = super.validateNonNullIndex(); - if (type == null) { - validationException = ValidateActions.addValidationError("type is missing", validationException); + if (Strings.isEmpty(type)) { + validationException = addValidationError("type is missing", validationException); } - if (id == null) { - validationException = ValidateActions.addValidationError("id is missing", validationException); + if (Strings.isEmpty(id)) { + validationException = addValidationError("id is missing", validationException); } if (query == null) { validationException = ValidateActions.addValidationError("query is missing", validationException); diff --git a/server/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java b/server/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java index b539f20dd96e4..7e6c18c3e5cfb 100644 --- a/server/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java +++ b/server/src/main/java/org/elasticsearch/action/explain/TransportExplainAction.java @@ -114,7 +114,8 @@ protected ExplainResponse shardOperation(ExplainRequest request, ShardId shardId // Advantage is that we're not opening a second searcher to retrieve the _source. Also // because we are working in the same searcher in engineGetResult we can be sure that a // doc isn't deleted between the initial get and this call. 
- GetResult getResult = context.indexShard().getService().get(result, request.id(), request.type(), request.storedFields(), request.fetchSourceContext()); + GetResult getResult = context.indexShard().getService().get(result, request.id(), request.type(), request.storedFields(), + request.fetchSourceContext()); return new ExplainResponse(shardId.getIndexName(), request.type(), request.id(), true, explanation, getResult); } else { return new ExplainResponse(shardId.getIndexName(), request.type(), request.id(), true, explanation); @@ -134,7 +135,8 @@ protected ExplainResponse newResponse() { @Override protected ShardIterator shards(ClusterState state, InternalRequest request) { return clusterService.operationRouting().getShards( - clusterService.state(), request.concreteIndex(), request.request().id(), request.request().routing(), request.request().preference() + clusterService.state(), request.concreteIndex(), request.request().id(), request.request().routing(), + request.request().preference() ); } diff --git a/server/src/main/java/org/elasticsearch/action/get/GetRequest.java b/server/src/main/java/org/elasticsearch/action/get/GetRequest.java index 7a9b4ae18d39b..998899446e802 100644 --- a/server/src/main/java/org/elasticsearch/action/get/GetRequest.java +++ b/server/src/main/java/org/elasticsearch/action/get/GetRequest.java @@ -24,6 +24,7 @@ import org.elasticsearch.action.ValidateActions; import org.elasticsearch.action.support.single.shard.SingleShardRequest; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lucene.uid.Versions; @@ -32,6 +33,8 @@ import java.io.IOException; +import static org.elasticsearch.action.ValidateActions.addValidationError; + /** * A request to get a document (its source) from an index based on its type (optional) and id. Best created using * {@link org.elasticsearch.client.Requests#getRequest(String)}. 
@@ -91,15 +94,15 @@ public GetRequest(String index, String type, String id) { @Override public ActionRequestValidationException validate() { ActionRequestValidationException validationException = super.validateNonNullIndex(); - if (type == null) { - validationException = ValidateActions.addValidationError("type is missing", validationException); + if (Strings.isEmpty(type)) { + validationException = addValidationError("type is missing", validationException); } - if (id == null) { - validationException = ValidateActions.addValidationError("id is missing", validationException); + if (Strings.isEmpty(id)) { + validationException = addValidationError("id is missing", validationException); } - if (!versionType.validateVersionForReads(version)) { - validationException = ValidateActions.addValidationError("illegal version value [" + version + "] for version type [" + versionType.name() + "]", - validationException); + if (versionType.validateVersionForReads(version) == false) { + validationException = ValidateActions.addValidationError("illegal version value [" + version + "] for version type [" + + versionType.name() + "]", validationException); } if (versionType == VersionType.FORCE) { validationException = ValidateActions.addValidationError("version type [force] may no longer be used", validationException); diff --git a/server/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java b/server/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java index 6246ca38ef75d..2244e6c158369 100644 --- a/server/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java +++ b/server/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java @@ -372,7 +372,8 @@ public MultiGetRequest add(@Nullable String defaultIndex, @Nullable String defau currentFieldName = parser.currentName(); } else if (token == Token.START_ARRAY) { if ("docs".equals(currentFieldName)) { - parseDocuments(parser, this.items, defaultIndex, defaultType, defaultFields, defaultFetchSource, defaultRouting, allowExplicitIndex); + parseDocuments(parser, this.items, defaultIndex, defaultType, defaultFields, defaultFetchSource, defaultRouting, + allowExplicitIndex); } else if ("ids".equals(currentFieldName)) { parseIds(parser, this.items, defaultIndex, defaultType, defaultFields, defaultFetchSource, defaultRouting); } else { @@ -396,7 +397,9 @@ public MultiGetRequest add(@Nullable String defaultIndex, @Nullable String defau return this; } - private static void parseDocuments(XContentParser parser, List items, @Nullable String defaultIndex, @Nullable String defaultType, @Nullable String[] defaultFields, @Nullable FetchSourceContext defaultFetchSource, @Nullable String defaultRouting, boolean allowExplicitIndex) throws IOException { + private static void parseDocuments(XContentParser parser, List items, @Nullable String defaultIndex, @Nullable String defaultType, + @Nullable String[] defaultFields, @Nullable FetchSourceContext defaultFetchSource, + @Nullable String defaultRouting, boolean allowExplicitIndex) throws IOException { String currentFieldName = null; Token token; while ((token = parser.nextToken()) != Token.END_ARRAY) { @@ -510,18 +513,22 @@ private static void parseDocuments(XContentParser parser, List items, @Nul } else { aFields = defaultFields; } - items.add(new Item(index, type, id).routing(routing).storedFields(aFields).parent(parent).version(version).versionType(versionType) + items.add(new Item(index, type, id).routing(routing).storedFields(aFields).parent(parent).version(version) + 
.versionType(versionType) .fetchSourceContext(fetchSourceContext == FetchSourceContext.FETCH_SOURCE ? defaultFetchSource : fetchSourceContext)); } } - public static void parseIds(XContentParser parser, List items, @Nullable String defaultIndex, @Nullable String defaultType, @Nullable String[] defaultFields, @Nullable FetchSourceContext defaultFetchSource, @Nullable String defaultRouting) throws IOException { + public static void parseIds(XContentParser parser, List items, @Nullable String defaultIndex, @Nullable String defaultType, + @Nullable String[] defaultFields, @Nullable FetchSourceContext defaultFetchSource, + @Nullable String defaultRouting) throws IOException { Token token; while ((token = parser.nextToken()) != Token.END_ARRAY) { if (!token.isValue()) { throw new IllegalArgumentException("ids array element should only contain ids"); } - items.add(new Item(defaultIndex, defaultType, parser.text()).storedFields(defaultFields).fetchSourceContext(defaultFetchSource).routing(defaultRouting)); + items.add(new Item(defaultIndex, defaultType, parser.text()).storedFields(defaultFields).fetchSourceContext(defaultFetchSource) + .routing(defaultRouting)); } } diff --git a/server/src/main/java/org/elasticsearch/action/get/TransportGetAction.java b/server/src/main/java/org/elasticsearch/action/get/TransportGetAction.java index f27012ef8220e..93d1e882d58e6 100644 --- a/server/src/main/java/org/elasticsearch/action/get/TransportGetAction.java +++ b/server/src/main/java/org/elasticsearch/action/get/TransportGetAction.java @@ -61,14 +61,16 @@ protected boolean resolveIndex(GetRequest request) { @Override protected ShardIterator shards(ClusterState state, InternalRequest request) { return clusterService.operationRouting() - .getShards(clusterService.state(), request.concreteIndex(), request.request().id(), request.request().routing(), request.request().preference()); + .getShards(clusterService.state(), request.concreteIndex(), request.request().id(), request.request().routing(), + request.request().preference()); } @Override protected void resolveRequest(ClusterState state, InternalRequest request) { IndexMetaData indexMeta = state.getMetaData().index(request.concreteIndex()); // update the routing (request#index here is possibly an alias) - request.request().routing(state.metaData().resolveIndexRouting(request.request().parent(), request.request().routing(), request.request().index())); + request.request().routing(state.metaData().resolveIndexRouting(request.request().parent(), request.request().routing(), + request.request().index())); // Fail fast on the node that received the request. 
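        // routing can still be null here when neither the request nor an alias supplied one; if the
        // mapping declares routing as required for this type, reject the request on this node rather
        // than sending it to an arbitrary shard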
if (request.request().routing() == null && state.getMetaData().routingRequired(request.concreteIndex(), request.request().type())) { throw new RoutingMissingException(request.concreteIndex(), request.request().type(), request.request().id()); diff --git a/server/src/main/java/org/elasticsearch/action/get/TransportShardMultiGetAction.java b/server/src/main/java/org/elasticsearch/action/get/TransportShardMultiGetAction.java index 7a7c02ad476e7..46d0b374a59c1 100644 --- a/server/src/main/java/org/elasticsearch/action/get/TransportShardMultiGetAction.java +++ b/server/src/main/java/org/elasticsearch/action/get/TransportShardMultiGetAction.java @@ -86,8 +86,8 @@ protected MultiGetShardResponse shardOperation(MultiGetShardRequest request, Sha for (int i = 0; i < request.locations.size(); i++) { MultiGetRequest.Item item = request.items.get(i); try { - GetResult getResult = indexShard.getService().get(item.type(), item.id(), item.storedFields(), request.realtime(), item.version(), - item.versionType(), item.fetchSourceContext()); + GetResult getResult = indexShard.getService().get(item.type(), item.id(), item.storedFields(), request.realtime(), + item.version(), item.versionType(), item.fetchSourceContext()); response.add(request.locations.get(i), new GetResponse(getResult)); } catch (RuntimeException e) { if (TransportActions.isShardNotAvailableException(e)) { diff --git a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java index 7240b62876c05..57bc50ac36211 100644 --- a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java @@ -156,12 +156,14 @@ public ActionRequestValidationException validate() { final long resolvedVersion = resolveVersionDefaults(); if (opType() == OpType.CREATE) { if (versionType != VersionType.INTERNAL) { - validationException = addValidationError("create operations only support internal versioning. use index instead", validationException); + validationException = addValidationError("create operations only support internal versioning. use index instead", + validationException); return validationException; } if (resolvedVersion != Versions.MATCH_DELETED) { - validationException = addValidationError("create operations do not support explicit versions. use index instead", validationException); + validationException = addValidationError("create operations do not support explicit versions. use index instead", + validationException); return validationException; } } @@ -171,7 +173,8 @@ public ActionRequestValidationException validate() { } if (!versionType.validateVersionForWrites(resolvedVersion)) { - validationException = addValidationError("illegal version value [" + resolvedVersion + "] for version type [" + versionType.name() + "]", validationException); + validationException = addValidationError("illegal version value [" + resolvedVersion + "] for version type [" + + versionType.name() + "]", validationException); } if (versionType == VersionType.FORCE) { @@ -362,7 +365,8 @@ public IndexRequest source(XContentType xContentType, Object... 
source) { throw new IllegalArgumentException("The number of object passed must be even but was [" + source.length + "]"); } if (source.length == 2 && source[0] instanceof BytesReference && source[1] instanceof Boolean) { - throw new IllegalArgumentException("you are using the removed method for source with bytes and unsafe flag, the unsafe flag was removed, please just use source(BytesReference)"); + throw new IllegalArgumentException("you are using the removed method for source with bytes and unsafe flag, the unsafe flag" + + " was removed, please just use source(BytesReference)"); } try { XContentBuilder builder = XContentFactory.contentBuilder(xContentType); diff --git a/server/src/main/java/org/elasticsearch/action/main/TransportMainAction.java b/server/src/main/java/org/elasticsearch/action/main/TransportMainAction.java index 368696a9553d9..75c2eee25e4fb 100644 --- a/server/src/main/java/org/elasticsearch/action/main/TransportMainAction.java +++ b/server/src/main/java/org/elasticsearch/action/main/TransportMainAction.java @@ -36,6 +36,7 @@ public class TransportMainAction extends HandledTransportAction { + private final String nodeName; private final ClusterService clusterService; @Inject @@ -43,16 +44,16 @@ public TransportMainAction(Settings settings, ThreadPool threadPool, TransportSe ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, ClusterService clusterService) { super(settings, MainAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, MainRequest::new); + this.nodeName = Node.NODE_NAME_SETTING.get(settings); this.clusterService = clusterService; } @Override protected void doExecute(MainRequest request, ActionListener listener) { ClusterState clusterState = clusterService.state(); - assert Node.NODE_NAME_SETTING.exists(settings); final boolean available = clusterState.getBlocks().hasGlobalBlock(RestStatus.SERVICE_UNAVAILABLE) == false; listener.onResponse( - new MainResponse(Node.NODE_NAME_SETTING.get(settings), Version.CURRENT, clusterState.getClusterName(), + new MainResponse(nodeName, Version.CURRENT, clusterState.getClusterName(), clusterState.metaData().clusterUUID(), Build.CURRENT, available)); } } diff --git a/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java b/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java index 772ce0fa4b056..0fa0f238699a6 100644 --- a/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java +++ b/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java @@ -58,7 +58,7 @@ public class MultiSearchRequest extends ActionRequest implements CompositeIndice private int maxConcurrentSearchRequests = 0; private List requests = new ArrayList<>(); - private IndicesOptions indicesOptions = IndicesOptions.strictExpandOpenAndForbidClosed(); + private IndicesOptions indicesOptions = IndicesOptions.strictExpandOpenAndForbidClosedIgnoreThrottled(); /** * Add a search request to execute. 
Note, the order is important, the search response will be returned in the @@ -288,7 +288,7 @@ public static byte[] writeMultiLineFormat(MultiSearchRequest multiSearchRequest, } return output.toByteArray(); } - + public static void writeSearchRequestParams(SearchRequest request, XContentBuilder xContentBuilder) throws IOException { xContentBuilder.startObject(); if (request.indices() != null) { diff --git a/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequestBuilder.java index 6cebb73fb4fa6..f08fe1ed57890 100644 --- a/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/search/MultiSearchRequestBuilder.java @@ -28,6 +28,7 @@ */ public class MultiSearchRequestBuilder extends ActionRequestBuilder { + public MultiSearchRequestBuilder(ElasticsearchClient client, MultiSearchAction action) { super(client, action, new MultiSearchRequest()); } @@ -40,7 +41,8 @@ public MultiSearchRequestBuilder(ElasticsearchClient client, MultiSearchAction a * will not be used (if set). */ public MultiSearchRequestBuilder add(SearchRequest request) { - if (request.indicesOptions() == IndicesOptions.strictExpandOpenAndForbidClosed() && request().indicesOptions() != IndicesOptions.strictExpandOpenAndForbidClosed()) { + if (request.indicesOptions() == IndicesOptions.strictExpandOpenAndForbidClosed() + && request().indicesOptions() != IndicesOptions.strictExpandOpenAndForbidClosed()) { request.indicesOptions(request().indicesOptions()); } @@ -53,7 +55,8 @@ public MultiSearchRequestBuilder add(SearchRequest request) { * same order as the search requests. */ public MultiSearchRequestBuilder add(SearchRequestBuilder request) { - if (request.request().indicesOptions() == IndicesOptions.strictExpandOpenAndForbidClosed() && request().indicesOptions() != IndicesOptions.strictExpandOpenAndForbidClosed()) { + if (request.request().indicesOptions() == SearchRequest.DEFAULT_INDICES_OPTIONS + && request().indicesOptions() != SearchRequest.DEFAULT_INDICES_OPTIONS) { request.request().indicesOptions(request().indicesOptions()); } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java index 1c67d7cbb6811..fb74b211c9011 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java @@ -33,8 +33,6 @@ import org.apache.lucene.search.TopFieldDocs; import org.apache.lucene.search.grouping.CollapseTopFieldDocs; import org.elasticsearch.common.collect.HppcMaps; -import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; @@ -68,7 +66,7 @@ import java.util.stream.Collectors; import java.util.stream.StreamSupport; -public final class SearchPhaseController extends AbstractComponent { +public final class SearchPhaseController { private static final ScoreDoc[] EMPTY_DOCS = new ScoreDoc[0]; @@ -76,11 +74,9 @@ public final class SearchPhaseController extends AbstractComponent { /** * Constructor. 
diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java
index 1c67d7cbb6811..fb74b211c9011 100644
--- a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java
+++ b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java
@@ -33,8 +33,6 @@
 import org.apache.lucene.search.TopFieldDocs;
 import org.apache.lucene.search.grouping.CollapseTopFieldDocs;
 import org.elasticsearch.common.collect.HppcMaps;
-import org.elasticsearch.common.component.AbstractComponent;
-import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.search.DocValueFormat;
 import org.elasticsearch.search.SearchHit;
 import org.elasticsearch.search.SearchHits;
@@ -68,7 +66,7 @@
 import java.util.stream.Collectors;
 import java.util.stream.StreamSupport;
 
-public final class SearchPhaseController extends AbstractComponent {
+public final class SearchPhaseController {
 
     private static final ScoreDoc[] EMPTY_DOCS = new ScoreDoc[0];
 
@@ -76,11 +74,9 @@ public final class SearchPhaseController extends AbstractComponent {
 
     /**
      * Constructor.
-     * @param settings Node settings
      * @param reduceContextFunction A function that builds a context for the reduce of an {@link InternalAggregation}
      */
-    public SearchPhaseController(Settings settings, Function<Boolean, ReduceContext> reduceContextFunction) {
-        super(settings);
+    public SearchPhaseController(Function<Boolean, ReduceContext> reduceContextFunction) {
         this.reduceContextFunction = reduceContextFunction;
     }
diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java b/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java
index 105d5d3aa7b08..8aa849b509ce3 100644
--- a/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java
+++ b/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java
@@ -91,7 +91,7 @@ public final class SearchRequest extends ActionRequest implements IndicesRequest
 
     private String[] types = Strings.EMPTY_ARRAY;
 
-    public static final IndicesOptions DEFAULT_INDICES_OPTIONS = IndicesOptions.strictExpandOpenAndForbidClosed();
+    public static final IndicesOptions DEFAULT_INDICES_OPTIONS = IndicesOptions.strictExpandOpenAndForbidClosedIgnoreThrottled();
 
     private IndicesOptions indicesOptions = DEFAULT_INDICES_OPTIONS;
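SearchPhaseController follows the broader cleanup that runs through this diff: classes stop extending `AbstractComponent`, so the `Settings` constructor parameter and the `super(settings)` call disappear, along with the now-unused imports. Where a class still wants a logger it declares its own. A minimal sketch of the resulting shape, with a hypothetical class name:

----
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

// Hypothetical component after the cleanup: no AbstractComponent base class,
// and no Settings parameter kept around solely to feed a logger.
public final class ExampleController {

    private static final Logger logger = LogManager.getLogger(ExampleController.class);

    public ExampleController() {
        // Dependencies that used to arrive via Settings are now passed in
        // explicitly where needed, or dropped entirely.
        logger.debug("ExampleController created");
    }
}
----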
diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java b/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java
index 199f1104954c0..0eee0ad7ea624 100644
--- a/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java
+++ b/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java
@@ -27,11 +27,9 @@
 import org.elasticsearch.action.support.HandledTransportAction.ChannelActionListener;
 import org.elasticsearch.action.support.IndicesOptions;
 import org.elasticsearch.cluster.node.DiscoveryNode;
-import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Writeable;
-import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
 import org.elasticsearch.search.SearchPhaseResult;
 import org.elasticsearch.search.SearchService;
@@ -70,7 +68,7 @@
 * An encapsulation of {@link org.elasticsearch.search.SearchService} operations exposed through
 * transport.
 */
-public class SearchTransportService extends AbstractComponent {
+public class SearchTransportService {
 
     public static final String FREE_CONTEXT_SCROLL_ACTION_NAME = "indices:data/read/search[free_context/scroll]";
     public static final String FREE_CONTEXT_ACTION_NAME = "indices:data/read/search[free_context]";
@@ -88,9 +86,8 @@ public class SearchTransportService extends AbstractComponent {
     private final BiFunction responseWrapper;
     private final Map clientConnections = ConcurrentCollections.newConcurrentMapWithAggressiveConcurrency();
 
-    public SearchTransportService(Settings settings, TransportService transportService,
+    public SearchTransportService(TransportService transportService,
                                   BiFunction responseWrapper) {
-        super(settings);
         this.transportService = transportService;
         this.responseWrapper = responseWrapper;
     }
diff --git a/server/src/main/java/org/elasticsearch/action/support/ActionFilter.java b/server/src/main/java/org/elasticsearch/action/support/ActionFilter.java
index 3e12d0cc84223..c23fe476dcc16 100644
--- a/server/src/main/java/org/elasticsearch/action/support/ActionFilter.java
+++ b/server/src/main/java/org/elasticsearch/action/support/ActionFilter.java
@@ -22,8 +22,6 @@
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.action.ActionResponse;
-import org.elasticsearch.common.component.AbstractComponent;
-import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.tasks.Task;
 
 /**
@@ -47,12 +45,7 @@ void apply(Task
     * filter chain. This base class should serve any action filter implementations that doesn't require
     * to apply async filtering logic.
     */
-    abstract class Simple extends AbstractComponent implements ActionFilter {
-
-        protected Simple(Settings settings) {
-            super(settings);
-        }
-
+    abstract class Simple implements ActionFilter {
        @Override
        public final void apply(Task task, String action, Request request, ActionListener listener,
                ActionFilterChain chain) {
diff --git a/server/src/main/java/org/elasticsearch/action/support/ActiveShardsObserver.java b/server/src/main/java/org/elasticsearch/action/support/ActiveShardsObserver.java
index 30d6461ef614b..b87ff9f7ec3bd 100644
--- a/server/src/main/java/org/elasticsearch/action/support/ActiveShardsObserver.java
+++ b/server/src/main/java/org/elasticsearch/action/support/ActiveShardsObserver.java
@@ -24,7 +24,6 @@
 import org.elasticsearch.cluster.ClusterStateObserver;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.component.AbstractComponent;
-import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.node.NodeClosedException;
 import org.elasticsearch.threadpool.ThreadPool;
@@ -42,8 +41,7 @@ public class ActiveShardsObserver extends AbstractComponent {
 
     private final ClusterService clusterService;
     private final ThreadPool threadPool;
 
-    public ActiveShardsObserver(final Settings settings, final ClusterService clusterService, final ThreadPool threadPool) {
-        super(settings);
+    public ActiveShardsObserver(final ClusterService clusterService, final ThreadPool threadPool) {
         this.clusterService = clusterService;
         this.threadPool = threadPool;
     }
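With `ActionFilter.Simple` no longer extending `AbstractComponent`, a synchronous filter subclass needs no constructor at all. A sketch of what a trivial subclass might look like after this change; the filter name is invented and the abstract `apply` signature is assumed from the surrounding context rather than shown in this diff:

----
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.support.ActionFilter;

// Invented no-op filter; note there is no Settings-taking constructor to call.
public class PassThroughFilter extends ActionFilter.Simple {

    @Override
    public int order() {
        return 0; // run before filters with higher order values
    }

    @Override
    protected boolean apply(String action, ActionRequest request, ActionListener<?> listener) {
        return true; // returning true lets the request continue down the chain
    }
}
----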
diff --git a/server/src/main/java/org/elasticsearch/action/support/DestructiveOperations.java b/server/src/main/java/org/elasticsearch/action/support/DestructiveOperations.java
index 56d5bf206f370..583c34e09641a 100644
--- a/server/src/main/java/org/elasticsearch/action/support/DestructiveOperations.java
+++ b/server/src/main/java/org/elasticsearch/action/support/DestructiveOperations.java
@@ -19,7 +19,6 @@
 
 package org.elasticsearch.action.support;
 
-import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.settings.ClusterSettings;
 import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Setting.Property;
@@ -28,7 +27,7 @@
 /**
 * Helper for dealing with destructive operations and wildcard usage.
 */
-public final class DestructiveOperations extends AbstractComponent {
+public final class DestructiveOperations {
 
     /**
     * Setting which controls whether wildcard usage (*, prefix*, _all) is allowed.
@@ -38,7 +37,6 @@ public final class DestructiveOperations extends AbstractComponent {
     private volatile boolean destructiveRequiresName;
 
     public DestructiveOperations(Settings settings, ClusterSettings clusterSettings) {
-        super(settings);
         destructiveRequiresName = REQUIRES_NAME_SETTING.get(settings);
         clusterSettings.addSettingsUpdateConsumer(REQUIRES_NAME_SETTING, this::setDestructiveRequiresName);
     }
diff --git a/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java b/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java
index c707fed6ddf7b..48a479d72b6a7 100644
--- a/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java
+++ b/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java
@@ -81,7 +81,8 @@ public enum Option {
         IGNORE_ALIASES,
         ALLOW_NO_INDICES,
         FORBID_ALIASES_TO_MULTIPLE_INDICES,
-        FORBID_CLOSED_INDICES;
+        FORBID_CLOSED_INDICES,
+        IGNORE_THROTTLED;
 
         public static final EnumSet