diff --git a/.github/workflows/version.yml b/.github/workflows/version.yml
index eaf4d085c6946..fdf42a9a2731e 100644
--- a/.github/workflows/version.yml
+++ b/.github/workflows/version.yml
@@ -30,7 +30,11 @@ jobs:
           CURRENT_VERSION_ARRAY[2]=$((CURRENT_VERSION_ARRAY[2]+1))
           NEXT_VERSION=$(IFS=. ; echo "${CURRENT_VERSION_ARRAY[*]:0:3}")
           NEXT_VERSION_UNDERSCORE=$(IFS=_ ; echo "V_${CURRENT_VERSION_ARRAY[*]:0:3}")
-          NEXT_VERSION_ID=$(IFS=0 ; echo "${CURRENT_VERSION_ARRAY[*]:0:3}99")
+          if [[ ${#CURRENT_VERSION_ARRAY[2]} -gt 1 ]]; then
+            NEXT_VERSION_ID="${CURRENT_VERSION_ARRAY[0]:0:3}0${CURRENT_VERSION_ARRAY[1]:0:3}${CURRENT_VERSION_ARRAY[2]:0:3}99"
+          else
+            NEXT_VERSION_ID=$(IFS=0 ; echo "${CURRENT_VERSION_ARRAY[*]:0:3}99")
+          fi
           echo "TAG=$TAG" >> $GITHUB_ENV
           echo "BASE=$BASE" >> $GITHUB_ENV
           echo "BASE_X=$BASE_X" >> $GITHUB_ENV
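The new branch above exists because the old `IFS=0` join is only unambiguous while every version component is a single digit; a two-digit patch such as 2.9.10 would otherwise produce a colliding ID. A sketch of the resulting encoding in Java; the class and method names are illustrative, and the `patch > 9` test assumes components carry no leading zeros:

```java
// Illustrative mirror of the NEXT_VERSION_ID computation in the workflow above.
final class NextVersionId {
    static String encode(int major, int minor, int patch) {
        if (patch > 9) {
            // Two-digit patch branch: major + "0" + minor + patch + "99".
            return major + "0" + minor + "" + patch + "99"; // 2.9.10 -> "2091099"
        }
        // IFS=0 join: components separated by "0", then "99" appended.
        return major + "0" + minor + "0" + patch + "99";    // 2.9.1 -> "2090199"
    }

    public static void main(String[] args) {
        System.out.println(NextVersionId.encode(2, 9, 1));  // 2090199
        System.out.println(NextVersionId.encode(2, 9, 10)); // 2091099
    }
}
```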
diff --git a/CHANGELOG.md b/CHANGELOG.md
index b183dddc3c489..0ad8838bd8c91 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -14,7 +14,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 ### Dependencies
 - Bump `log4j-core` from 2.18.0 to 2.19.0
 - Bump `forbiddenapis` from 3.3 to 3.4
-- Bump `avro` from 1.11.0 to 1.11.1
+- Bump `avro` from 1.11.1 to 1.11.2
 - Bump `woodstox-core` from 6.3.0 to 6.3.1
 - Bump `xmlbeans` from 5.1.0 to 5.1.1 ([#4354](https://github.com/opensearch-project/OpenSearch/pull/4354))
 - Bump `reactor-netty-core` from 1.0.19 to 1.0.22 ([#4447](https://github.com/opensearch-project/OpenSearch/pull/4447))
@@ -51,6 +51,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Improve summary error message for invalid setting updates ([#4792](https://github.com/opensearch-project/OpenSearch/pull/4792))
 - Pass localNode info to all plugins on node start ([#7919](https://github.com/opensearch-project/OpenSearch/pull/7919))
 - Improved performance of parsing floating point numbers ([#7909](https://github.com/opensearch-project/OpenSearch/pull/7909))
+- Move span actions to Scope ([#8411](https://github.com/opensearch-project/OpenSearch/pull/8411))
 
 ### Deprecated
 
@@ -76,6 +77,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Support OpenSSL Provider with default Netty allocator ([#5460](https://github.com/opensearch-project/OpenSearch/pull/5460))
 - Replaces ZipInputStream with ZipFile to fix Zip Slip vulnerability ([#7230](https://github.com/opensearch-project/OpenSearch/pull/7230))
 - Add missing validation/parsing of SearchBackpressureMode of SearchBackpressureSettings ([#7541](https://github.com/opensearch-project/OpenSearch/pull/7541))
+- Adds log4j configuration for telemetry LogSpanExporter ([#8393](https://github.com/opensearch-project/OpenSearch/pull/8393))
 
 ### Security
 
@@ -95,6 +97,11 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Start replication checkpointTimers on primary before segments upload to remote store. ([#8221](https://github.com/opensearch-project/OpenSearch/pull/8221))
 - Add distributed tracing framework ([#7543](https://github.com/opensearch-project/OpenSearch/issues/7543))
 - Enable Point based optimization for custom comparators ([#8168](https://github.com/opensearch-project/OpenSearch/pull/8168))
+- [Extensions] Support extension additional settings with extension REST initialization ([#8414](https://github.com/opensearch-project/OpenSearch/pull/8414))
+- Adds mock implementation for TelemetryPlugin ([#7545](https://github.com/opensearch-project/OpenSearch/issues/7545))
+- Support transport action names when registering NamedRoutes ([#7957](https://github.com/opensearch-project/OpenSearch/pull/7957))
+- Create concept of persistent ThreadContext headers that are unstashable ([#8291](https://github.com/opensearch-project/OpenSearch/pull/8291))
+- Enable Partial Flat Object ([#7997](https://github.com/opensearch-project/OpenSearch/pull/7997))
 
 ### Dependencies
 - Bump `com.azure:azure-storage-common` from 12.21.0 to 12.21.1 (#7566, #7814)
@@ -127,6 +134,9 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - [Upgrade] Lucene 9.7.0 release (#8272)
 - Bump `org.jboss.resteasy:resteasy-jackson2-provider` from 3.0.26.Final to 6.2.4.Final in /qa/wildfly ([#8209](https://github.com/opensearch-project/OpenSearch/pull/8209))
 - Bump `com.google.api-client:google-api-client` from 1.34.0 to 2.2.0 ([#8276](https://github.com/opensearch-project/OpenSearch/pull/8276))
+- Update Apache HttpCore/HttpClient and Apache HttpCore5/HttpClient5 dependencies ([#8434](https://github.com/opensearch-project/OpenSearch/pull/8434))
+- Bump `org.apache.maven:maven-model` from 3.9.2 to 3.9.3 (#8403)
+- Bump `io.projectreactor.netty:reactor-netty` and `io.projectreactor.netty:reactor-netty-core` from 1.1.7 to 1.1.8 (#8405)
 
 ### Changed
 - Replace jboss-annotations-api_1.2_spec with jakarta.annotation-api ([#7836](https://github.com/opensearch-project/OpenSearch/pull/7836))
@@ -144,6 +154,8 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 - Add self-organizing hash table to improve the performance of bucket aggregations ([#7652](https://github.com/opensearch-project/OpenSearch/pull/7652))
 - Check UTF16 string size before converting to String to avoid OOME ([#7963](https://github.com/opensearch-project/OpenSearch/pull/7963))
 - Move ZSTD compression codecs out of the sandbox ([#7908](https://github.com/opensearch-project/OpenSearch/pull/7908))
+- Update ZSTD default compression level ([#8471](https://github.com/opensearch-project/OpenSearch/pull/8471))
+- [Search Pipelines] Pass pipeline creation context to processor factories ([#8164](https://github.com/opensearch-project/OpenSearch/pull/8164))
 
 ### Deprecated
diff --git a/benchmarks/build.gradle b/benchmarks/build.gradle
index 4a7825e9ba35b..02aa9319cc583 100644
--- a/benchmarks/build.gradle
+++ b/benchmarks/build.gradle
@@ -32,10 +32,16 @@ import org.opensearch.gradle.info.BuildParams
 
 apply plugin: 'opensearch.build'
 apply plugin: 'application'
 
-mainClassName = 'org.openjdk.jmh.Main'
 
 assemble.enabled = false
-archivesBaseName = 'opensearch-benchmarks'
+
+application {
+  mainClass = 'org.openjdk.jmh.Main'
+}
+
+base {
+  archivesBaseName = 'opensearch-benchmarks'
+}
 
 test.enabled = false
diff --git a/build.gradle b/build.gradle
index eec01e840149e..ca4c6c3635d57 100644
--- a/build.gradle
+++ b/build.gradle
@@ -327,7 +327,7 @@ allprojects
{ javadoc.options.addStringOption('Xwerror', '-quiet') } javadoc.options.tags = ["opensearch.internal", "opensearch.api", "opensearch.experimental"] - javadoc.options.addStringOption("-release", targetCompatibility.majorVersion) + javadoc.options.addStringOption("-release", java.targetCompatibility.majorVersion) } // support for reproducible builds @@ -375,7 +375,7 @@ allprojects { } else { // Link to non-shadowed dependant projects project.javadoc.dependsOn "${upstreamProject.path}:javadoc" - String externalLinkName = upstreamProject.archivesBaseName + String externalLinkName = upstreamProject.base.archivesBaseName String artifactPath = dep.group.replaceAll('\\.', '/') + '/' + externalLinkName.replaceAll('\\.', '/') + '/' + dep.version String projectRelativePath = project.relativePath(upstreamProject.buildDir) project.javadoc.options.linksOffline artifactsHost + "/javadoc/" + artifactPath, "${projectRelativePath}/docs/javadoc/" @@ -452,7 +452,7 @@ gradle.projectsEvaluated { testReportAggregation it } subprojects.findAll { it.pluginManager.hasPlugin('jacoco') }.forEach { - jacocoAggregation it + jacocoAggregation it } } } diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index 35f3fb87560e7..feb8da7c20984 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -34,7 +34,6 @@ import org.gradle.util.GradleVersion plugins { id 'java-gradle-plugin' id 'groovy' - id 'java-test-fixtures' } group = 'org.opensearch.gradle' @@ -79,9 +78,17 @@ if (JavaVersion.current() < JavaVersion.VERSION_11) { } sourceSets { + test { + java { + srcDirs += ['src/testFixtures/java'] + } + } integTest { compileClasspath += sourceSets["main"].output + configurations["testRuntimeClasspath"] runtimeClasspath += output + compileClasspath + java { + srcDirs += ['src/testFixtures/java'] + } } } @@ -111,23 +118,23 @@ dependencies { api 'org.apache.rat:apache-rat:0.15' api 'commons-io:commons-io:2.13.0' api "net.java.dev.jna:jna:5.13.0" - api 'gradle.plugin.com.github.johnrengelman:shadow:8.0.0' + api 'com.github.johnrengelman:shadow:8.1.1' api 'org.jdom:jdom2:2.0.6.1' api "org.jetbrains.kotlin:kotlin-stdlib-jdk8:${props.getProperty('kotlin')}" api 'de.thetaphi:forbiddenapis:3.5.1' api 'com.avast.gradle:gradle-docker-compose-plugin:0.16.12' api "org.yaml:snakeyaml:${props.getProperty('snakeyaml')}" - api 'org.apache.maven:maven-model:3.9.2' + api 'org.apache.maven:maven-model:3.9.3' api 'com.networknt:json-schema-validator:1.0.85' api 'org.jruby.jcodings:jcodings:1.0.58' api 'org.jruby.joni:joni:2.2.1' api "com.fasterxml.jackson.core:jackson-databind:${props.getProperty('jackson_databind')}" api "org.ajoberstar.grgit:grgit-core:5.2.0" - testFixturesApi "junit:junit:${props.getProperty('junit')}" - testFixturesApi "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${props.getProperty('randomizedrunner')}" - testFixturesApi gradleApi() - testFixturesApi gradleTestKit() + testImplementation "junit:junit:${props.getProperty('junit')}" + testImplementation "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${props.getProperty('randomizedrunner')}" + testRuntimeOnly gradleApi() + testRuntimeOnly gradleTestKit() testImplementation 'com.github.tomakehurst:wiremock-jre8-standalone:2.35.0' testImplementation "org.mockito:mockito-core:${props.getProperty('mockito')}" integTestImplementation('org.spockframework:spock-core:2.3-groovy-3.0') { @@ -164,8 +171,10 @@ if (project != rootProject) { apply plugin: 'opensearch.publish' allprojects { - targetCompatibility = JavaVersion.VERSION_11 - 
sourceCompatibility = JavaVersion.VERSION_11 + java { + targetCompatibility = JavaVersion.VERSION_11 + sourceCompatibility = JavaVersion.VERSION_11 + } } // groovydoc succeeds, but has some weird internal exception... @@ -174,7 +183,7 @@ if (project != rootProject) { // build-tools is not ready for primetime with these... tasks.named("dependencyLicenses").configure { it.enabled = false } dependenciesInfo.enabled = false - disableTasks('forbiddenApisMain', 'forbiddenApisTest', 'forbiddenApisIntegTest', 'forbiddenApisTestFixtures') + disableTasks('forbiddenApisMain', 'forbiddenApisTest', 'forbiddenApisIntegTest') jarHell.enabled = false thirdPartyAudit.enabled = false if (org.opensearch.gradle.info.BuildParams.inFipsJvm) { @@ -241,12 +250,6 @@ if (project != rootProject) { } } - // disable fail-on-warnings for this specific task which trips Java 11 bug - // https://bugs.openjdk.java.net/browse/JDK-8209058 - tasks.named("compileTestFixturesJava").configure { - options.compilerArgs -= '-Werror' - } - tasks.register("integTest", Test) { inputs.dir(file("src/testKit")).withPropertyName("testkit dir").withPathSensitivity(PathSensitivity.RELATIVE) systemProperty 'test.version_under_test', version diff --git a/buildSrc/reaper/build.gradle b/buildSrc/reaper/build.gradle index 4ccbec894e30e..58d06b02e9f4b 100644 --- a/buildSrc/reaper/build.gradle +++ b/buildSrc/reaper/build.gradle @@ -11,8 +11,10 @@ apply plugin: 'java' -targetCompatibility = JavaVersion.VERSION_11 -sourceCompatibility = JavaVersion.VERSION_11 +java { + targetCompatibility = JavaVersion.VERSION_11 + sourceCompatibility = JavaVersion.VERSION_11 +} jar { archiveFileName = "${project.name}.jar" diff --git a/buildSrc/src/main/groovy/org/opensearch/gradle/plugin/PluginBuildPlugin.groovy b/buildSrc/src/main/groovy/org/opensearch/gradle/plugin/PluginBuildPlugin.groovy index 2428d9e2fa4fa..556763333d279 100644 --- a/buildSrc/src/main/groovy/org/opensearch/gradle/plugin/PluginBuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/opensearch/gradle/plugin/PluginBuildPlugin.groovy @@ -44,6 +44,8 @@ import org.gradle.api.Plugin import org.gradle.api.Project import org.gradle.api.Task import org.gradle.api.plugins.BasePlugin +import org.gradle.api.plugins.BasePluginExtension +import org.gradle.api.plugins.JavaPluginExtension import org.gradle.api.publish.maven.MavenPublication import org.gradle.api.publish.maven.plugins.MavenPublishPlugin import org.gradle.api.publish.maven.tasks.GenerateMavenPom @@ -85,7 +87,9 @@ class PluginBuildPlugin implements Plugin { PluginPropertiesExtension extension1 = project.getExtensions().getByType(PluginPropertiesExtension.class) configurePublishing(project, extension1) String name = extension1.name - project.archivesBaseName = name + + BasePluginExtension base = project.getExtensions().findByType(BasePluginExtension.class) + base.archivesBaseName = name project.description = extension1.description if (extension1.name == null) { @@ -98,12 +102,13 @@ class PluginBuildPlugin implements Plugin { throw new InvalidUserDataException('classname is a required setting for opensearchplugin') } + JavaPluginExtension java = project.getExtensions().findByType(JavaPluginExtension.class) Map properties = [ 'name' : extension1.name, 'description' : extension1.description, 'version' : extension1.version, 'opensearchVersion' : Version.fromString(VersionProperties.getOpenSearch()).toString(), - 'javaVersion' : project.targetCompatibility as String, + 'javaVersion' : java.targetCompatibility as String, 'classname' : 
extension1.classname, 'customFolderName' : extension1.customFolderName, 'extendedPlugins' : extension1.extendedPlugins.join(','), @@ -156,8 +161,9 @@ class PluginBuildPlugin implements Plugin { } // always configure publishing for client jars project.publishing.publications.nebula(MavenPublication).artifactId(extension.name + "-client") + final BasePluginExtension base = project.getExtensions().findByType(BasePluginExtension.class) project.tasks.withType(GenerateMavenPom.class).configureEach { GenerateMavenPom generatePOMTask -> - generatePOMTask.destination = "${project.buildDir}/distributions/${project.archivesBaseName}-client-${project.versions.opensearch}.pom" + generatePOMTask.destination = "${project.buildDir}/distributions/${base.archivesBaseName}-client-${project.versions.opensearch}.pom" } } else { if (project.plugins.hasPlugin(MavenPublishPlugin)) { diff --git a/buildSrc/src/main/java/org/opensearch/gradle/PublishPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/PublishPlugin.java index 1015a2dfb7d72..97e923c366598 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/PublishPlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/PublishPlugin.java @@ -46,7 +46,7 @@ import org.gradle.api.XmlProvider; import org.gradle.api.artifacts.ProjectDependency; import org.gradle.api.plugins.BasePlugin; -import org.gradle.api.plugins.BasePluginConvention; +import org.gradle.api.plugins.BasePluginExtension; import org.gradle.api.plugins.JavaPlugin; import org.gradle.api.publish.PublishingExtension; import org.gradle.api.publish.maven.MavenPublication; @@ -77,7 +77,7 @@ public void apply(Project project) { } private static String getArchivesBaseName(Project project) { - return project.getConvention().getPlugin(BasePluginConvention.class).getArchivesBaseName(); + return project.getExtensions().getByType(BasePluginExtension.class).getArchivesBaseName(); } /**Configuration generation of maven poms. 
*/ diff --git a/buildSrc/src/main/java/org/opensearch/gradle/precommit/ForbiddenPatternsTask.java b/buildSrc/src/main/java/org/opensearch/gradle/precommit/ForbiddenPatternsTask.java index 754743b9b784c..6ef1e77f5138f 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/precommit/ForbiddenPatternsTask.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/precommit/ForbiddenPatternsTask.java @@ -36,7 +36,7 @@ import org.gradle.api.InvalidUserDataException; import org.gradle.api.file.FileCollection; import org.gradle.api.file.FileTree; -import org.gradle.api.plugins.JavaPluginConvention; +import org.gradle.api.plugins.JavaPluginExtension; import org.gradle.api.tasks.IgnoreEmptyDirectories; import org.gradle.api.tasks.Input; import org.gradle.api.tasks.InputFiles; @@ -106,8 +106,8 @@ public ForbiddenPatternsTask() { @IgnoreEmptyDirectories @PathSensitive(PathSensitivity.RELATIVE) public FileCollection getFiles() { - return getProject().getConvention() - .getPlugin(JavaPluginConvention.class) + return getProject().getExtensions() + .getByType(JavaPluginExtension.class) .getSourceSets() .stream() .map(sourceSet -> sourceSet.getAllSource().matching(filesFilter)) diff --git a/buildSrc/src/main/java/org/opensearch/gradle/precommit/LoggerUsageTask.java b/buildSrc/src/main/java/org/opensearch/gradle/precommit/LoggerUsageTask.java index ff9f6619d64e6..db215fb65ef95 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/precommit/LoggerUsageTask.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/precommit/LoggerUsageTask.java @@ -34,7 +34,7 @@ import org.opensearch.gradle.LoggedExec; import org.gradle.api.file.FileCollection; -import org.gradle.api.plugins.JavaPluginConvention; +import org.gradle.api.plugins.JavaPluginExtension; import org.gradle.api.tasks.CacheableTask; import org.gradle.api.tasks.Classpath; import org.gradle.api.tasks.IgnoreEmptyDirectories; @@ -82,8 +82,8 @@ public void setClasspath(FileCollection classpath) { @SkipWhenEmpty @IgnoreEmptyDirectories public FileCollection getClassDirectories() { - return getProject().getConvention() - .getPlugin(JavaPluginConvention.class) + return getProject().getExtensions() + .getByType(JavaPluginExtension.class) .getSourceSets() .stream() // Don't pick up all source sets like the java9 ones as logger-check doesn't support the class format diff --git a/buildSrc/src/main/java/org/opensearch/gradle/test/rest/CopyRestApiTask.java b/buildSrc/src/main/java/org/opensearch/gradle/test/rest/CopyRestApiTask.java index 1468c4cb1b537..0275664276877 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/test/rest/CopyRestApiTask.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/test/rest/CopyRestApiTask.java @@ -41,7 +41,7 @@ import org.gradle.api.file.ConfigurableFileCollection; import org.gradle.api.file.FileSystemOperations; import org.gradle.api.file.FileTree; -import org.gradle.api.plugins.JavaPluginConvention; +import org.gradle.api.plugins.JavaPluginExtension; import org.gradle.api.provider.ListProperty; import org.gradle.api.tasks.IgnoreEmptyDirectories; import org.gradle.api.tasks.Input; @@ -240,7 +240,7 @@ private File getTestOutputResourceDir() { private Optional getSourceSet() { Project project = getProject(); - return project.getConvention().findPlugin(JavaPluginConvention.class) == null + return project.getExtensions().findByType(JavaPluginExtension.class) == null ? 
Optional.empty() : Optional.ofNullable(GradleUtils.getJavaSourceSets(project).findByName(getSourceSetName())); } diff --git a/buildSrc/src/main/java/org/opensearch/gradle/test/rest/CopyRestTestsTask.java b/buildSrc/src/main/java/org/opensearch/gradle/test/rest/CopyRestTestsTask.java index dd94d040cb9d8..ebd6c49fd6157 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/test/rest/CopyRestTestsTask.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/test/rest/CopyRestTestsTask.java @@ -41,7 +41,7 @@ import org.gradle.api.file.ConfigurableFileCollection; import org.gradle.api.file.FileSystemOperations; import org.gradle.api.file.FileTree; -import org.gradle.api.plugins.JavaPluginConvention; +import org.gradle.api.plugins.JavaPluginExtension; import org.gradle.api.provider.ListProperty; import org.gradle.api.tasks.IgnoreEmptyDirectories; import org.gradle.api.tasks.Input; @@ -178,7 +178,7 @@ void copy() { private Optional getSourceSet() { Project project = getProject(); - return project.getConvention().findPlugin(JavaPluginConvention.class) == null + return project.getExtensions().findByType(JavaPluginExtension.class) == null ? Optional.empty() : Optional.ofNullable(GradleUtils.getJavaSourceSets(project).findByName(getSourceSetName())); } diff --git a/buildSrc/src/main/java/org/opensearch/gradle/util/GradleUtils.java b/buildSrc/src/main/java/org/opensearch/gradle/util/GradleUtils.java index 054f01788d126..031fee2d1127f 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/util/GradleUtils.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/util/GradleUtils.java @@ -40,7 +40,7 @@ import org.gradle.api.artifacts.Configuration; import org.gradle.api.artifacts.Dependency; import org.gradle.api.plugins.JavaBasePlugin; -import org.gradle.api.plugins.JavaPluginConvention; +import org.gradle.api.plugins.JavaPluginExtension; import org.gradle.api.provider.Provider; import org.gradle.api.services.BuildService; import org.gradle.api.services.BuildServiceRegistration; @@ -68,7 +68,7 @@ public static Action noop() { } public static SourceSetContainer getJavaSourceSets(Project project) { - return project.getConvention().getPlugin(JavaPluginConvention.class).getSourceSets(); + return project.getExtensions().getByType(JavaPluginExtension.class).getSourceSets(); } public static TaskProvider maybeRegister(TaskContainer tasks, String name, Class clazz, Action action) { diff --git a/buildSrc/src/main/java/org/opensearch/gradle/util/Util.java b/buildSrc/src/main/java/org/opensearch/gradle/util/Util.java index 71b1e5040340d..a4d2c59cf8cad 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/util/Util.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/util/Util.java @@ -37,7 +37,7 @@ import org.gradle.api.GradleException; import org.gradle.api.Project; import org.gradle.api.file.FileTree; -import org.gradle.api.plugins.JavaPluginConvention; +import org.gradle.api.plugins.JavaPluginExtension; import org.gradle.api.tasks.SourceSet; import org.gradle.api.tasks.util.PatternFilterable; @@ -149,7 +149,7 @@ public static FileTree getJavaTestAndMainSourceResources(Project project, Action * @return An Optional that contains the Java test SourceSet if it exists. */ public static Optional getJavaTestSourceSet(Project project) { - return project.getConvention().findPlugin(JavaPluginConvention.class) == null + return project.getExtensions().findByType(JavaPluginExtension.class) == null ? 
Optional.empty() : Optional.ofNullable(GradleUtils.getJavaSourceSets(project).findByName(SourceSet.TEST_SOURCE_SET_NAME)); } @@ -159,7 +159,7 @@ public static Optional getJavaTestSourceSet(Project project) { * @return An Optional that contains the Java main SourceSet if it exists. */ public static Optional getJavaMainSourceSet(Project project) { - return project.getConvention().findPlugin(JavaPluginConvention.class) == null + return project.getExtensions().findByType(JavaPluginExtension.class) == null ? Optional.empty() : Optional.ofNullable(GradleUtils.getJavaSourceSets(project).findByName(SourceSet.MAIN_SOURCE_SET_NAME)); } diff --git a/buildSrc/version.properties b/buildSrc/version.properties index f9eac9516cb18..408b03e60cc5d 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -33,10 +33,10 @@ netty = 4.1.94.Final joda = 2.12.2 # client dependencies -httpclient5 = 5.1.4 -httpcore5 = 5.1.5 -httpclient = 4.5.13 -httpcore = 4.4.15 +httpclient5 = 5.2.1 +httpcore5 = 5.2.2 +httpclient = 4.5.14 +httpcore = 4.4.16 httpasyncclient = 4.1.5 commonslogging = 1.2 commonscodec = 1.15 diff --git a/client/benchmark/build.gradle b/client/benchmark/build.gradle index 4aa4d7171e366..6fd5262f0ab4f 100644 --- a/client/benchmark/build.gradle +++ b/client/benchmark/build.gradle @@ -31,13 +31,17 @@ apply plugin: 'opensearch.build' apply plugin: 'application' -group = 'org.opensearch.client' +base { + group = 'org.opensearch.client' + archivesBaseName = 'client-benchmarks' +} // Not published so no need to assemble assemble.enabled = true -archivesBaseName = 'client-benchmarks' -mainClassName = 'org.opensearch.client.benchmark.BenchmarkMain' +application { + mainClass = 'org.opensearch.client.benchmark.BenchmarkMain' +} // never try to invoke tests on the benchmark project - there aren't any test.enabled = false diff --git a/client/rest-high-level/build.gradle b/client/rest-high-level/build.gradle index 43758560b2a15..7daf4761b9310 100644 --- a/client/rest-high-level/build.gradle +++ b/client/rest-high-level/build.gradle @@ -37,8 +37,10 @@ apply plugin: 'opensearch.rest-test' apply plugin: 'opensearch.publish' apply plugin: 'opensearch.rest-resources' -group = 'org.opensearch.client' -archivesBaseName = 'opensearch-rest-high-level-client' +base { + group = 'org.opensearch.client' + archivesBaseName = 'opensearch-rest-high-level-client' +} restResources { //we need to copy the yaml spec so we can check naming (see RestHighlevelClientTests#testApiNamingConventions) diff --git a/client/rest/build.gradle b/client/rest/build.gradle index 9ea7ad4ddb964..2c437c909fb03 100644 --- a/client/rest/build.gradle +++ b/client/rest/build.gradle @@ -33,11 +33,15 @@ import de.thetaphi.forbiddenapis.gradle.CheckForbiddenApis apply plugin: 'opensearch.build' apply plugin: 'opensearch.publish' -targetCompatibility = JavaVersion.VERSION_11 -sourceCompatibility = JavaVersion.VERSION_11 +java { + targetCompatibility = JavaVersion.VERSION_11 + sourceCompatibility = JavaVersion.VERSION_11 +} -group = 'org.opensearch.client' -archivesBaseName = 'opensearch-rest-client' +base { + group = 'org.opensearch.client' + archivesBaseName = 'opensearch-rest-client' +} dependencies { api "org.apache.httpcomponents.client5:httpclient5:${versions.httpclient5}" diff --git a/client/rest/licenses/httpclient5-5.1.4.jar.sha1 b/client/rest/licenses/httpclient5-5.1.4.jar.sha1 deleted file mode 100644 index 3c0cb1335fb88..0000000000000 --- a/client/rest/licenses/httpclient5-5.1.4.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ 
-208f9eed6d6ab709e2ae7a75b457ef60c0baefa5 \ No newline at end of file diff --git a/client/rest/licenses/httpclient5-5.2.1.jar.sha1 b/client/rest/licenses/httpclient5-5.2.1.jar.sha1 new file mode 100644 index 0000000000000..3555fe22f8e12 --- /dev/null +++ b/client/rest/licenses/httpclient5-5.2.1.jar.sha1 @@ -0,0 +1 @@ +0c900514d3446d9ce5d9dbd90c21192048125440 \ No newline at end of file diff --git a/client/rest/licenses/httpcore5-5.1.5.jar.sha1 b/client/rest/licenses/httpcore5-5.1.5.jar.sha1 deleted file mode 100644 index 8da253152e970..0000000000000 --- a/client/rest/licenses/httpcore5-5.1.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -df9da3a1fa2351c4790245400ed28d78a8ddd3fc \ No newline at end of file diff --git a/client/rest/licenses/httpcore5-5.2.2.jar.sha1 b/client/rest/licenses/httpcore5-5.2.2.jar.sha1 new file mode 100644 index 0000000000000..b641256c7d4a4 --- /dev/null +++ b/client/rest/licenses/httpcore5-5.2.2.jar.sha1 @@ -0,0 +1 @@ +6da28f5aa6c2b129ef49632e041a5203ce7507b2 \ No newline at end of file diff --git a/client/rest/licenses/httpcore5-h2-5.1.5.jar.sha1 b/client/rest/licenses/httpcore5-h2-5.1.5.jar.sha1 deleted file mode 100644 index 097e6cc2a3be8..0000000000000 --- a/client/rest/licenses/httpcore5-h2-5.1.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -624660339afd5006d427457e6b10b10b32fd86f1 \ No newline at end of file diff --git a/client/rest/licenses/httpcore5-h2-5.2.2.jar.sha1 b/client/rest/licenses/httpcore5-h2-5.2.2.jar.sha1 new file mode 100644 index 0000000000000..94bc0fa49bdb0 --- /dev/null +++ b/client/rest/licenses/httpcore5-h2-5.2.2.jar.sha1 @@ -0,0 +1 @@ +54ee1ed58fe8ac40be1083ea9873a6c734939ab9 \ No newline at end of file diff --git a/client/sniffer/build.gradle b/client/sniffer/build.gradle index 9823bc9afd347..f645b2dbbc933 100644 --- a/client/sniffer/build.gradle +++ b/client/sniffer/build.gradle @@ -30,11 +30,15 @@ apply plugin: 'opensearch.build' apply plugin: 'opensearch.publish' -targetCompatibility = JavaVersion.VERSION_11 -sourceCompatibility = JavaVersion.VERSION_11 +java { + targetCompatibility = JavaVersion.VERSION_11 + sourceCompatibility = JavaVersion.VERSION_11 +} -group = 'org.opensearch.client' -archivesBaseName = 'opensearch-rest-client-sniffer' +base { + group = 'org.opensearch.client' + archivesBaseName = 'opensearch-rest-client-sniffer' +} dependencies { api project(":client:rest") diff --git a/client/sniffer/licenses/httpclient5-5.1.4.jar.sha1 b/client/sniffer/licenses/httpclient5-5.1.4.jar.sha1 deleted file mode 100644 index 3c0cb1335fb88..0000000000000 --- a/client/sniffer/licenses/httpclient5-5.1.4.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -208f9eed6d6ab709e2ae7a75b457ef60c0baefa5 \ No newline at end of file diff --git a/client/sniffer/licenses/httpclient5-5.2.1.jar.sha1 b/client/sniffer/licenses/httpclient5-5.2.1.jar.sha1 new file mode 100644 index 0000000000000..3555fe22f8e12 --- /dev/null +++ b/client/sniffer/licenses/httpclient5-5.2.1.jar.sha1 @@ -0,0 +1 @@ +0c900514d3446d9ce5d9dbd90c21192048125440 \ No newline at end of file diff --git a/client/sniffer/licenses/httpcore5-5.1.5.jar.sha1 b/client/sniffer/licenses/httpcore5-5.1.5.jar.sha1 deleted file mode 100644 index 8da253152e970..0000000000000 --- a/client/sniffer/licenses/httpcore5-5.1.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -df9da3a1fa2351c4790245400ed28d78a8ddd3fc \ No newline at end of file diff --git a/client/sniffer/licenses/httpcore5-5.2.2.jar.sha1 b/client/sniffer/licenses/httpcore5-5.2.2.jar.sha1 new file mode 100644 index 0000000000000..b641256c7d4a4 --- /dev/null +++ 
b/client/sniffer/licenses/httpcore5-5.2.2.jar.sha1 @@ -0,0 +1 @@ +6da28f5aa6c2b129ef49632e041a5203ce7507b2 \ No newline at end of file diff --git a/client/test/build.gradle b/client/test/build.gradle index 13e9bd6b9e34a..f81a009389681 100644 --- a/client/test/build.gradle +++ b/client/test/build.gradle @@ -29,10 +29,14 @@ */ apply plugin: 'opensearch.build' -targetCompatibility = JavaVersion.VERSION_11 -sourceCompatibility = JavaVersion.VERSION_11 +java { + targetCompatibility = JavaVersion.VERSION_11 + sourceCompatibility = JavaVersion.VERSION_11 +} -group = "${group}.client.test" +base { + group = "${group}.client.test" +} dependencies { api "org.apache.httpcomponents.core5:httpcore5:${versions.httpcore5}" diff --git a/distribution/archives/integ-test-zip/build.gradle b/distribution/archives/integ-test-zip/build.gradle index f3f848797961c..9418223b0a44d 100644 --- a/distribution/archives/integ-test-zip/build.gradle +++ b/distribution/archives/integ-test-zip/build.gradle @@ -36,7 +36,10 @@ apply plugin: 'opensearch.rest-test' apply plugin: 'opensearch.publish' apply plugin: 'com.netflix.nebula.maven-publish' -group = "org.opensearch.distribution.integ-test-zip" +base { + group = "org.opensearch.distribution.integ-test-zip" + archivesBaseName = "opensearch" +} integTest { dependsOn assemble @@ -48,7 +51,6 @@ processTestResources { } // make the pom file name use opensearch instead of the project name -archivesBaseName = "opensearch" ext.buildDist = parent.tasks.named("buildIntegTestZip") publishing { diff --git a/distribution/tools/java-version-checker/build.gradle b/distribution/tools/java-version-checker/build.gradle index 9480a86ce6fb7..7fd16b910b293 100644 --- a/distribution/tools/java-version-checker/build.gradle +++ b/distribution/tools/java-version-checker/build.gradle @@ -11,8 +11,10 @@ apply plugin: 'opensearch.build' -sourceCompatibility = JavaVersion.VERSION_11 -targetCompatibility = JavaVersion.VERSION_11 +java { + sourceCompatibility = JavaVersion.VERSION_11 + targetCompatibility = JavaVersion.VERSION_11 +} // targetting very old java versions enables a warning by default on newer JDK: disable it. 
compileJava.options.compilerArgs += '-Xlint:-options' diff --git a/distribution/tools/launchers/build.gradle b/distribution/tools/launchers/build.gradle index 7ebe5c7e64416..e75267f7c4a74 100644 --- a/distribution/tools/launchers/build.gradle +++ b/distribution/tools/launchers/build.gradle @@ -38,7 +38,9 @@ dependencies { testImplementation "org.hamcrest:hamcrest:${versions.hamcrest}" } -archivesBaseName = 'opensearch-launchers' +base { + archivesBaseName = 'opensearch-launchers' +} tasks.withType(CheckForbiddenApis).configureEach { replaceSignatureFiles 'jdk-signatures' diff --git a/distribution/tools/plugin-cli/build.gradle b/distribution/tools/plugin-cli/build.gradle index 477f66625e124..e68fbf8d6fc3c 100644 --- a/distribution/tools/plugin-cli/build.gradle +++ b/distribution/tools/plugin-cli/build.gradle @@ -30,7 +30,9 @@ apply plugin: 'opensearch.build' -archivesBaseName = 'opensearch-plugin-cli' +base { + archivesBaseName = 'opensearch-plugin-cli' +} dependencies { compileOnly project(":server") diff --git a/distribution/tools/upgrade-cli/build.gradle b/distribution/tools/upgrade-cli/build.gradle index cb87faf7788fa..54d51936159ec 100644 --- a/distribution/tools/upgrade-cli/build.gradle +++ b/distribution/tools/upgrade-cli/build.gradle @@ -9,7 +9,9 @@ apply plugin: 'opensearch.build' -archivesBaseName = 'opensearch-upgrade-cli' +base { + archivesBaseName = 'opensearch-upgrade-cli' +} dependencies { compileOnly project(":server") diff --git a/doc-tools/build.gradle b/doc-tools/build.gradle index c47097c3d6035..e6ace21420dda 100644 --- a/doc-tools/build.gradle +++ b/doc-tools/build.gradle @@ -2,8 +2,10 @@ plugins { id 'java' } -group 'org.opensearch' -version '1.0.0-SNAPSHOT' +base { + group 'org.opensearch' + version '1.0.0-SNAPSHOT' +} repositories { mavenCentral() diff --git a/gradle.properties b/gradle.properties index 73df0940ce181..7c359ed2b652c 100644 --- a/gradle.properties +++ b/gradle.properties @@ -21,6 +21,9 @@ org.gradle.jvmargs=-Xmx3g -XX:+HeapDumpOnOutOfMemoryError -Xss2m \ --add-exports jdk.compiler/com.sun.tools.javac.util=ALL-UNNAMED options.forkOptions.memoryMaximumSize=3g +# Disable Gradle Enterprise Gradle plugin's test retry +systemProp.gradle.enterprise.testretry.enabled=false + # Disable duplicate project id detection # See https://docs.gradle.org/current/userguide/upgrading_version_6.html#duplicate_project_names_may_cause_publication_to_fail systemProp.org.gradle.dependency.duplicate.project.detection=false diff --git a/gradle/code-coverage.gradle b/gradle/code-coverage.gradle index d5a717b5ea720..dfb4ddba24113 100644 --- a/gradle/code-coverage.gradle +++ b/gradle/code-coverage.gradle @@ -19,7 +19,7 @@ repositories { allprojects { plugins.withId('jacoco') { - jacoco.toolVersion = '0.8.9' + jacoco.toolVersion = '0.8.10' } } diff --git a/gradle/wrapper/gradle-wrapper.jar b/gradle/wrapper/gradle-wrapper.jar index c1962a79e29d3..033e24c4cdf41 100644 Binary files a/gradle/wrapper/gradle-wrapper.jar and b/gradle/wrapper/gradle-wrapper.jar differ diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties index ee9745cc621ae..f00d0c8442459 100644 --- a/gradle/wrapper/gradle-wrapper.properties +++ b/gradle/wrapper/gradle-wrapper.properties @@ -11,7 +11,7 @@ distributionBase=GRADLE_USER_HOME distributionPath=wrapper/dists -distributionUrl=https\://services.gradle.org/distributions/gradle-8.1.1-all.zip +distributionUrl=https\://services.gradle.org/distributions/gradle-8.2-all.zip zipStoreBase=GRADLE_USER_HOME zipStorePath=wrapper/dists 
-distributionSha256Sum=5625a0ae20fe000d9225d000b36909c7a0e0e8dda61c19b12da769add847c975 +distributionSha256Sum=5022b0b25fe182b0e50867e77f484501dba44feeea88f5c1f13b6b4660463640 diff --git a/gradlew b/gradlew index aeb74cbb43e39..fcb6fca147c0c 100755 --- a/gradlew +++ b/gradlew @@ -130,10 +130,13 @@ location of your Java installation." fi else JAVACMD=java - which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. + if ! command -v java >/dev/null 2>&1 + then + die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. Please set the JAVA_HOME variable in your environment to match the location of your Java installation." + fi fi # Increase the maximum file descriptors if we can. diff --git a/libs/common/build.gradle b/libs/common/build.gradle index 003a04ad4bfa7..973fe30d09842 100644 --- a/libs/common/build.gradle +++ b/libs/common/build.gradle @@ -13,7 +13,9 @@ import org.opensearch.gradle.info.BuildParams apply plugin: 'opensearch.publish' -archivesBaseName = 'opensearch-common' +base { + archivesBaseName = 'opensearch-common' +} dependencies { // This dependency is used only by :libs:core for null-checking interop with other tools diff --git a/libs/core/build.gradle b/libs/core/build.gradle index 65d507c37b3ff..46b6f4471731f 100644 --- a/libs/core/build.gradle +++ b/libs/core/build.gradle @@ -32,7 +32,9 @@ import org.opensearch.gradle.info.BuildParams apply plugin: 'opensearch.publish' -archivesBaseName = 'opensearch-core' +base { + archivesBaseName = 'opensearch-core' +} // we want to keep the JDKs in our IDEs set to JDK 8 until minimum JDK is bumped to 11 so we do not include this source set in our IDEs if (!isEclipse) { diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/DefaultSpanScope.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/DefaultSpanScope.java new file mode 100644 index 0000000000000..58e9e0abad739 --- /dev/null +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/DefaultSpanScope.java @@ -0,0 +1,70 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */
+
+package org.opensearch.telemetry.tracing;
+
+import java.util.function.Consumer;
+
+/**
+ * Default implementation of Scope
+ */
+public class DefaultSpanScope implements SpanScope {
+
+    private final Span span;
+
+    private final Consumer<Span> onCloseConsumer;
+
+    /**
+     * Creates Scope instance for the given span
+     *
+     * @param span underlying span
+     * @param onCloseConsumer consumer to execute on scope close
+     */
+    public DefaultSpanScope(Span span, Consumer<Span> onCloseConsumer) {
+        this.span = span;
+        this.onCloseConsumer = onCloseConsumer;
+    }
+
+    @Override
+    public void addSpanAttribute(String key, String value) {
+        span.addAttribute(key, value);
+    }
+
+    @Override
+    public void addSpanAttribute(String key, long value) {
+        span.addAttribute(key, value);
+    }
+
+    @Override
+    public void addSpanAttribute(String key, double value) {
+        span.addAttribute(key, value);
+    }
+
+    @Override
+    public void addSpanAttribute(String key, boolean value) {
+        span.addAttribute(key, value);
+    }
+
+    @Override
+    public void addSpanEvent(String event) {
+        span.addEvent(event);
+    }
+
+    @Override
+    public void setError(Exception exception) {
+        span.setError(exception);
+    }
+
+    /**
+     * Executes the runnable to end the scope
+     */
+    @Override
+    public void close() {
+        onCloseConsumer.accept(span);
+    }
+}
diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/DefaultTracer.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/DefaultTracer.java
index ab9110af7c3ab..783edd238c1c2 100644
--- a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/DefaultTracer.java
+++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/DefaultTracer.java
@@ -13,8 +13,8 @@
 /**
  *
- * The default tracer implementation. This class implements the basic logic for span lifecycle and its state management.
- * It also handles tracing context propagation between spans.
+ * The default tracer implementation. It handles tracing context propagation between spans by maintaining
+ * the current active span in its storage.
  *
  *
  */
@@ -36,41 +36,11 @@ public DefaultTracer(TracingTelemetry tracingTelemetry, TracerContextStorage
-        return new ScopeImpl(() -> endSpan(span));
-    }
-
-    @Override
-    public void addSpanAttribute(String key, String value) {
-        Span currentSpan = getCurrentSpan();
-        currentSpan.addAttribute(key, value);
-    }
-
-    @Override
-    public void addSpanAttribute(String key, long value) {
-        Span currentSpan = getCurrentSpan();
-        currentSpan.addAttribute(key, value);
-    }
-
-    @Override
-    public void addSpanAttribute(String key, double value) {
-        Span currentSpan = getCurrentSpan();
-        currentSpan.addAttribute(key, value);
-    }
-
-    @Override
-    public void addSpanAttribute(String key, boolean value) {
-        Span currentSpan = getCurrentSpan();
-        currentSpan.addAttribute(key, value);
-    }
-
-    @Override
-    public void addSpanEvent(String event) {
-        Span currentSpan = getCurrentSpan();
-        currentSpan.addEvent(event);
+        return new DefaultSpanScope(span, (scopeSpan) -> endSpan(scopeSpan));
     }
 
     @Override
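With the span actions moved onto the scope, callers now interact with the object returned by `startSpan` rather than with the tracer itself. A minimal usage sketch based on the interfaces in this diff; the `tracer` variable, span name, and `runTracedWork` helper are illustrative, not part of the change:

```java
// Hypothetical caller: "tracer" is any Tracer, e.g. a DefaultTracer instance.
try (SpanScope scope = tracer.startSpan("fetch_shard")) {
    scope.addSpanAttribute("shard_id", 42L); // span actions now go through the scope
    scope.addSpanEvent("fetch_started");
    try {
        runTracedWork(); // illustrative unit of traced work
    } catch (Exception e) {
        scope.setError(e); // records the failure on the underlying span
        throw e;
    }
} // close() ends the span and restores the parent span as current
```

Closing the scope is what pops the span: `DefaultTracer` registers `endSpan` as the on-close consumer, which ends the span and restores its parent in the context storage, as `testEndSpanByClosingScope` below verifies.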
diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/Scope.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/Scope.java
deleted file mode 100644
index 52f4eaf648eea..0000000000000
--- a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/Scope.java
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */
-
-package org.opensearch.telemetry.tracing;
-
-/**
- * An auto-closeable that represents scope of the span.
- * It is recommended that you use this class with a try-with-resources block:
- */
-public interface Scope extends AutoCloseable {
-    /**
-     * No-op Scope implementation
-     */
-    Scope NO_OP = () -> {};
-
-    /**
-     * closes the scope
-     */
-    @Override
-    void close();
-}
diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/ScopeImpl.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/ScopeImpl.java
deleted file mode 100644
index 30a7ac7fa90e7..0000000000000
--- a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/ScopeImpl.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */
-
-package org.opensearch.telemetry.tracing;
-
-/**
- * Executes the runnable on close
- */
-public class ScopeImpl implements Scope {
-
-    private Runnable runnableOnClose;
-
-    /**
-     * Creates Scope instance
-     * @param runnableOnClose runnable to execute on scope close
-     */
-    public ScopeImpl(Runnable runnableOnClose) {
-        this.runnableOnClose = runnableOnClose;
-    }
-
-    /**
-     * Executes the runnable to end the scope
-     */
-    @Override
-    public void close() {
-        runnableOnClose.run();
-    }
-}
diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/Span.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/Span.java
index 0710b8a22a37f..d60b4e60adece 100644
--- a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/Span.java
+++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/Span.java
@@ -62,6 +62,13 @@ public interface Span {
      */
     void addAttribute(String key, Boolean value);
 
+    /**
+     * Records error in the span
+     *
+     * @param exception exception to be recorded
+     */
+    void setError(Exception exception);
+
     /**
      * Adds an event in the span
      *
diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/SpanScope.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/SpanScope.java
new file mode 100644
index 0000000000000..cf67165d889bc
--- /dev/null
+++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/SpanScope.java
@@ -0,0 +1,74 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.telemetry.tracing;
+
+import org.opensearch.telemetry.tracing.noop.NoopSpanScope;
+
+/**
+ * An auto-closeable that represents scope of the span.
+ * It provides interface for all the span operations.
+ */
+public interface SpanScope extends AutoCloseable {
+    /**
+     * No-op Scope implementation
+     */
+    SpanScope NO_OP = new NoopSpanScope();
+
+    /**
+     * Adds string attribute to the {@link Span}.
+     *
+     * @param key attribute key
+     * @param value attribute value
+     */
+    void addSpanAttribute(String key, String value);
+
+    /**
+     * Adds long attribute to the {@link Span}.
+     *
+     * @param key attribute key
+     * @param value attribute value
+     */
+    void addSpanAttribute(String key, long value);
+
+    /**
+     * Adds double attribute to the {@link Span}.
+     *
+     * @param key attribute key
+     * @param value attribute value
+     */
+    void addSpanAttribute(String key, double value);
+
+    /**
+     * Adds boolean attribute to the {@link Span}.
+ * + * @param key attribute key + * @param value attribute value + */ + void addSpanAttribute(String key, boolean value); + + /** + * Adds an event to the {@link Span}. + * + * @param event event name + */ + void addSpanEvent(String event); + + /** + * Records error in the span + * + * @param exception exception to be recorded + */ + void setError(Exception exception); + + /** + * closes the scope + */ + @Override + void close(); +} diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/Tracer.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/Tracer.java index fcc091eb39c48..d422b58aa0a9f 100644 --- a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/Tracer.java +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/Tracer.java @@ -11,7 +11,7 @@ import java.io.Closeable; /** - * Tracer is the interface used to create a {@link Span} and interact with current active {@link Span}. + * Tracer is the interface used to create a {@link Span} * It automatically handles the context propagation between threads, tasks, nodes etc. * * All methods on the Tracer object are multi-thread safe. @@ -24,44 +24,6 @@ public interface Tracer extends Closeable { * @param spanName span name * @return scope of the span, must be closed with explicit close or with try-with-resource */ - Scope startSpan(String spanName); + SpanScope startSpan(String spanName); - /** - * Adds string attribute to the current active {@link Span}. - * - * @param key attribute key - * @param value attribute value - */ - void addSpanAttribute(String key, String value); - - /** - * Adds long attribute to the current active {@link Span}. - * - * @param key attribute key - * @param value attribute value - */ - void addSpanAttribute(String key, long value); - - /** - * Adds double attribute to the current active {@link Span}. - * - * @param key attribute key - * @param value attribute value - */ - void addSpanAttribute(String key, double value); - - /** - * Adds boolean attribute to the current active {@link Span}. - * - * @param key attribute key - * @param value attribute value - */ - void addSpanAttribute(String key, boolean value); - - /** - * Adds an event to the current active {@link Span}. - * - * @param event event name - */ - void addSpanEvent(String event); } diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/noop/NoopSpanScope.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/noop/NoopSpanScope.java new file mode 100644 index 0000000000000..c0dbaf65ba48b --- /dev/null +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/noop/NoopSpanScope.java @@ -0,0 +1,57 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.telemetry.tracing.noop; + +import org.opensearch.telemetry.tracing.SpanScope; + +/** + * No-op implementation of SpanScope + */ +public final class NoopSpanScope implements SpanScope { + + /** + * No-args constructor + */ + public NoopSpanScope() {} + + @Override + public void addSpanAttribute(String key, String value) { + + } + + @Override + public void addSpanAttribute(String key, long value) { + + } + + @Override + public void addSpanAttribute(String key, double value) { + + } + + @Override + public void addSpanAttribute(String key, boolean value) { + + } + + @Override + public void addSpanEvent(String event) { + + } + + @Override + public void setError(Exception exception) { + + } + + @Override + public void close() { + + } +} diff --git a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/noop/NoopTracer.java b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/noop/NoopTracer.java index 18fc60e41e54d..a66cbcf4fef52 100644 --- a/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/noop/NoopTracer.java +++ b/libs/telemetry/src/main/java/org/opensearch/telemetry/tracing/noop/NoopTracer.java @@ -8,7 +8,7 @@ package org.opensearch.telemetry.tracing.noop; -import org.opensearch.telemetry.tracing.Scope; +import org.opensearch.telemetry.tracing.SpanScope; import org.opensearch.telemetry.tracing.Tracer; /** @@ -24,49 +24,8 @@ public class NoopTracer implements Tracer { private NoopTracer() {} @Override - public Scope startSpan(String spanName) { - return Scope.NO_OP; - } - - /** - * @param key attribute key - * @param value attribute value - */ - @Override - public void addSpanAttribute(String key, String value) { - - } - - /** - * @param key attribute key - * @param value attribute value - */ - @Override - public void addSpanAttribute(String key, long value) { - - } - - /** - * @param key attribute key - * @param value attribute value - */ - @Override - public void addSpanAttribute(String key, double value) { - - } - - /** - * @param key attribute key - * @param value attribute value - */ - @Override - public void addSpanAttribute(String key, boolean value) { - - } - - @Override - public void addSpanEvent(String event) { - + public SpanScope startSpan(String spanName) { + return SpanScope.NO_OP; } @Override diff --git a/libs/telemetry/src/test/java/org/opensearch/telemetry/tracing/DefaultSpanScopeTests.java b/libs/telemetry/src/test/java/org/opensearch/telemetry/tracing/DefaultSpanScopeTests.java new file mode 100644 index 0000000000000..eea6b77ce6e1e --- /dev/null +++ b/libs/telemetry/src/test/java/org/opensearch/telemetry/tracing/DefaultSpanScopeTests.java @@ -0,0 +1,79 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.telemetry.tracing; + +import org.opensearch.test.OpenSearchTestCase; + +import java.util.function.Consumer; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; + +public class DefaultSpanScopeTests extends OpenSearchTestCase { + + @SuppressWarnings("unchecked") + public void testClose() { + Span mockSpan = mock(Span.class); + Consumer mockConsumer = mock(Consumer.class); + DefaultSpanScope defaultSpanScope = new DefaultSpanScope(mockSpan, mockConsumer); + defaultSpanScope.close(); + + verify(mockConsumer).accept(mockSpan); + } + + public void testAddSpanAttributeString() { + Span mockSpan = mock(Span.class); + DefaultSpanScope defaultSpanScope = new DefaultSpanScope(mockSpan, null); + defaultSpanScope.addSpanAttribute("key", "value"); + + verify(mockSpan).addAttribute("key", "value"); + } + + public void testAddSpanAttributeLong() { + Span mockSpan = mock(Span.class); + DefaultSpanScope defaultSpanScope = new DefaultSpanScope(mockSpan, null); + defaultSpanScope.addSpanAttribute("key", 1L); + + verify(mockSpan).addAttribute("key", 1L); + } + + public void testAddSpanAttributeDouble() { + Span mockSpan = mock(Span.class); + DefaultSpanScope defaultSpanScope = new DefaultSpanScope(mockSpan, null); + defaultSpanScope.addSpanAttribute("key", 1.0); + + verify(mockSpan).addAttribute("key", 1.0); + } + + public void testAddSpanAttributeBoolean() { + Span mockSpan = mock(Span.class); + DefaultSpanScope defaultSpanScope = new DefaultSpanScope(mockSpan, null); + defaultSpanScope.addSpanAttribute("key", true); + + verify(mockSpan).addAttribute("key", true); + } + + public void testAddEvent() { + Span mockSpan = mock(Span.class); + DefaultSpanScope defaultSpanScope = new DefaultSpanScope(mockSpan, null); + defaultSpanScope.addSpanEvent("eventName"); + + verify(mockSpan).addEvent("eventName"); + } + + public void testSetError() { + Span mockSpan = mock(Span.class); + DefaultSpanScope defaultSpanScope = new DefaultSpanScope(mockSpan, null); + Exception ex = new Exception("error"); + defaultSpanScope.setError(ex); + + verify(mockSpan).setError(ex); + } + +} diff --git a/libs/telemetry/src/test/java/org/opensearch/telemetry/tracing/DefaultTracerTests.java b/libs/telemetry/src/test/java/org/opensearch/telemetry/tracing/DefaultTracerTests.java index f0e8f3c2e2344..2b7a379b0051a 100644 --- a/libs/telemetry/src/test/java/org/opensearch/telemetry/tracing/DefaultTracerTests.java +++ b/libs/telemetry/src/test/java/org/opensearch/telemetry/tracing/DefaultTracerTests.java @@ -45,57 +45,12 @@ public void testCreateSpan() { public void testEndSpanByClosingScope() { DefaultTracer defaultTracer = new DefaultTracer(mockTracingTelemetry, mockTracerContextStorage); - try (Scope scope = defaultTracer.startSpan("span_name")) { + try (SpanScope spanScope = defaultTracer.startSpan("span_name")) { verify(mockTracerContextStorage).put(TracerContextStorage.CURRENT_SPAN, mockSpan); } verify(mockTracerContextStorage).put(TracerContextStorage.CURRENT_SPAN, mockParentSpan); } - public void testAddSpanAttributeString() { - Tracer defaultTracer = new DefaultTracer(mockTracingTelemetry, mockTracerContextStorage); - defaultTracer.startSpan("span_name"); - - defaultTracer.addSpanAttribute("key", "value"); - - verify(mockSpan).addAttribute("key", "value"); - } - - public void testAddSpanAttributeLong() { - Tracer defaultTracer = new DefaultTracer(mockTracingTelemetry, mockTracerContextStorage); - defaultTracer.startSpan("span_name"); - - defaultTracer.addSpanAttribute("key", 
1L); - - verify(mockSpan).addAttribute("key", 1L); - } - - public void testAddSpanAttributeDouble() { - Tracer defaultTracer = new DefaultTracer(mockTracingTelemetry, mockTracerContextStorage); - defaultTracer.startSpan("span_name"); - - defaultTracer.addSpanAttribute("key", 1.0); - - verify(mockSpan).addAttribute("key", 1.0); - } - - public void testAddSpanAttributeBoolean() { - Tracer defaultTracer = new DefaultTracer(mockTracingTelemetry, mockTracerContextStorage); - defaultTracer.startSpan("span_name"); - - defaultTracer.addSpanAttribute("key", true); - - verify(mockSpan).addAttribute("key", true); - } - - public void testAddEvent() { - Tracer defaultTracer = new DefaultTracer(mockTracingTelemetry, mockTracerContextStorage); - defaultTracer.startSpan("span_name"); - - defaultTracer.addSpanEvent("eventName"); - - verify(mockSpan).addEvent("eventName"); - } - public void testClose() throws IOException { Tracer defaultTracer = new DefaultTracer(mockTracingTelemetry, mockTracerContextStorage); diff --git a/modules/lang-painless/build.gradle b/modules/lang-painless/build.gradle index 6c1785e6faf6b..3b3f2a7f9ca38 100644 --- a/modules/lang-painless/build.gradle +++ b/modules/lang-painless/build.gradle @@ -109,7 +109,7 @@ tasks.register("apiJavadoc", Javadoc) { source = sourceSets.main.allJava classpath = sourceSets.main.runtimeClasspath include '**/org/opensearch/painless/api/' - destinationDir = new File(docsDir, 'apiJavadoc') + destinationDir = new File(file(java.docsDir), 'apiJavadoc') } tasks.register("apiJavadocJar", Jar) { diff --git a/modules/lang-painless/spi/build.gradle b/modules/lang-painless/spi/build.gradle index 7811ee0e41dd8..59a77870b4987 100644 --- a/modules/lang-painless/spi/build.gradle +++ b/modules/lang-painless/spi/build.gradle @@ -31,8 +31,10 @@ apply plugin: 'opensearch.build' apply plugin: 'opensearch.publish' -group = 'org.opensearch.plugin' -archivesBaseName = 'opensearch-scripting-painless-spi' +base { + group = 'org.opensearch.plugin' + archivesBaseName = 'opensearch-scripting-painless-spi' +} dependencies { api project(":server") diff --git a/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/FilterQueryRequestProcessor.java b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/FilterQueryRequestProcessor.java index 7deb8faa03af6..d8862aa59cede 100644 --- a/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/FilterQueryRequestProcessor.java +++ b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/FilterQueryRequestProcessor.java @@ -13,12 +13,12 @@ import org.opensearch.common.xcontent.LoggingDeprecationHandler; import org.opensearch.common.xcontent.XContentType; import org.opensearch.common.xcontent.json.JsonXContent; -import org.opensearch.core.ParseField; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.index.query.BoolQueryBuilder; import org.opensearch.index.query.QueryBuilder; +import org.opensearch.ingest.ConfigurationUtils; import org.opensearch.search.builder.SearchSourceBuilder; import org.opensearch.search.pipeline.Processor; import org.opensearch.search.pipeline.SearchRequestProcessor; @@ -89,8 +89,8 @@ public SearchRequest processRequest(SearchRequest request) throws Exception { } static class Factory implements Processor.Factory { + private static final String QUERY_KEY = "query"; private 
final NamedXContentRegistry namedXContentRegistry; - public static final ParseField QUERY_FIELD = new ParseField("query"); Factory(NamedXContentRegistry namedXContentRegistry) { this.namedXContentRegistry = namedXContentRegistry; @@ -101,30 +101,21 @@ public FilterQueryRequestProcessor create( Map<String, Processor.Factory<SearchRequestProcessor>> processorFactories, String tag, String description, - Map<String, Object> config + Map<String, Object> config, + PipelineContext pipelineContext ) throws Exception { + Map<String, Object> query = ConfigurationUtils.readOptionalMap(TYPE, tag, config, QUERY_KEY); + if (query == null) { + throw new IllegalArgumentException("Did not specify the " + QUERY_KEY + " property in processor of type " + TYPE); + } try ( - XContentBuilder builder = XContentBuilder.builder(JsonXContent.jsonXContent).map(config); + XContentBuilder builder = XContentBuilder.builder(JsonXContent.jsonXContent).map(query); InputStream stream = BytesReference.bytes(builder).streamInput(); XContentParser parser = XContentType.JSON.xContent() .createParser(namedXContentRegistry, LoggingDeprecationHandler.INSTANCE, stream) ) { - XContentParser.Token token = parser.nextToken(); - assert token == XContentParser.Token.START_OBJECT; - String currentFieldName = null; - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - currentFieldName = parser.currentName(); - } else if (token == XContentParser.Token.START_OBJECT) { - if (QUERY_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { - return new FilterQueryRequestProcessor(tag, description, parseInnerQueryBuilder(parser)); - } - } - } + return new FilterQueryRequestProcessor(tag, description, parseInnerQueryBuilder(parser)); } - throw new IllegalArgumentException( - "Did not specify the " + QUERY_FIELD.getPreferredName() + " property in processor of type " + TYPE - ); } } } diff --git a/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/RenameFieldResponseProcessor.java b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/RenameFieldResponseProcessor.java index 4c40dda5928f0..c8b3c06a71562 100644 --- a/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/RenameFieldResponseProcessor.java +++ b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/RenameFieldResponseProcessor.java @@ -140,7 +140,8 @@ public RenameFieldResponseProcessor create( Map<String, Processor.Factory<SearchResponseProcessor>> processorFactories, String tag, String description, - Map<String, Object> config + Map<String, Object> config, + PipelineContext pipelineContext ) throws Exception { String oldField = ConfigurationUtils.readStringProperty(TYPE, tag, config, "field"); String newField = ConfigurationUtils.readStringProperty(TYPE, tag, config, "target_field"); diff --git a/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/ScriptRequestProcessor.java b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/ScriptRequestProcessor.java index 015411e0701a4..43ab3d4622d6b 100644 --- a/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/ScriptRequestProcessor.java +++ b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/ScriptRequestProcessor.java @@ -29,7 +29,8 @@ import org.opensearch.search.pipeline.common.helpers.SearchRequestMap; import java.io.InputStream; -import java.util.Arrays; +import java.util.HashMap; +import java.util.List; import java.util.Map; import static 
org.opensearch.ingest.ConfigurationUtils.newConfigurationException; @@ -127,6 +128,8 @@ SearchScript getPrecompiledSearchScript() { * Factory class for creating {@link ScriptRequestProcessor}. */ public static final class Factory implements Processor.Factory<SearchRequestProcessor> { + private static final List<String> SCRIPT_CONFIG_KEYS = List.of("id", "source", "inline", "lang", "params", "options"); + private final ScriptService scriptService; /** @@ -138,33 +141,29 @@ public Factory(ScriptService scriptService) { this.scriptService = scriptService; } - /** - * Creates a new instance of {@link ScriptRequestProcessor}. - * - * @param registry The registry of processor factories. - * @param processorTag The processor's tag. - * @param description The processor's description. - * @param config The configuration options for the processor. - * @return The created {@link ScriptRequestProcessor} instance. - * @throws Exception if an error occurs during the creation process. - */ @Override public ScriptRequestProcessor create( Map<String, Processor.Factory<SearchRequestProcessor>> registry, String processorTag, String description, - Map<String, Object> config + Map<String, Object> config, + PipelineContext pipelineContext ) throws Exception { + Map<String, Object> scriptConfig = new HashMap<>(); + for (String key : SCRIPT_CONFIG_KEYS) { + Object val = config.remove(key); + if (val != null) { + scriptConfig.put(key, val); + } + } try ( - XContentBuilder builder = XContentBuilder.builder(JsonXContent.jsonXContent).map(config); + XContentBuilder builder = XContentBuilder.builder(JsonXContent.jsonXContent).map(scriptConfig); InputStream stream = BytesReference.bytes(builder).streamInput(); XContentParser parser = XContentType.JSON.xContent() .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream) ) { Script script = Script.parse(parser); - Arrays.asList("id", "source", "inline", "lang", "params", "options").forEach(config::remove); - - // verify script is able to be compiled before successfully creating processor. SearchScript searchScript = null; try { diff --git a/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/SearchPipelineCommonModulePlugin.java b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/SearchPipelineCommonModulePlugin.java index dc25de460fdba..49681b80fdead 100644 --- a/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/SearchPipelineCommonModulePlugin.java +++ b/modules/search-pipeline-common/src/main/java/org/opensearch/search/pipeline/common/SearchPipelineCommonModulePlugin.java @@ -33,7 +33,7 @@ public SearchPipelineCommonModulePlugin() {} * @return A map of processor factories, where the keys are the processor types and the values are the corresponding factory instances. 
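* Each factory's {@code create} method also receives the {@code PipelineContext} for the search pipeline being built.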
*/ @Override - public Map<String, Processor.Factory<SearchRequestProcessor>> getRequestProcessors(Processor.Parameters parameters) { + public Map<String, Processor.Factory<SearchRequestProcessor>> getRequestProcessors(Parameters parameters) { return Map.of( FilterQueryRequestProcessor.TYPE, new FilterQueryRequestProcessor.Factory(parameters.namedXContentRegistry), @@ -43,7 +43,7 @@ public Map<String, Processor.Factory<SearchRequestProcessor>> getRequestProcesso } @Override - public Map<String, Processor.Factory<SearchResponseProcessor>> getResponseProcessors(Processor.Parameters parameters) { + public Map<String, Processor.Factory<SearchResponseProcessor>> getResponseProcessors(Parameters parameters) { return Map.of(RenameFieldResponseProcessor.TYPE, new RenameFieldResponseProcessor.Factory()); } } diff --git a/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/FilterQueryRequestProcessorTests.java b/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/FilterQueryRequestProcessorTests.java index 1f355ac97c801..ecf746af556a2 100644 --- a/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/FilterQueryRequestProcessorTests.java +++ b/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/FilterQueryRequestProcessorTests.java @@ -16,6 +16,7 @@ import org.opensearch.test.AbstractBuilderTestCase; import java.util.Collections; +import java.util.HashMap; import java.util.Map; public class FilterQueryRequestProcessorTests extends AbstractBuilderTestCase { @@ -37,15 +38,14 @@ public void testFilterQuery() throws Exception { public void testFactory() throws Exception { FilterQueryRequestProcessor.Factory factory = new FilterQueryRequestProcessor.Factory(this.xContentRegistry()); - FilterQueryRequestProcessor processor = factory.create( - Collections.emptyMap(), - null, - null, - Map.of("query", Map.of("term", Map.of("field", "value"))) - ); + Map<String, Object> configMap = new HashMap<>(Map.of("query", Map.of("term", Map.of("field", "value")))); + FilterQueryRequestProcessor processor = factory.create(Collections.emptyMap(), null, null, configMap, null); assertEquals(new TermQueryBuilder("field", "value"), processor.filterQuery); // Missing "query" parameter: - expectThrows(IllegalArgumentException.class, () -> factory.create(Collections.emptyMap(), null, null, Collections.emptyMap())); + expectThrows( + IllegalArgumentException.class, + () -> factory.create(Collections.emptyMap(), null, null, Collections.emptyMap(), null) + ); } } diff --git a/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/RenameFieldResponseProcessorTests.java b/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/RenameFieldResponseProcessorTests.java index a2fc7f6acfa7c..7f3a2acfbdc08 100644 --- a/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/RenameFieldResponseProcessorTests.java +++ b/modules/search-pipeline-common/src/test/java/org/opensearch/search/pipeline/common/RenameFieldResponseProcessorTests.java @@ -115,12 +115,15 @@ public void testFactory() throws Exception { config.put("target_field", newField); RenameFieldResponseProcessor.Factory factory = new RenameFieldResponseProcessor.Factory(); - RenameFieldResponseProcessor processor = factory.create(Collections.emptyMap(), null, null, config); + RenameFieldResponseProcessor processor = factory.create(Collections.emptyMap(), null, null, config, null); assertEquals(processor.getType(), "rename_field"); assertEquals(processor.getOldField(), oldField); assertEquals(processor.getNewField(), newField); assertFalse(processor.isIgnoreMissing()); - expectThrows(OpenSearchParseException.class, () -> 
factory.create(Collections.emptyMap(), null, null, Collections.emptyMap())); + expectThrows( + OpenSearchParseException.class, + () -> factory.create(Collections.emptyMap(), null, null, Collections.emptyMap(), null) + ); } } diff --git a/modules/search-pipeline-common/src/yamlRestTest/resources/rest-api-spec/test/search_pipeline/50_script_processor.yml b/modules/search-pipeline-common/src/yamlRestTest/resources/rest-api-spec/test/search_pipeline/50_script_processor.yml index 9d855e8a1861a..7a01e68acf75c 100644 --- a/modules/search-pipeline-common/src/yamlRestTest/resources/rest-api-spec/test/search_pipeline/50_script_processor.yml +++ b/modules/search-pipeline-common/src/yamlRestTest/resources/rest-api-spec/test/search_pipeline/50_script_processor.yml @@ -17,6 +17,7 @@ teardown: "request_processors": [ { "script" : { + "tag": "empty_script", "lang": "painless", "source" : "" } @@ -38,6 +39,7 @@ teardown: "request_processors": [ { "script" : { + "tag": "working", "lang" : "painless", "source" : "ctx._source['size'] += 10; ctx._source['from'] = ctx._source['from'] <= 0 ? ctx._source['from'] : ctx._source['from'] - 1 ; ctx._source['explain'] = !ctx._source['explain']; ctx._source['version'] = !ctx._source['version']; ctx._source['seq_no_primary_term'] = !ctx._source['seq_no_primary_term']; ctx._source['track_scores'] = !ctx._source['track_scores']; ctx._source['track_total_hits'] = 1; ctx._source['min_score'] -= 0.9; ctx._source['terminate_after'] += 2; ctx._source['profile'] = !ctx._source['profile'];" } diff --git a/plugins/discovery-azure-classic/licenses/httpclient-4.5.13.jar.sha1 b/plugins/discovery-azure-classic/licenses/httpclient-4.5.13.jar.sha1 deleted file mode 100644 index 3281e21595b39..0000000000000 --- a/plugins/discovery-azure-classic/licenses/httpclient-4.5.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e5f6cae5ca7ecaac1ec2827a9e2d65ae2869cada \ No newline at end of file diff --git a/plugins/discovery-azure-classic/licenses/httpclient-4.5.14.jar.sha1 b/plugins/discovery-azure-classic/licenses/httpclient-4.5.14.jar.sha1 new file mode 100644 index 0000000000000..66e05851c2e3c --- /dev/null +++ b/plugins/discovery-azure-classic/licenses/httpclient-4.5.14.jar.sha1 @@ -0,0 +1 @@ +1194890e6f56ec29177673f2f12d0b8e627dec98 \ No newline at end of file diff --git a/plugins/discovery-azure-classic/licenses/httpcore-4.4.15.jar.sha1 b/plugins/discovery-azure-classic/licenses/httpcore-4.4.15.jar.sha1 deleted file mode 100644 index 42a03b5d7a376..0000000000000 --- a/plugins/discovery-azure-classic/licenses/httpcore-4.4.15.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7f2e0c573eaa7a74bac2e89b359e1f73d92a0a1d \ No newline at end of file diff --git a/plugins/discovery-azure-classic/licenses/httpcore-4.4.16.jar.sha1 b/plugins/discovery-azure-classic/licenses/httpcore-4.4.16.jar.sha1 new file mode 100644 index 0000000000000..172110694b5bd --- /dev/null +++ b/plugins/discovery-azure-classic/licenses/httpcore-4.4.16.jar.sha1 @@ -0,0 +1 @@ +51cf043c87253c9f58b539c9f7e44c8894223850 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/httpclient-4.5.13.jar.sha1 b/plugins/discovery-ec2/licenses/httpclient-4.5.13.jar.sha1 deleted file mode 100644 index 3281e21595b39..0000000000000 --- a/plugins/discovery-ec2/licenses/httpclient-4.5.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e5f6cae5ca7ecaac1ec2827a9e2d65ae2869cada \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/httpclient-4.5.14.jar.sha1 b/plugins/discovery-ec2/licenses/httpclient-4.5.14.jar.sha1 new file mode 100644 index 
0000000000000..66e05851c2e3c --- /dev/null +++ b/plugins/discovery-ec2/licenses/httpclient-4.5.14.jar.sha1 @@ -0,0 +1 @@ +1194890e6f56ec29177673f2f12d0b8e627dec98 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/httpcore-4.4.15.jar.sha1 b/plugins/discovery-ec2/licenses/httpcore-4.4.15.jar.sha1 deleted file mode 100644 index 42a03b5d7a376..0000000000000 --- a/plugins/discovery-ec2/licenses/httpcore-4.4.15.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7f2e0c573eaa7a74bac2e89b359e1f73d92a0a1d \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/httpcore-4.4.16.jar.sha1 b/plugins/discovery-ec2/licenses/httpcore-4.4.16.jar.sha1 new file mode 100644 index 0000000000000..172110694b5bd --- /dev/null +++ b/plugins/discovery-ec2/licenses/httpcore-4.4.16.jar.sha1 @@ -0,0 +1 @@ +51cf043c87253c9f58b539c9f7e44c8894223850 \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/httpclient-4.5.13.jar.sha1 b/plugins/discovery-gce/licenses/httpclient-4.5.13.jar.sha1 deleted file mode 100644 index 3281e21595b39..0000000000000 --- a/plugins/discovery-gce/licenses/httpclient-4.5.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e5f6cae5ca7ecaac1ec2827a9e2d65ae2869cada \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/httpclient-4.5.14.jar.sha1 b/plugins/discovery-gce/licenses/httpclient-4.5.14.jar.sha1 new file mode 100644 index 0000000000000..66e05851c2e3c --- /dev/null +++ b/plugins/discovery-gce/licenses/httpclient-4.5.14.jar.sha1 @@ -0,0 +1 @@ +1194890e6f56ec29177673f2f12d0b8e627dec98 \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/httpcore-4.4.15.jar.sha1 b/plugins/discovery-gce/licenses/httpcore-4.4.15.jar.sha1 deleted file mode 100644 index 42a03b5d7a376..0000000000000 --- a/plugins/discovery-gce/licenses/httpcore-4.4.15.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7f2e0c573eaa7a74bac2e89b359e1f73d92a0a1d \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/httpcore-4.4.16.jar.sha1 b/plugins/discovery-gce/licenses/httpcore-4.4.16.jar.sha1 new file mode 100644 index 0000000000000..172110694b5bd --- /dev/null +++ b/plugins/discovery-gce/licenses/httpcore-4.4.16.jar.sha1 @@ -0,0 +1 @@ +51cf043c87253c9f58b539c9f7e44c8894223850 \ No newline at end of file diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index e67ea7ab0a11e..4edb9e0b1913e 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -58,8 +58,8 @@ dependencies { api 'com.azure:azure-storage-blob:12.22.2' api 'org.reactivestreams:reactive-streams:1.0.4' api 'io.projectreactor:reactor-core:3.5.6' - api 'io.projectreactor.netty:reactor-netty:1.1.7' - api 'io.projectreactor.netty:reactor-netty-core:1.1.7' + api 'io.projectreactor.netty:reactor-netty:1.1.8' + api 'io.projectreactor.netty:reactor-netty-core:1.1.8' api 'io.projectreactor.netty:reactor-netty-http:1.1.8' api "org.slf4j:slf4j-api:${versions.slf4j}" api "com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}" diff --git a/plugins/repository-azure/licenses/reactor-netty-1.1.7.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-1.1.7.jar.sha1 deleted file mode 100644 index 01a9b1d34d52f..0000000000000 --- a/plugins/repository-azure/licenses/reactor-netty-1.1.7.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c16497c29f96ea7b1db538cb0ddde55d9be173fe \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-1.1.8.jar.sha1 
b/plugins/repository-azure/licenses/reactor-netty-1.1.8.jar.sha1 new file mode 100644 index 0000000000000..6b6bf1903b16c --- /dev/null +++ b/plugins/repository-azure/licenses/reactor-netty-1.1.8.jar.sha1 @@ -0,0 +1 @@ +d53a9d7d0395285f4c81664494fcd61477626e32 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-core-1.1.7.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-core-1.1.7.jar.sha1 deleted file mode 100644 index 62ed795cb11e9..0000000000000 --- a/plugins/repository-azure/licenses/reactor-netty-core-1.1.7.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d38bb526a501f52c4476b03730c710a96f8fd35b \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-core-1.1.8.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-core-1.1.8.jar.sha1 new file mode 100644 index 0000000000000..707631f4dfe0c --- /dev/null +++ b/plugins/repository-azure/licenses/reactor-netty-core-1.1.8.jar.sha1 @@ -0,0 +1 @@ +48999c4ae27cdcee5eaff9dfd150a8b64624f0f5 \ No newline at end of file diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index 7ac54544f7a1b..3c83e535a91d8 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -66,7 +66,7 @@ dependencies { } api 'org.apache.htrace:htrace-core4:4.2.0-incubating' api "org.apache.logging.log4j:log4j-core:${versions.log4j}" - api 'org.apache.avro:avro:1.11.1' + api 'org.apache.avro:avro:1.11.2' api 'com.google.code.gson:gson:2.10.1' runtimeOnly "com.google.guava:guava:${versions.guava}" api "commons-logging:commons-logging:${versions.commonslogging}" diff --git a/plugins/repository-hdfs/licenses/avro-1.11.1.jar.sha1 b/plugins/repository-hdfs/licenses/avro-1.11.1.jar.sha1 deleted file mode 100644 index f03424516b44e..0000000000000 --- a/plugins/repository-hdfs/licenses/avro-1.11.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -81af5d4b9bdaaf4ba41bcb0df5241355ec34c630 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/avro-1.11.2.jar.sha1 b/plugins/repository-hdfs/licenses/avro-1.11.2.jar.sha1 new file mode 100644 index 0000000000000..ce1a894e0ce6d --- /dev/null +++ b/plugins/repository-hdfs/licenses/avro-1.11.2.jar.sha1 @@ -0,0 +1 @@ +97e62e8be2b37e849f1bdb5a4f08121d47cc9806 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/httpclient-4.5.13.jar.sha1 b/plugins/repository-s3/licenses/httpclient-4.5.13.jar.sha1 deleted file mode 100644 index 3281e21595b39..0000000000000 --- a/plugins/repository-s3/licenses/httpclient-4.5.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e5f6cae5ca7ecaac1ec2827a9e2d65ae2869cada \ No newline at end of file diff --git a/plugins/repository-s3/licenses/httpclient-4.5.14.jar.sha1 b/plugins/repository-s3/licenses/httpclient-4.5.14.jar.sha1 new file mode 100644 index 0000000000000..66e05851c2e3c --- /dev/null +++ b/plugins/repository-s3/licenses/httpclient-4.5.14.jar.sha1 @@ -0,0 +1 @@ +1194890e6f56ec29177673f2f12d0b8e627dec98 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/httpcore-4.4.15.jar.sha1 b/plugins/repository-s3/licenses/httpcore-4.4.15.jar.sha1 deleted file mode 100644 index 42a03b5d7a376..0000000000000 --- a/plugins/repository-s3/licenses/httpcore-4.4.15.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7f2e0c573eaa7a74bac2e89b359e1f73d92a0a1d \ No newline at end of file diff --git a/plugins/repository-s3/licenses/httpcore-4.4.16.jar.sha1 b/plugins/repository-s3/licenses/httpcore-4.4.16.jar.sha1 new file mode 100644 index 
0000000000000..172110694b5bd --- /dev/null +++ b/plugins/repository-s3/licenses/httpcore-4.4.16.jar.sha1 @@ -0,0 +1 @@ +51cf043c87253c9f58b539c9f7e44c8894223850 \ No newline at end of file diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java index cf749eeffd903..49ebce77a59ad 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java @@ -36,6 +36,7 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.ExceptionsHelper; +import org.opensearch.action.ActionListener; import org.opensearch.common.Nullable; import org.opensearch.common.SetOnce; import org.opensearch.common.blobstore.BlobContainer; @@ -296,6 +297,35 @@ private static DeleteObjectsRequest bulkDelete(String bucket, List<String> blobs .build(); } + @Override + public void listBlobsByPrefixInSortedOrder( + String blobNamePrefix, + int limit, + BlobNameSortOrder blobNameSortOrder, + ActionListener<List<BlobMetadata>> listener + ) { + // As AWS S3 returns the list of keys in lexicographic order, we don't have to fetch all the keys in order to sort them. + // We fetch only as many keys as the given limit requires, to optimize the fetch. If the provided sort order is not lexicographic, + // we fall back to the default implementation of fetching all the keys and sorting them. + if (blobNameSortOrder != BlobNameSortOrder.LEXICOGRAPHIC) { + super.listBlobsByPrefixInSortedOrder(blobNamePrefix, limit, blobNameSortOrder, listener); + } else { + if (limit < 0) { + throw new IllegalArgumentException("limit should not be a negative value"); + } + String prefix = blobNamePrefix == null ? keyPath : buildKey(blobNamePrefix); + try (AmazonS3Reference clientReference = blobStore.clientReference()) { + List<BlobMetadata> blobs = executeListing(clientReference, listObjectsRequest(prefix, limit), limit).stream() + .flatMap(listing -> listing.contents().stream()) + .map(s3Object -> new PlainBlobMetadata(s3Object.key().substring(keyPath.length()), s3Object.size())) + .collect(Collectors.toList()); + listener.onResponse(blobs.subList(0, Math.min(limit, blobs.size()))); + } catch (final Exception e) { + listener.onFailure(new IOException("Exception when listing blobs by prefix [" + prefix + "]", e)); + } + } + } + @Override public Map<String, BlobMetadata> listBlobsByPrefix(@Nullable String blobNamePrefix) throws IOException { String prefix = blobNamePrefix == null ? 
keyPath : buildKey(blobNamePrefix); @@ -339,10 +369,25 @@ public Map children() throws IOException { } private static List executeListing(AmazonS3Reference clientReference, ListObjectsV2Request listObjectsRequest) { + return executeListing(clientReference, listObjectsRequest, -1); + } + + private static List executeListing( + AmazonS3Reference clientReference, + ListObjectsV2Request listObjectsRequest, + int limit + ) { return SocketAccess.doPrivileged(() -> { final List results = new ArrayList<>(); + int totalObjects = 0; ListObjectsV2Iterable listObjectsIterable = clientReference.get().listObjectsV2Paginator(listObjectsRequest); - listObjectsIterable.forEach(results::add); + for (ListObjectsV2Response listObjectsV2Response : listObjectsIterable) { + results.add(listObjectsV2Response); + totalObjects += listObjectsV2Response.contents().size(); + if (limit != -1 && totalObjects > limit) { + break; + } + } return results; }); } @@ -356,6 +401,10 @@ private ListObjectsV2Request listObjectsRequest(String keyPath) { .build(); } + private ListObjectsV2Request listObjectsRequest(String keyPath, int limit) { + return listObjectsRequest(keyPath).toBuilder().maxKeys(Math.min(limit, 1000)).build(); + } + private String buildKey(String blobName) { return keyPath + blobName; } diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java index ec16f216f1777..a2a7ca8d8bdd5 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java @@ -33,6 +33,9 @@ package org.opensearch.repositories.s3; import org.mockito.ArgumentCaptor; +import org.opensearch.action.ActionListener; +import org.opensearch.common.blobstore.BlobContainer; +import org.opensearch.common.blobstore.BlobMetadata; import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.blobstore.BlobStoreException; import org.opensearch.common.blobstore.DeleteResult; @@ -74,9 +77,13 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.Comparator; +import java.util.HashSet; import java.util.Iterator; import java.util.List; +import java.util.Map; import java.util.NoSuchElementException; +import java.util.Set; import java.util.UUID; import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Collectors; @@ -84,12 +91,12 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; +import static org.mockito.Mockito.when; +import static org.mockito.Mockito.mock; import static org.mockito.Mockito.any; import static org.mockito.Mockito.doAnswer; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; +import static org.mockito.Mockito.times; public class S3BlobStoreContainerTests extends OpenSearchTestCase { @@ -187,26 +194,34 @@ private static class MockListObjectsV2ResponseIterator implements Iterator keysListed = new ArrayList<>(); + private final List keysListed; private final boolean throwExceptionOnNextInvocation; public MockListObjectsV2ResponseIterator(int totalPageCount, int s3ObjectsPerPage, long s3ObjectSize) { - this.totalPageCount = totalPageCount; - this.s3ObjectsPerPage = s3ObjectsPerPage; - this.s3ObjectSize = s3ObjectSize; 
- this.throwExceptionOnNextInvocation = false; + this(totalPageCount, s3ObjectsPerPage, s3ObjectSize, ""); + } + + public MockListObjectsV2ResponseIterator(int totalPageCount, int s3ObjectsPerPage, long s3ObjectSize, String blobPath) { + this(totalPageCount, s3ObjectsPerPage, s3ObjectSize, blobPath, false); } public MockListObjectsV2ResponseIterator( int totalPageCount, int s3ObjectsPerPage, long s3ObjectSize, + String blobPath, boolean throwExceptionOnNextInvocation ) { this.totalPageCount = totalPageCount; this.s3ObjectsPerPage = s3ObjectsPerPage; this.s3ObjectSize = s3ObjectSize; this.throwExceptionOnNextInvocation = throwExceptionOnNextInvocation; + keysListed = new ArrayList<>(); + for (int i = 0; i < totalPageCount * s3ObjectsPerPage; i++) { + keysListed.add(blobPath + UUID.randomUUID().toString()); + } + // S3 lists keys in lexicographic order + keysListed.sort(String::compareTo); } @Override @@ -220,11 +235,12 @@ public ListObjectsV2Response next() { throw SdkException.builder().build(); } if (currInvocationCount.getAndIncrement() < totalPageCount) { - String s3ObjectKey = UUID.randomUUID().toString(); - keysListed.add(s3ObjectKey); - return ListObjectsV2Response.builder() - .contents(Collections.nCopies(s3ObjectsPerPage, S3Object.builder().key(s3ObjectKey).size(s3ObjectSize).build())) - .build(); + List s3Objects = new ArrayList<>(); + for (int i = 0; i < s3ObjectsPerPage; i++) { + String s3ObjectKey = keysListed.get((currInvocationCount.get() - 1) * s3ObjectsPerPage + i); + s3Objects.add(S3Object.builder().key(s3ObjectKey).size(s3ObjectSize).build()); + } + return ListObjectsV2Response.builder().contents(s3Objects).build(); } throw new NoSuchElementException(); } @@ -232,6 +248,10 @@ public ListObjectsV2Response next() { public List getKeysListed() { return keysListed; } + + public int numberOfPagesFetched() { + return currInvocationCount.get(); + } } public void testDelete() throws IOException { @@ -273,10 +293,8 @@ public void testDelete() throws IOException { // keysDeleted will have blobPath also assertEquals(listObjectsV2ResponseIterator.getKeysListed().size(), keysDeleted.size() - 1); assertTrue(keysDeleted.contains(blobPath.buildAsString())); - assertArrayEquals( - listObjectsV2ResponseIterator.getKeysListed().toArray(String[]::new), - keysDeleted.stream().filter(key -> !blobPath.buildAsString().equals(key)).toArray(String[]::new) - ); + keysDeleted.remove(blobPath.buildAsString()); + assertEquals(new HashSet<>(listObjectsV2ResponseIterator.getKeysListed()), new HashSet<>(keysDeleted)); } public void testDeleteItemLevelErrorsDuringDelete() { @@ -772,4 +790,112 @@ private static void assertNumberOfMultiparts(final int expectedParts, final long assertEquals("Expected number of parts [" + expectedParts + "] but got [" + result.v1() + "]", expectedParts, (long) result.v1()); assertEquals("Expected remaining [" + expectedRemaining + "] but got [" + result.v2() + "]", expectedRemaining, (long) result.v2()); } + + public void testListBlobsByPrefix() throws IOException { + final S3BlobStore blobStore = mock(S3BlobStore.class); + when(blobStore.getStatsMetricPublisher()).thenReturn(new StatsMetricPublisher()); + + final S3Client client = mock(S3Client.class); + final AmazonS3Reference clientReference = new AmazonS3Reference(client); + when(blobStore.clientReference()).thenReturn(clientReference); + + BlobPath blobPath = mock(BlobPath.class); + when(blobPath.buildAsString()).thenReturn("/dummy/path"); + final S3BlobContainer blobContainer = new S3BlobContainer(blobPath, blobStore); 
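+ // The mocked ListObjectsV2 paginator below emulates S3 by returning keys in lexicographic order across two pages of five objects each.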
+ + final ListObjectsV2Iterable listObjectsV2Iterable = mock(ListObjectsV2Iterable.class); + when(client.listObjectsV2Paginator(any(ListObjectsV2Request.class))).thenReturn(listObjectsV2Iterable); + + MockListObjectsV2ResponseIterator iterator = new MockListObjectsV2ResponseIterator(2, 5, 100); + when(listObjectsV2Iterable.iterator()).thenReturn(iterator); + + Map listOfBlobs = blobContainer.listBlobsByPrefix(null); + assertEquals(10, listOfBlobs.size()); + + Set keys = iterator.keysListed.stream() + .map(s -> s.substring(blobPath.buildAsString().length())) + .collect(Collectors.toSet()); + assertEquals(keys, listOfBlobs.keySet()); + } + + private void testListBlobsByPrefixInLexicographicOrder( + int limit, + int expectedNumberofPagesFetched, + BlobContainer.BlobNameSortOrder blobNameSortOrder + ) throws IOException { + final S3BlobStore blobStore = mock(S3BlobStore.class); + when(blobStore.getStatsMetricPublisher()).thenReturn(new StatsMetricPublisher()); + + final S3Client client = mock(S3Client.class); + final AmazonS3Reference clientReference = new AmazonS3Reference(client); + when(blobStore.clientReference()).thenReturn(clientReference); + + BlobPath blobPath = mock(BlobPath.class); + when(blobPath.buildAsString()).thenReturn("/dummy/path"); + final S3BlobContainer blobContainer = new S3BlobContainer(blobPath, blobStore); + + final ListObjectsV2Iterable listObjectsV2Iterable = mock(ListObjectsV2Iterable.class); + when(client.listObjectsV2Paginator(any(ListObjectsV2Request.class))).thenReturn(listObjectsV2Iterable); + + final MockListObjectsV2ResponseIterator iterator = new MockListObjectsV2ResponseIterator(2, 5, 100, blobPath.buildAsString()); + when(listObjectsV2Iterable.iterator()).thenReturn(iterator); + + if (limit >= 0) { + blobContainer.listBlobsByPrefixInSortedOrder(null, limit, blobNameSortOrder, new ActionListener<>() { + @Override + public void onResponse(List blobMetadata) { + int actualLimit = Math.max(0, Math.min(limit, 10)); + assertEquals(actualLimit, blobMetadata.size()); + + List keys = iterator.keysListed.stream() + .map(s -> s.substring(blobPath.buildAsString().length())) + .collect(Collectors.toList()); + Comparator keysComparator = String::compareTo; + if (blobNameSortOrder != BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC) { + keysComparator = Collections.reverseOrder(String::compareTo); + } + keys.sort(keysComparator); + List sortedKeys = keys.subList(0, actualLimit); + assertEquals(sortedKeys, blobMetadata.stream().map(BlobMetadata::name).collect(Collectors.toList())); + assertEquals(expectedNumberofPagesFetched, iterator.numberOfPagesFetched()); + } + + @Override + public void onFailure(Exception e) { + fail("blobContainer.listBlobsByPrefixInLexicographicOrder failed with exception: " + e.getMessage()); + } + }); + } else { + assertThrows( + IllegalArgumentException.class, + () -> blobContainer.listBlobsByPrefixInSortedOrder(null, limit, blobNameSortOrder, new ActionListener<>() { + @Override + public void onResponse(List blobMetadata) {} + + @Override + public void onFailure(Exception e) {} + }) + ); + } + } + + public void testListBlobsByPrefixInLexicographicOrderWithNegativeLimit() throws IOException { + testListBlobsByPrefixInLexicographicOrder(-5, 0, BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC); + } + + public void testListBlobsByPrefixInLexicographicOrderWithZeroLimit() throws IOException { + testListBlobsByPrefixInLexicographicOrder(0, 1, BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC); + } + + public void 
testListBlobsByPrefixInLexicographicOrderWithLimitLessThanPageSize() throws IOException { + testListBlobsByPrefixInLexicographicOrder(2, 1, BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC); + } + + public void testListBlobsByPrefixInLexicographicOrderWithLimitGreaterThanPageSize() throws IOException { + testListBlobsByPrefixInLexicographicOrder(8, 2, BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC); + } + + public void testListBlobsByPrefixInLexicographicOrderWithLimitGreaterThanNumberOfRecords() throws IOException { + testListBlobsByPrefixInLexicographicOrder(12, 2, BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC); + } } diff --git a/plugins/telemetry-otel/build.gradle b/plugins/telemetry-otel/build.gradle index 7a56621be5f1e..2c275388cce38 100644 --- a/plugins/telemetry-otel/build.gradle +++ b/plugins/telemetry-otel/build.gradle @@ -54,3 +54,25 @@ thirdPartyAudit { 'io.opentelemetry.sdk.autoconfigure.spi.traces.ConfigurableSpanExporterProvider' ) } + +tasks.named("bundlePlugin").configure { + from('config/telemetry-otel') { + into 'config' + } +} + +tasks.register("writeTestJavaPolicy") { + doLast { + final File tmp = file("${buildDir}/tmp") + if (tmp.exists() == false && tmp.mkdirs() == false) { + throw new GradleException("failed to create temporary directory [${tmp}]") + } + final File javaPolicy = file("${tmp}/java.policy") + javaPolicy.write( + [ + "grant {", + " permission java.io.FilePermission \"config\", \"read\";", + "};" + ].join("\n")) + } +} diff --git a/plugins/telemetry-otel/config/telemetry-otel/log4j2.properties b/plugins/telemetry-otel/config/telemetry-otel/log4j2.properties new file mode 100644 index 0000000000000..544f42bd5513b --- /dev/null +++ b/plugins/telemetry-otel/config/telemetry-otel/log4j2.properties @@ -0,0 +1,27 @@ +# +# SPDX-License-Identifier: Apache-2.0 +# +# The OpenSearch Contributors require contributions made to +# this file be licensed under the Apache-2.0 license or a +# compatible open source license. 
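+# +# The appender below routes spans emitted by io.opentelemetry.exporter.logging.LoggingSpanExporter +# to a dedicated rolling trace log: files roll over at 1GB, at most 4 gzipped archives are kept, +# and additivity is disabled so span output does not also reach the main server log.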
+# + + +appender.tracing.type = RollingFile +appender.tracing.name = tracing +appender.tracing.fileName = ${sys:opensearch.logs.base_path}${sys:file.separator}${sys:opensearch.logs.cluster_name}_otel_traces.log +appender.tracing.filePermissions = rw-r----- +appender.tracing.layout.type = PatternLayout +appender.tracing.layout.pattern = %m%n +appender.tracing.filePattern = ${sys:opensearch.logs.base_path}${sys:file.separator}${sys:opensearch.logs.cluster_name}_otel_traces-%i.log.gz +appender.tracing.policies.type = Policies +appender.tracing.policies.size.type = SizeBasedTriggeringPolicy +appender.tracing.policies.size.size = 1GB +appender.tracing.strategy.type = DefaultRolloverStrategy +appender.tracing.strategy.max = 4 + + +logger.exporter.name = io.opentelemetry.exporter.logging.LoggingSpanExporter +logger.exporter.level = INFO +logger.exporter.appenderRef.tracing.ref = tracing +logger.exporter.additivity = false diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelResourceProvider.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelResourceProvider.java index 04bade9ec942a..292165979c2f2 100644 --- a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelResourceProvider.java +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelResourceProvider.java @@ -42,7 +42,7 @@ private OTelResourceProvider() {} public static OpenTelemetry get(Settings settings) { return get( settings, - new LoggingSpanExporter(), + LoggingSpanExporter.create(), ContextPropagators.create(W3CTraceContextPropagator.getInstance()), Sampler.alwaysOn() ); diff --git a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelSpan.java b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelSpan.java index 23a2d9baa3e6e..ba63df4ae47a1 100644 --- a/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelSpan.java +++ b/plugins/telemetry-otel/src/main/java/org/opensearch/telemetry/tracing/OTelSpan.java @@ -9,6 +9,7 @@ package org.opensearch.telemetry.tracing; import io.opentelemetry.api.trace.Span; +import io.opentelemetry.api.trace.StatusCode; /** * Default implementation of {@link Span} using Otel span. It keeps a reference to the OpenTelemetry Span and handles span @@ -48,6 +49,11 @@ public void addAttribute(String key, Boolean value) { delegateSpan.setAttribute(key, value); } + @Override + public void setError(Exception exception) { + delegateSpan.setStatus(StatusCode.ERROR, exception.getMessage()); + } + @Override public void addEvent(String event) { delegateSpan.addEvent(event); diff --git a/qa/mixed-cluster/src/test/java/org/opensearch/backwards/IndexingIT.java b/qa/mixed-cluster/src/test/java/org/opensearch/backwards/IndexingIT.java index b867b90af333c..f1f469544f634 100644 --- a/qa/mixed-cluster/src/test/java/org/opensearch/backwards/IndexingIT.java +++ b/qa/mixed-cluster/src/test/java/org/opensearch/backwards/IndexingIT.java @@ -113,8 +113,6 @@ private void printClusterRouting() throws IOException, ParseException { /** * This test verifies that segment replication does not break when primary shards are on lower OS version. It does this * by verifying that replica shards contain the same number of documents as the primary's. 
- * - * @throws Exception */ public void testIndexingWithPrimaryOnBwcNodes() throws Exception { if (UPGRADE_FROM_VERSION.before(Version.V_2_4_0)) { @@ -164,8 +162,6 @@ public void testIndexingWithPrimaryOnBwcNodes() throws Exception { * This test creates a cluster with primary on higher version but due to {@link org.opensearch.cluster.routing.allocation.decider.NodeVersionAllocationDecider}; * replica shard allocation on lower OpenSearch version is prevented. Thus, this test covers the use case where * nodes containing the primary shard are running on a higher OS version while replicas are unassigned. - * - * @throws Exception */ public void testIndexingWithReplicaOnBwcNodes() throws Exception { if (UPGRADE_FROM_VERSION.before(Version.V_2_4_0)) { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/100_partial_flat_object.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/100_partial_flat_object.yml new file mode 100644 index 0000000000000..91e4127da9c32 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/index/100_partial_flat_object.yml @@ -0,0 +1,606 @@ +--- +# The test setup includes: +# - Create flat_object mapping for test_partial_flat_object index +# - Index three example documents +# - Refresh the index so it is ready for search tests + +setup: + - do: + indices.create: + index: test_partial_flat_object + body: + mappings: + properties: + issue: + properties: + number: + type: "integer" + labels: + type: "flat_object" + - do: + index: + index: test_partial_flat_object + id: 1 + body: { + "issue": { + "number": 123, + "labels": { + "version": "2.2", + "backport": [ + "2.0", + "1.9" + ], + "category": { + "type": "API", + "level": "bug" + }, + "createdDate": "2023-01-01", + "comment": [ [ "Doe","Shipped" ],[ "John","Approved" ] ], + "views": 288, + "priority": 5.00 + } + } + } + + - do: + index: + index: test_partial_flat_object + id: 2 + body: { + "issue": { + "number": 456, + "labels": { + "author": "Liu", + "version": "2.1", + "backport": [ + "2.0", + "1.3" + ], + "category": { + "type": "API", + "level": "enhancement" + }, + "createdDate": "2023-02-01", + "comment": [ [ "Mike","LGTM" ],[ "John","Approved" ] ], + "views": 3333, + "priority": 1.50 + } + } + } + + - do: + index: + index: test_partial_flat_object + id: 3 + body: { + "issue": { + "number": 999, + "labels": [ { + "version": "1.1", + "backport": [ + "1.0", + "0.9" + ], + "category": { + "type": "Module", + "level": "feature" + } + } ] + } + } + + - do: + indices.refresh: + index: test_partial_flat_object +--- +# Delete the index during teardown +teardown: + - do: + indices.delete: + index: test_partial_flat_object + + +--- +# Verify that mappings under the labels field did not expand +# and no dynamic fields were created. 
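+# (flat_object indexes each leaf value both under its full dot path, e.g. issue.labels.version, +# and under the root field issue.labels, which is why the queries below match with or without the exact path)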
+"Mappings": + - skip: + version: " - 2.99.99" + reason: "flat_object is introduced in 3.0.0 in main branch" + + - do: + indices.get_mapping: + index: test_partial_flat_object + - is_true: test_partial_flat_object.mappings + - match: { test_partial_flat_object.mappings.properties.issue.properties.number.type: integer } + - match: { test_partial_flat_object.mappings.properties.issue.properties.labels.type: flat_object } + # https://github.com/opensearch-project/OpenSearch/tree/main/rest-api-spec/src/main/resources/rest-api-spec/test#length + - length: { test_partial_flat_object.mappings.properties.issue.properties: 2 } + - length: { test_partial_flat_object.mappings.properties.issue.properties.labels: 1 } + + +--- +"Supported queries": + - skip: + version: " - 2.99.99" + reason: "flat_object is introduced in 3.0.0 in main branch" + + + # Verify Document Count + - do: + search: + body: { + query: { + match_all: { } + } + } + + - length: { hits.hits: 3 } + + # Match Query with exact dot path. + - do: + search: + body: { + _source: true, + query: { + match: { "issue.labels.version": "2.1" } + } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.issue.labels.version: "2.1" } + + # Match Query without exact dot path. + - do: + search: + body: { + _source: true, + query: { + match: { issue.labels: "2.1" } + } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.issue.labels.version: "2.1" } + + # Multi Match Query with exact dot path. + - do: + search: + body: { + _source: true, + query: { + multi_match: { + "query": "2.0", + "fields": [ "issue.labels.version", "issue.labels.backport" ] + } + } + } + + - length: { hits.hits: 2 } + - match: { hits.hits.0._source.issue.labels.backport: [ "2.0", "1.9" ] } + - match: { hits.hits.1._source.issue.labels.backport: [ "2.0", "1.3" ] } + + # Term Query1 with exact dot path for date + - do: + search: + body: { + _source: true, + query: { + term: { issue.labels.createdDate: "2023-01-01" } + } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.issue.labels.createdDate: "2023-01-01" } + + # Term Query1 without exact dot path for date + - do: + search: + body: { + _source: true, + query: { + term: { issue.labels: "2023-01-01" } + } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.issue.labels.createdDate: "2023-01-01" } + + + # Term Query2 with dot path for string + - do: + search: + body: { + _source: true, + query: { + term: { "issue.labels.category.type": "API" } + } + } + + - length: { hits.hits: 2 } + - match: { hits.hits.0._source.issue.labels.category.type: "API" } + - match: { hits.hits.1._source.issue.labels.category.type: "API" } + + # Term Query2 without exact dot path. 
+ - do: + search: + body: { + _source: true, + query: { + term: { issue.labels: "API" } + } + } + + - length: { hits.hits: 2 } + - match: { hits.hits.0._source.issue.labels.category.type: "API" } + - match: { hits.hits.1._source.issue.labels.category.type: "API" } + + # Term Query3 with dot path for array + - do: + search: + body: { + _source: true, + query: { + term: { issue.labels.backport: "1.9" } + } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.issue.labels.backport: [ "2.0", "1.9" ] } + + # Term Query3 without dot path for array + - do: + search: + body: { + _source: true, + query: { + term: { issue.labels: "1.9" } + } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.issue.labels.backport: [ "2.0", "1.9" ] } + + # Term Query4 with dot path for nested-array + - do: + search: + body: { + _source: true, + query: { + term: { issue.labels.comment: "LGTM" } + } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.issue.labels.comment: [ [ "Mike","LGTM" ],[ "John","Approved" ] ] } + + # Term Query4 without dot path for nested-array + - do: + search: + body: { + _source: true, + query: { + term: { issue.labels: "LGTM" } + } + } + + # Term Query5 with dot path for array + - do: + search: + body: { + _source: true, + query: { + term: { issue.labels.category.type: "Module" } + } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.issue.labels.0.category.type: "Module" } + + # Term Query5 without dot path for array + - do: + search: + body: { + _source: true, + query: { + term: { issue.labels: "Module" } + } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.issue.labels.0.category.type: "Module" } + + # Terms Query without dot path. + - do: + search: + body: { + _source: true, + query: { + terms: { issue.labels: [ "John","Mike" ] } + } + } + + - length: { hits.hits: 2 } + - match: { hits.hits.0._source.issue.labels.comment: [ [ "Doe","Shipped" ],[ "John","Approved" ] ] } + - match: { hits.hits.1._source.issue.labels.comment: [ [ "Mike","LGTM" ],[ "John","Approved" ] ] } + + # Terms Query with dot path. + - do: + search: + body: { + _source: true, + query: { + terms: { issue.labels.comment: [ "John","Mike" ] } + } + } + + - length: { hits.hits: 2 } + - match: { hits.hits.0._source.issue.labels.comment: [ [ "Doe","Shipped" ],[ "John","Approved" ] ] } + - match: { hits.hits.1._source.issue.labels.comment: [ [ "Mike","LGTM" ],[ "John","Approved" ] ] } + + # Prefix Query with dot path. + - do: + search: + body: { + _source: true, + query: { + "prefix": { + "issue.labels.comment": { + "value": "Mi" + } + } + } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.issue.labels.comment: [ [ "Mike","LGTM" ],[ "John","Approved" ] ] } + + # Prefix Query without dot path. + - do: + search: + body: { + _source: true, + query: { + "prefix": { + "issue.labels": { + "value": "Mi" + } + } + } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.issue.labels.comment: [ [ "Mike","LGTM" ],[ "John","Approved" ] ] } + + # Range Query with dot path. + - do: + search: + body: { + _source: true, + query: { + "range": { + "issue.labels.version": { + "gte": "2.1", + "lte": "3.0" + } + } + } + } + + - length: { hits.hits: 2 } + - match: { hits.hits.0._source.issue.labels.version: "2.2" } + - match: { hits.hits.1._source.issue.labels.version: "2.1" } + + # Range Query without dot path. 
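+ # (flat_object leaf values are indexed as keywords, so the range comparisons here are lexicographic string comparisons)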
+ - do: + search: + body: { + _source: true, + query: { + "range": { + "issue.labels": { + "gte": "2.1", + "lte": "3.0" + } + } + } + } + + - length: { hits.hits: 2 } + - match: { hits.hits.0._source.issue.labels.version: "2.2" } + - match: { hits.hits.1._source.issue.labels.version: "2.1" } + + # Range Query with integer input with dot path. + - do: + search: + body: { + _source: true, + query: { + "range": { + "issue.labels.views": { + "gte": 3000, + "lte": 4000 + } + } + } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.issue.labels.views: 3333 } + + # Range Query with integer input without dot path. + - do: + search: + body: { + _source: true, + query: { + "range": { + "issue.labels": { + "gte": 3000, + "lte": 4000 + } + } + } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.issue.labels.views: 3333 } + + + # Range Query with double input with dot path. + - do: + search: + body: { + _source: true, + query: { + "range": { + "issue.labels.priority": { + "gte": 4.1234, + "lte": 5.1234 + } + } + } + } + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.issue.labels.priority: 5.00 } + + # Range Query with double input without dot path. + - do: + search: + body: { + _source: true, + query: { + "range": { + "issue.labels": { + "gte": 4.1234, + "lte": 5.1234 + } + } + } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.issue.labels.priority: 5.00 } + + + # Exists Query with dot path. + - do: + search: + body: { + _source: true, + query: { + "exists": { + "field": issue.labels.priority + } + } + } + + - length: { hits.hits: 2 } + + # Exists Query with nested dot path, use the flat_object_field_name.last_key + - do: + search: + body: { + _source: true, + query: { + "exists": { + "field": issue.labels.type + } + } + } + + - length: { hits.hits: 3 } + + # Exists Query without dot path for the flat_object_field_name + - do: + search: + body: { + _source: true, + query: { + "exists": { + "field": issue.labels + } + } + } + + - length: { hits.hits: 3 } + + # Exists Query2 with dot path for one hit + - do: + search: + body: { + _source: true, + query: { + "exists": { + "field": issue.labels.author + } + } + } + + - length: { hits.hits: 1 } + + # Query_string Query without dot path. + - do: + search: + body: { + _source: true, + query: { + "query_string": { + "fields": [ "issue.labels" ], + "query": "Doe OR Mike" + } + } + } + + - length: { hits.hits: 2 } + - match: { hits.hits.0._source.issue.labels.comment: [ [ "Doe","Shipped" ],[ "John","Approved" ] ] } + - match: { hits.hits.1._source.issue.labels.comment: [ [ "Mike","LGTM" ],[ "John","Approved" ] ] } + + # Query_string Query with dot path. + - do: + search: + body: { + _source: true, + query: { + "query_string": { + "fields": [ "issue.labels.comment" ], + "query": "Doe OR Mike" + } + } + } + + - length: { hits.hits: 2 } + - match: { hits.hits.0._source.issue.labels.comment: [ [ "Doe","Shipped" ],[ "John","Approved" ] ] } + - match: { hits.hits.1._source.issue.labels.comment: [ [ "Mike","LGTM" ],[ "John","Approved" ] ] } + + # Simple_query_string Query without full dot path. + - do: + search: + body: { + _source: true, + query: { + "simple_query_string": { + "query": "Doe", + "fields": [ "issue.labels" ] + } + } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.issue.labels.comment: [ [ "Doe","Shipped" ],[ "John","Approved" ] ] } + + # Simple_query_string Query with dot path. 
+ - do: + search: + body: { + _source: true, + query: { + "simple_query_string": { + "query": "Doe", + "fields": [ "issue.labels.comment" ] + } + } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.issue.labels.comment: [ [ "Doe","Shipped" ],[ "John","Approved" ] ] } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/105_partial_flat_object_nested.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/105_partial_flat_object_nested.yml new file mode 100644 index 0000000000000..ce172c2773e1f --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/index/105_partial_flat_object_nested.yml @@ -0,0 +1,636 @@ +--- +# The test setup includes: +# - Create flat_object mapping for test_partial_flat_object_nested index +# - Index two example documents +# - Refresh the index so it is ready for search tests + +setup: + - do: + indices.create: + index: test_partial_flat_object_nested + body: + mappings: + properties: + issue: + type: "nested" + properties: + number: + type: "integer" + labels: + type: "flat_object" + - do: + index: + index: test_partial_flat_object_nested + id: 1 + body: { + "issue": [ + { + "number": 123, + "labels": { + "version": "2.2", + "backport": [ + "2.0", + "1.9" + ], + "category": { + "type": "API", + "level": "bug" + }, + "createdDate": "2023-01-01", + "comment": [ [ "Doe","Shipped" ],[ "John","Approved" ] ], + "views": 288, + "priority": 5.00 + } + } + ] + } + + - do: + index: + index: test_partial_flat_object_nested + id: 2 + body: { + "issue": [ + { + "number": 456, + "labels": { + "author": "Liu", + "version": "2.1", + "backport": [ + "2.0", + "1.3" + ], + "category": { + "type": "API", + "level": "enhancement" + }, + "createdDate": "2023-02-01", + "comment": [ [ "Mike","LGTM" ],[ "John","Approved" ] ], + "views": 3333, + "priority": 1.50 + } + } + ] + } + + - do: + indices.refresh: + index: test_partial_flat_object_nested +--- +# Delete Index when connection is teardown +teardown: + - do: + indices.delete: + index: test_partial_flat_object_nested + + +--- +# Verify that mappings under the catalog field did not expand +# and no dynamic fields were created. +"Mappings": + - skip: + version: " - 2.99.99" + reason: "flat_object is introduced in 3.0.0 in main branch" + + - do: + indices.get_mapping: + index: test_partial_flat_object_nested + - is_true: test_partial_flat_object_nested.mappings + - match: { test_partial_flat_object_nested.mappings.properties.issue.properties.number.type: integer } + - match: { test_partial_flat_object_nested.mappings.properties.issue.properties.labels.type: flat_object } + # https://github.com/opensearch-project/OpenSearch/tree/main/rest-api-spec/src/main/resources/rest-api-spec/test#length + - length: { test_partial_flat_object_nested.mappings.properties.issue.properties: 2 } + - length: { test_partial_flat_object_nested.mappings.properties.issue.properties.labels: 1 } + + +--- +"Supported queries": + - skip: + version: " - 2.99.99" + reason: "flat_object is introduced in 3.0.0 in main branch" + + + # Verify Document Count + - do: + search: + body: { + query: { + match_all: { } + } + } + + - length: { hits.hits: 2 } + + # Match Query with exact dot path. + - do: + search: + body: { + _source: true, + query: { + nested: { + path: "issue", + query: { + match: { "issue.labels.version": "2.1" } + } + } + } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.issue.0.labels.version: "2.1" } + + # Match Query without exact dot path. 
+ - do: + search: + body: { + _source: true, + query: { + nested: { + path: "issue", + query: { + match: { "issue.labels": "2.1" } + } + } + } + } + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.issue.0.labels.version: "2.1" } + + # Term Query1 with exact dot path for date + - do: + search: + body: { + _source: true, + query: { + nested: { + path: "issue", + query: { + term: { issue.labels.createdDate: "2023-01-01" } + } + } } } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.issue.0.labels.createdDate: "2023-01-01" } + + # Term Query1 without exact dot path for date + - do: + search: + body: { + _source: true, + query: { + nested: { + path: "issue", + query: { + term: { issue.labels: "2023-01-01" } + } } } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.issue.0.labels.createdDate: "2023-01-01" } + + # Term Query2 with dot path for string + - do: + search: + body: { + _source: true, + query: { + nested: { + path: "issue", + query: { + term: { "issue.labels.category.type": "API" } + } } } + } + + - length: { hits.hits: 2 } + - match: { hits.hits.0._source.issue.0.labels.category.type: "API" } + - match: { hits.hits.1._source.issue.0.labels.category.type: "API" } + + # Term Query2 without exact dot path. + - do: + search: + body: { + _source: true, + query: { + nested: { + path: "issue", + query: { + term: { issue.labels: "API" } + } } } + } + + - length: { hits.hits: 2 } + - match: { hits.hits.0._source.issue.0.labels.category.type: "API" } + - match: { hits.hits.1._source.issue.0.labels.category.type: "API" } + + # Term Query3 with dot path for array + - do: + search: + body: { + _source: true, + query: { + nested: { + path: "issue", + query: { + term: { issue.labels.backport: "1.9" } + } } } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.issue.0.labels.backport: [ "2.0", "1.9" ] } + + # Term Query3 without dot path for array + - do: + search: + body: { + _source: true, + query: { + nested: { + path: "issue", + query: { + term: { issue.labels: "1.9" } + } } } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.issue.0.labels.backport: [ "2.0", "1.9" ] } + + # Term Query4 with dot path for nested-array + - do: + search: + body: { + _source: true, + query: { + nested: { + path: "issue", + query: { + term: { issue.labels.comment: "LGTM" } + } } } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.issue.0.labels.comment: [ [ "Mike","LGTM" ],[ "John","Approved" ] ] } + + # Term Query4 without dot path for nested-array + - do: + search: + body: { + _source: true, + query: { + nested: { + path: "issue", + query: { + term: { issue.labels: "LGTM" } } } + } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.issue.0.labels.comment: [ [ "Mike","LGTM" ],[ "John","Approved" ] ] } + + # Terms Query without dot path. + - do: + search: + body: { + _source: true, + query: { + nested: { + path: "issue", + query: { + terms: { issue.labels: [ "John","Mike" ] } } } + } + } + + - length: { hits.hits: 2 } + - match: { hits.hits.0._source.issue.0.labels.comment: [ [ "Doe","Shipped" ],[ "John","Approved" ] ] } + - match: { hits.hits.1._source.issue.0.labels.comment: [ [ "Mike","LGTM" ],[ "John","Approved" ] ] } + + # Terms Query with dot path. 
+ - do: + search: + body: { + _source: true, + query: { + nested: { + path: "issue", + query: { + terms: { issue.labels.comment: [ "John","Mike" ] } } } + } + } + + - length: { hits.hits: 2 } + - match: { hits.hits.0._source.issue.0.labels.comment: [ [ "Doe","Shipped" ],[ "John","Approved" ] ] } + - match: { hits.hits.1._source.issue.0.labels.comment: [ [ "Mike","LGTM" ],[ "John","Approved" ] ] } + + # Prefix Query with dot path. + - do: + search: + body: { + _source: true, + query: { + nested: { + path: "issue", + query: { + "prefix": { + "issue.labels.comment": { + "value": "Mi" + } } } + } + } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.issue.0.labels.comment: [ [ "Mike","LGTM" ],[ "John","Approved" ] ] } + + # Prefix Query without dot path. + - do: + search: + body: { + _source: true, + query: { + nested: { + path: "issue", + query: { + "prefix": { + "issue.labels": { + "value": "Mi" + } + } + } } } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.issue.0.labels.comment: [ [ "Mike","LGTM" ],[ "John","Approved" ] ] } + + # Range Query with dot path. + - do: + search: + body: { + _source: true, + query: { + nested: { + path: "issue", + query: { + "range": { + "issue.labels.version": { + "gte": "2.1", + "lte": "3.0" + } } } + } + } + } + + - length: { hits.hits: 2 } + - match: { hits.hits.0._source.issue.0.labels.version: "2.2" } + - match: { hits.hits.1._source.issue.0.labels.version: "2.1" } + + # Range Query without dot path. + - do: + search: + body: { + _source: true, + query: { + nested: { + path: "issue", + query: { + "range": { + "issue.labels": { + "gte": "2.1", + "lte": "3.0" + } } } + } + } + } + + - length: { hits.hits: 2 } + - match: { hits.hits.0._source.issue.0.labels.version: "2.2" } + - match: { hits.hits.1._source.issue.0.labels.version: "2.1" } + + # Range Query with integer input with dot path. + - do: + search: + body: { + _source: true, + query: { + nested: { + path: "issue", + query: { + "range": { + "issue.labels.views": { + "gte": 3000, + "lte": 4000 + } } } + } + } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.issue.0.labels.views: 3333 } + + # Range Query with integer input without dot path. + - do: + search: + body: { + _source: true, + query: { + nested: { + path: "issue", + query: { + "range": { + "issue.labels": { + "gte": 3000, + "lte": 4000 + } } } + } + } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.issue.0.labels.views: 3333 } + + + # Range Query with double input with dot path. + - do: + search: + body: { + _source: true, + query: { + nested: { + path: "issue", + query: { + "range": { + "issue.labels.priority": { + "gte": 4.1234, + "lte": 5.1234 + } } } + } + } + } + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.issue.0.labels.priority: 5.00 } + + # Range Query with double input without dot path. + - do: + search: + body: { + _source: true, + query: { + nested: { + path: "issue", + query: { + "range": { + "issue.labels": { + "gte": 4.1234, + "lte": 5.1234 + } } } + } + } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.issue.0.labels.priority: 5.00 } + + + # Exists Query with dot path. 
+ - do: + search: + body: { + _source: true, + query: { + nested: { + path: "issue", + query: { + "exists": { + "field": issue.labels.priority + } } } + } + } + + - length: { hits.hits: 2 } + + # Exists Query with nested dot path, use the flat_object_field_name.last_key + - do: + search: + body: { + _source: true, + query: { + nested: { + path: "issue", + query: { + "exists": { + "field": issue.labels.type + } } } + } + } + + - length: { hits.hits: 2 } + + # Exists Query without dot path for the flat_object_field_name + - do: + search: + body: { + _source: true, + query: { + nested: { + path: "issue", + query: { + "exists": { + "field": issue.labels + } } } + } + } + + - length: { hits.hits: 2 } + + # Exists Query2 with dot path for one hit + - do: + search: + body: { + _source: true, + query: { + nested: { + path: "issue", + query: { + "exists": { + "field": issue.labels.author + } } } + } + } + + - length: { hits.hits: 1 } + + # Query_string Query without dot path. + - do: + search: + body: { + _source: true, + query: { + nested: { + path: "issue", + query: { + "query_string": { + "fields": [ "issue.labels" ], + "query": "Doe OR Mike" + } } } + } + } + + - length: { hits.hits: 2 } + - match: { hits.hits.0._source.issue.0.labels.comment: [ [ "Doe","Shipped" ],[ "John","Approved" ] ] } + - match: { hits.hits.1._source.issue.0.labels.comment: [ [ "Mike","LGTM" ],[ "John","Approved" ] ] } + + # Query_string Query with dot path. + - do: + search: + body: { + _source: true, + query: { + nested: { + path: "issue", + query: { + "query_string": { + "fields": [ "issue.labels.comment" ], + "query": "Doe OR Mike" + } } } + } + } + + - length: { hits.hits: 2 } + - match: { hits.hits.0._source.issue.0.labels.comment: [ [ "Doe","Shipped" ],[ "John","Approved" ] ] } + - match: { hits.hits.1._source.issue.0.labels.comment: [ [ "Mike","LGTM" ],[ "John","Approved" ] ] } + + # Simple_query_string Query without full dot path. + - do: + search: + body: { + _source: true, + query: { + nested: { + path: "issue", + query: { + "simple_query_string": { + "query": "Doe", + "fields": [ "issue.labels" ] + } } } + } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.issue.0.labels.comment: [ [ "Doe","Shipped" ],[ "John","Approved" ] ] } + + # Simple_query_string Query with dot path. 
+ - do: + search: + body: { + _source: true, + query: { + nested: { + path: "issue", + query: { + "simple_query_string": { + "query": "Doe", + "fields": [ "issue.labels.comment" ] + } } } + } + } + + - length: { hits.hits: 1 } + - match: { hits.hits.0._source.issue.0.labels.comment: [ [ "Doe","Shipped" ],[ "John","Approved" ] ] } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/90_search_after.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/90_search_after.yml index 65c1527a68b96..55e1566656faf 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/90_search_after.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/90_search_after.yml @@ -160,6 +160,28 @@ - match: {hits.hits.0._source.timestamp: "2019-10-21 00:30:04.828" } - match: {hits.hits.0.sort: [1571617804828] } + # search_after with a sort that uses the "missing" parameter + - do: + bulk: + refresh: true + index: test + body: | + {"index":{}} + {"timestamp": null} + - do: + search: + index: test + rest_total_hits_as_int: true + body: + "size": 5 + "sort": [ { "timestamp": { "order": "asc", "missing": "_last" } } ] + search_after: [ "2021-10-21 08:30:04.828" ] # beyond the field's min/max, so only the missing-value hit qualifies + + - match: { hits.total: 3 } + - length: { hits.hits: 1 } + - match: { hits.hits.0._index: test } + - match: { hits.hits.0._source.timestamp: null } + --- "date_nanos": - skip: @@ -276,3 +298,25 @@ - match: {hits.hits.0._index: test } - match: {hits.hits.0._source.population: 15223372036854775800 } - match: {hits.hits.0.sort: [15223372036854775800] } + + # search_after with a sort that uses the "missing" parameter + - do: + bulk: + refresh: true + index: test + body: | + {"index":{}} + {"population": null} + - do: + search: + index: test + rest_total_hits_as_int: true + body: + "size": 5 + "sort": [ { "population": { "order": "asc", "missing": "_last" } } ] + search_after: [15223372036854775801] # beyond the field's min/max, so only the missing-value hit qualifies + + - match: { hits.total: 3 } + - length: { hits.hits: 1 } + - match: { hits.hits.0._index: test } + - match: { hits.hits.0._source.population: null } diff --git a/server/build.gradle b/server/build.gradle index ab67eabe76d0c..38bbf020d860b 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -46,7 +46,9 @@ publishing { } } -archivesBaseName = 'opensearch' +base { + archivesBaseName = 'opensearch' +} sourceSets { main { diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationAllocationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationAllocationIT.java index cbaf70c325a57..b6ea3a094f496 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationAllocationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationAllocationIT.java @@ -233,24 +233,30 @@ private void verifyPerIndexPrimaryBalance() throws Exception { RoutingNodes nodes = currentState.getRoutingNodes(); for (final Map.Entry index : currentState.getRoutingTable().indicesRouting().entrySet()) { final int totalPrimaryShards = index.getValue().primaryShardsActive(); - final int avgPrimaryShardsPerNode = (int) Math.floor(totalPrimaryShards * 1f / currentState.getRoutingNodes().size()); + final int lowerBoundPrimaryShardsPerNode = (int) Math.floor(totalPrimaryShards * 1f / currentState.getRoutingNodes().size()) + - 1;
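+ // Worked example (added for clarity): with 5 primaries across 3 nodes, the lower bound is + // floor(5 / 3) - 1 = 0 and the upper bound computed below is ceil(5 / 3) + 1 = 3 primaries + // per node.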
+ final int upperBoundPrimaryShardsPerNode = (int) Math.ceil(totalPrimaryShards * 1f / currentState.getRoutingNodes().size()) + + 1; for (RoutingNode node : nodes) { final int primaryCount = node.shardsWithState(index.getKey(), STARTED) .stream() .filter(ShardRouting::primary) .collect(Collectors.toList()) .size(); - if (primaryCount > avgPrimaryShardsPerNode) { - logger.info( - "--> Primary shard balance assertion failure for index {} on node {} {} <= {}", - index.getKey(), - node.node().getName(), - primaryCount, - avgPrimaryShardsPerNode - ); - } // Asserts value is within the variance threshold (-1/+1 of the average value). - assertTrue(avgPrimaryShardsPerNode - 1 <= primaryCount && primaryCount <= avgPrimaryShardsPerNode + 1); + assertTrue( + "--> Primary balance assertion failure for index " + + index.getKey() + + " on node " + + node.node().getName() + + " " + + lowerBoundPrimaryShardsPerNode + + " <= " + + primaryCount + + " (assigned) <= " + + upperBoundPrimaryShardsPerNode, + lowerBoundPrimaryShardsPerNode <= primaryCount && primaryCount <= upperBoundPrimaryShardsPerNode + ); } } }, 60, TimeUnit.SECONDS); diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationClusterSettingIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationClusterSettingIT.java index 57578cdbfa8e8..c1326c1b50c9e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationClusterSettingIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationClusterSettingIT.java @@ -15,16 +15,8 @@ import org.opensearch.index.Index; import org.opensearch.index.IndexModule; import org.opensearch.indices.IndicesService; -import org.opensearch.indices.SystemIndexDescriptor; import org.opensearch.indices.replication.common.ReplicationType; -import org.opensearch.plugins.Plugin; -import org.opensearch.plugins.SystemIndexPlugin; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.transport.MockTransportService; - -import java.util.Collection; -import java.util.Collections; -import java.util.Arrays; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REPLICATION_TYPE; import static org.opensearch.indices.IndicesService.CLUSTER_SETTING_REPLICATION_TYPE; @@ -60,40 +52,6 @@ protected Settings nodeSettings(int nodeOrdinal) { .build(); } - public static class TestPlugin extends Plugin implements SystemIndexPlugin { - @Override - public Collection getSystemIndexDescriptors(Settings settings) { - return Collections.singletonList( - new SystemIndexDescriptor(SYSTEM_INDEX_NAME, "System index for [" + getTestClass().getName() + ']') - ); - } - } - - @Override - protected Collection> nodePlugins() { - return Arrays.asList(SegmentReplicationClusterSettingIT.TestPlugin.class, MockTransportService.TestPlugin.class); - } - - public void testSystemIndexWithSegmentReplicationClusterSetting() throws Exception { - - // Starting two nodes with primary and replica shards respectively.
- final String primaryNode = internalCluster().startNode(); - createIndex(SYSTEM_INDEX_NAME); - ensureYellowAndNoInitializingShards(SYSTEM_INDEX_NAME); - final String replicaNode = internalCluster().startNode(); - ensureGreen(SYSTEM_INDEX_NAME); - final GetSettingsResponse response = client().admin() - .indices() - .getSettings(new GetSettingsRequest().indices(SYSTEM_INDEX_NAME).includeDefaults(true)) - .actionGet(); - assertEquals(response.getSetting(SYSTEM_INDEX_NAME, SETTING_REPLICATION_TYPE), ReplicationType.DOCUMENT.toString()); - - // Verify index setting isSegRepEnabled is false. - Index index = resolveIndex(SYSTEM_INDEX_NAME); - IndicesService indicesService = internalCluster().getInstance(IndicesService.class, primaryNode); - assertEquals(indicesService.indexService(index).getIndexSettings().isSegRepEnabled(), false); - } - public void testIndexReplicationSettingOverridesSegRepClusterSetting() throws Exception { Settings settings = Settings.builder().put(CLUSTER_SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT).build(); final String ANOTHER_INDEX = "test-index"; @@ -165,28 +123,4 @@ public void testIndexReplicationSettingOverridesDocRepClusterSetting() throws Ex assertEquals(indicesService.indexService(anotherIndex).getIndexSettings().isSegRepEnabled(), false); } - public void testHiddenIndicesWithReplicationStrategyClusterSetting() throws Exception { - final String primaryNode = internalCluster().startNode(); - final String replicaNode = internalCluster().startNode(); - prepareCreate( - INDEX_NAME, - Settings.builder() - // we want to set index as hidden - .put("index.hidden", true) - ).get(); - ensureGreen(INDEX_NAME); - - // Verify that document replication strategy is used for hidden indices. - final GetSettingsResponse response = client().admin() - .indices() - .getSettings(new GetSettingsRequest().indices(INDEX_NAME).includeDefaults(true)) - .actionGet(); - assertEquals(response.getSetting(INDEX_NAME, SETTING_REPLICATION_TYPE), ReplicationType.DOCUMENT.toString()); - - // Verify index setting isSegRepEnabled. - Index index = resolveIndex(INDEX_NAME); - IndicesService indicesService = internalCluster().getInstance(IndicesService.class, primaryNode); - assertEquals(indicesService.indexService(index).getIndexSettings().isSegRepEnabled(), false); - } - } diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java index ce5e0989b622f..ac57c78d20b73 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java @@ -435,6 +435,7 @@ public void testNodeDropWithOngoingReplication() throws Exception { refresh(INDEX_NAME); blockFileCopy.countDown(); internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNode)); + ensureYellow(INDEX_NAME); assertBusy(() -> { assertDocCounts(docCount, replicaNode); }); state = client().admin().cluster().prepareState().execute().actionGet().getState(); // replica now promoted as primary should have same allocation id @@ -732,6 +733,7 @@ public void testDropPrimaryDuringReplication() throws Exception { // start another replica. dataNodes.add(internalCluster().startDataOnlyNode()); ensureGreen(INDEX_NAME); + waitForSearchableDocs(initialDocCount, dataNodes); // index another doc and refresh - without this the new replica won't catch up. 
String docId = String.valueOf(initialDocCount + 1); @@ -799,6 +801,7 @@ public void testReplicaHasDiffFilesThanPrimary() throws Exception { public void testPressureServiceStats() throws Exception { final String primaryNode = internalCluster().startDataOnlyNode(); createIndex(INDEX_NAME); + ensureYellow(INDEX_NAME); final String replicaNode = internalCluster().startDataOnlyNode(); ensureGreen(INDEX_NAME); diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/CreateRemoteIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/CreateRemoteIndexIT.java index 46966e289e75e..fda344acad166 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/CreateRemoteIndexIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/CreateRemoteIndexIT.java @@ -12,24 +12,15 @@ import org.junit.Before; import org.opensearch.action.admin.indices.get.GetIndexRequest; import org.opensearch.action.admin.indices.get.GetIndexResponse; -import org.opensearch.action.admin.indices.settings.get.GetSettingsRequest; -import org.opensearch.action.admin.indices.settings.get.GetSettingsResponse; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.FeatureFlags; -import org.opensearch.indices.SystemIndexDescriptor; import org.opensearch.index.IndexSettings; import org.opensearch.indices.replication.common.ReplicationType; -import org.opensearch.plugins.Plugin; -import org.opensearch.plugins.SystemIndexPlugin; import org.opensearch.test.FeatureFlagSetter; import org.opensearch.test.OpenSearchIntegTestCase; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; - import static org.hamcrest.Matchers.containsString; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_STORE_ENABLED; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_STORE_REPOSITORY; @@ -68,20 +59,6 @@ protected Settings nodeSettings(int nodeOriginal) { return builder.build(); } - public static class TestPlugin extends Plugin implements SystemIndexPlugin { - @Override - public Collection getSystemIndexDescriptors(Settings settings) { - return Collections.singletonList( - new SystemIndexDescriptor(SYSTEM_INDEX_NAME, "System index for [" + getTestClass().getName() + ']') - ); - } - } - - @Override - protected Collection> nodePlugins() { - return Arrays.asList(CreateRemoteIndexIT.TestPlugin.class); - } - @Override protected Settings featureFlagSettings() { return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.REMOTE_STORE, "true").build(); @@ -130,35 +107,6 @@ public void testDefaultRemoteStoreNoUserOverride() throws Exception { ); } - private static final String SYSTEM_INDEX_NAME = ".test-system-index"; - - public void testSystemIndexWithRemoteStoreClusterSetting() throws Exception { - createIndex(SYSTEM_INDEX_NAME); - ensureGreen(SYSTEM_INDEX_NAME); - final GetSettingsResponse response = client().admin() - .indices() - .getSettings(new GetSettingsRequest().indices(SYSTEM_INDEX_NAME).includeDefaults(true)) - .actionGet(); - // Verify that Document replication strategy is used - assertEquals(response.getSetting(SYSTEM_INDEX_NAME, SETTING_REPLICATION_TYPE), ReplicationType.DOCUMENT.toString()); - assertEquals(response.getSetting(SYSTEM_INDEX_NAME, SETTING_REMOTE_STORE_ENABLED), "false"); - } - - public void testSystemIndexWithRemoteStoreIndexSettings() throws 
Exception { - prepareCreate( - SYSTEM_INDEX_NAME, - Settings.builder().put(SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT).put(SETTING_REMOTE_STORE_ENABLED, true) - ).get(); - ensureGreen(SYSTEM_INDEX_NAME); - final GetSettingsResponse response = client().admin() - .indices() - .getSettings(new GetSettingsRequest().indices(SYSTEM_INDEX_NAME).includeDefaults(true)) - .actionGet(); - // Verify that Document replication strategy is used - assertEquals(response.getSetting(SYSTEM_INDEX_NAME, SETTING_REPLICATION_TYPE), ReplicationType.DOCUMENT.toString()); - assertEquals(response.getSetting(SYSTEM_INDEX_NAME, SETTING_REMOTE_STORE_ENABLED), "false"); - } - public void testRemoteStoreDisabledByUser() throws Exception { Settings settings = Settings.builder() .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/PrimaryTermValidationIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/PrimaryTermValidationIT.java new file mode 100644 index 0000000000000..6691da81f057d --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/PrimaryTermValidationIT.java @@ -0,0 +1,166 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.remotestore; + +import org.opensearch.action.admin.cluster.health.ClusterHealthRequest; +import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; +import org.opensearch.action.admin.cluster.node.info.NodesInfoResponse; +import org.opensearch.action.admin.indices.refresh.RefreshResponse; +import org.opensearch.action.index.IndexResponse; +import org.opensearch.action.support.IndicesOptions; +import org.opensearch.cluster.coordination.FollowersChecker; +import org.opensearch.cluster.coordination.LeaderChecker; +import org.opensearch.cluster.health.ClusterHealthStatus; +import org.opensearch.cluster.health.ClusterIndexHealth; +import org.opensearch.common.UUIDs; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.index.shard.ShardNotFoundException; +import org.opensearch.plugins.Plugin; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.disruption.NetworkDisruption; +import org.opensearch.test.transport.MockTransportService; + +import java.util.Arrays; +import java.util.Collection; +import java.util.HashSet; +import java.util.Set; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static org.hamcrest.Matchers.equalTo; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoFailures; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) + +public class PrimaryTermValidationIT extends RemoteStoreBaseIntegTestCase { + + private static final String INDEX_NAME = "remote-store-test-idx-1"; + + @Override + protected Collection> nodePlugins() { + return Arrays.asList(MockTransportService.TestPlugin.class); + } + + public void testPrimaryTermValidation() throws Exception { + // Follower checker interval is lower compared to leader checker so that the cluster manager can remove the node 
+ // with the network partition faster. The follower check retry count is also kept at 1. + Settings clusterSettings = Settings.builder() + .put(LeaderChecker.LEADER_CHECK_TIMEOUT_SETTING.getKey(), "1s") + .put(LeaderChecker.LEADER_CHECK_INTERVAL_SETTING.getKey(), "20s") + .put(LeaderChecker.LEADER_CHECK_RETRY_COUNT_SETTING.getKey(), 4) + .put(FollowersChecker.FOLLOWER_CHECK_TIMEOUT_SETTING.getKey(), "1s") + .put(FollowersChecker.FOLLOWER_CHECK_INTERVAL_SETTING.getKey(), "1s") + .put(FollowersChecker.FOLLOWER_CHECK_RETRY_COUNT_SETTING.getKey(), 1) + .build(); + internalCluster().startClusterManagerOnlyNode(clusterSettings); + + // Create repository + absolutePath = randomRepoPath().toAbsolutePath(); + assertAcked( + clusterAdmin().preparePutRepository(REPOSITORY_NAME).setType("fs").setSettings(Settings.builder().put("location", absolutePath)) + ); + + // Start data nodes and create index + internalCluster().startDataOnlyNodes(2, clusterSettings); + createIndex(INDEX_NAME, remoteTranslogIndexSettings(1)); + ensureYellowAndNoInitializingShards(INDEX_NAME); + ensureGreen(INDEX_NAME); + + // Get the names of nodes to create network disruption + String primaryNode = primaryNodeName(INDEX_NAME); + String replicaNode = replicaNodeName(INDEX_NAME); + String clusterManagerNode = internalCluster().getClusterManagerName(); + logger.info("Node names : clusterManager={} primary={} replica={}", clusterManagerNode, primaryNode, replicaNode); + + // Index some docs and validate that both the primary and the replica node have them. A refresh is triggered to start + // segment replication so that the replica is also up to date. + int numOfDocs = randomIntBetween(5, 10); + for (int i = 0; i < numOfDocs; i++) { + indexSameDoc(clusterManagerNode, INDEX_NAME); + } + refresh(INDEX_NAME); + assertBusy( + () -> assertHitCount(client(primaryNode).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), numOfDocs) + ); + assertBusy( + () -> assertHitCount(client(replicaNode).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), numOfDocs) + ); + + // Start network disruption - primary node will be isolated + Set nodesInOneSide = Stream.of(clusterManagerNode, replicaNode).collect(Collectors.toCollection(HashSet::new)); + Set nodesInOtherSide = Stream.of(primaryNode).collect(Collectors.toCollection(HashSet::new)); + NetworkDisruption networkDisruption = new NetworkDisruption( + new NetworkDisruption.TwoPartitions(nodesInOneSide, nodesInOtherSide), + NetworkDisruption.DISCONNECT + ); + internalCluster().setDisruptionScheme(networkDisruption); + logger.info("--> network disruption is started"); + networkDisruption.startDisrupting(); + + // Ensure the node which is partitioned is removed from the cluster + assertBusy(() -> { + NodesInfoResponse response = client(clusterManagerNode).admin().cluster().prepareNodesInfo().get(); + assertThat(response.getNodes().size(), equalTo(2)); + }); + + // Ensure that the cluster manager has the latest information about the index + assertBusy(() -> { + ClusterHealthResponse clusterHealthResponse = client(clusterManagerNode).admin() + .cluster() + .health(new ClusterHealthRequest()) + .actionGet(TimeValue.timeValueSeconds(1)); + assertTrue(clusterHealthResponse.getIndices().containsKey(INDEX_NAME)); + ClusterIndexHealth clusterIndexHealth = clusterHealthResponse.getIndices().get(INDEX_NAME); + assertEquals(ClusterHealthStatus.YELLOW, clusterHealthResponse.getStatus()); + assertEquals(1, clusterIndexHealth.getNumberOfShards());
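+ // Explanatory note (added): the index has one shard and one replica; with the old primary's + // node partitioned away and the replica promoted, one copy is active and one is unassigned, + // which is exactly the YELLOW health asserted here.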
+ assertEquals(1, clusterIndexHealth.getActiveShards()); + assertEquals(1, clusterIndexHealth.getUnassignedShards()); + assertEquals(1, clusterIndexHealth.getActivePrimaryShards()); + assertEquals(ClusterHealthStatus.YELLOW, clusterIndexHealth.getStatus()); + }); + + // Index data to the newly promoted primary + indexSameDoc(clusterManagerNode, INDEX_NAME); + RefreshResponse refreshResponse = client(clusterManagerNode).admin() + .indices() + .prepareRefresh(INDEX_NAME) + .setIndicesOptions(IndicesOptions.STRICT_EXPAND_OPEN_HIDDEN_FORBID_CLOSED) + .execute() + .actionGet(); + assertNoFailures(refreshResponse); + assertEquals(1, refreshResponse.getSuccessfulShards()); + assertHitCount(client(replicaNode).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), numOfDocs + 1); + + // At this point we stop the disruption. Since the follower checker has already failed and the cluster manager has removed + // the node from the cluster, the failed node needs to restart the discovery process via the leader checker call. We stop the + // disruption to allow the failed node to communicate with the other node, which it assumes still has the replica. + networkDisruption.stopDisrupting(); + + // When the index call is made to the stale primary, it makes a primary term validation call to the other node (which + // it assumes holds the replica). At this moment the stale primary realizes that it is no longer the primary, and the caller + // receives the following exception. + ShardNotFoundException exception = assertThrows(ShardNotFoundException.class, () -> indexSameDoc(primaryNode, INDEX_NAME)); + assertTrue(exception.getMessage().contains("no such shard")); + ensureStableCluster(3); + ensureGreen(INDEX_NAME); + } + + private IndexResponse indexSameDoc(String nodeName, String indexName) { + return client(nodeName).prepareIndex(indexName) + .setId(UUIDs.randomBase64UUID()) + .setSource("{\"foo\" : \"bar\"}", XContentType.JSON) + .get(); + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java index d226d0d757638..2b3fcadfc645e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java @@ -9,12 +9,12 @@ package org.opensearch.remotestore; import org.junit.After; -import org.junit.Before; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.IndexModule; import org.opensearch.index.IndexSettings; +import org.opensearch.index.mapper.MapperService; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.test.OpenSearchIntegTestCase; @@ -74,6 +74,13 @@ protected Settings remoteStoreIndexSettings(int numberOfReplicas) { return remoteStoreIndexSettings(numberOfReplicas, 1); } + protected Settings remoteStoreIndexSettings(int numberOfReplicas, long totalFieldLimit) { + return Settings.builder() + .put(remoteStoreIndexSettings(numberOfReplicas)) + .put(MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.getKey(), totalFieldLimit) + .build(); + } +
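+ // Hypothetical usage sketch (mirrors how the tests added in this change call the helper): + // one replica plus a raised total-fields mapping limit, e.g. + // createIndex(INDEX_NAME, remoteStoreIndexSettings(1, 10000L));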
protected Settings remoteTranslogIndexSettings(int numberOfReplicas, int numberOfShards) { return Settings.builder() .put(remoteStoreIndexSettings(numberOfReplicas, numberOfShards)) @@ -92,8 +99,7 @@ protected void putRepository(Path path) { ); } - @Before - public void setup() { + protected void setupRepo() { internalCluster().startClusterManagerOnlyNode(); absolutePath = randomRepoPath().toAbsolutePath(); assertAcked( diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java index 70a41d74a57c5..85208a33cc9f5 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java @@ -8,6 +8,7 @@ package org.opensearch.remotestore; +import org.junit.Before; import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreRequest; import org.opensearch.action.admin.indices.delete.DeleteIndexRequest; import org.opensearch.action.admin.indices.recovery.RecoveryResponse; @@ -17,6 +18,7 @@ import org.opensearch.cluster.routing.RecoverySource; import org.opensearch.common.UUIDs; import org.opensearch.common.settings.Settings; +import org.opensearch.index.shard.RemoteStoreRefreshListener; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.plugins.Plugin; import org.opensearch.test.InternalTestCluster; @@ -50,6 +52,11 @@ protected Collection> nodePlugins() { return Arrays.asList(MockTransportService.TestPlugin.class); } + @Before + public void setup() { + setupRepo(); + } + @Override public Settings indexSettings() { return remoteStoreIndexSettings(0); } @@ -277,4 +284,44 @@ public void testRemoteSegmentCleanup() throws Exception { public void testRemoteTranslogCleanup() throws Exception { verifyRemoteStoreCleanup(true); } + + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/8504") + public void testStaleCommitDeletionWithInvokeFlush() throws Exception { + internalCluster().startDataOnlyNodes(3); + createIndex(INDEX_NAME, remoteStoreIndexSettings(1, 10000L)); + int numberOfIterations = randomIntBetween(5, 15); + indexData(numberOfIterations, true); + String indexUUID = client().admin() + .indices() + .prepareGetSettings(INDEX_NAME) + .get() + .getSetting(INDEX_NAME, IndexMetadata.SETTING_INDEX_UUID); + Path indexPath = Path.of(String.valueOf(absolutePath), indexUUID, "/0/segments/metadata"); + // Delete is async. + assertBusy(() -> { + int actualFileCount = getFileCount(indexPath); + if (numberOfIterations <= RemoteStoreRefreshListener.LAST_N_METADATA_FILES_TO_KEEP) { + assertEquals(numberOfIterations, actualFileCount); + } else { + // As the delete is async, it's possible that a file gets created just before or after the + // deletion.
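+ // Clarifying note (assumes LAST_N_METADATA_FILES_TO_KEEP is 10, which the bounds below + // imply): past 10 iterations the steady state is 10 metadata files, or 11 when an upload + // races the async delete.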
+ assertTrue(actualFileCount >= 10 && actualFileCount <= 11); + } + }, 30, TimeUnit.SECONDS); + } + + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/8504") + public void testStaleCommitDeletionWithoutInvokeFlush() throws Exception { + internalCluster().startDataOnlyNodes(3); + createIndex(INDEX_NAME, remoteStoreIndexSettings(1, 10000L)); + int numberOfIterations = randomIntBetween(5, 15); + indexData(numberOfIterations, false); + String indexUUID = client().admin() + .indices() + .prepareGetSettings(INDEX_NAME) + .get() + .getSetting(INDEX_NAME, IndexMetadata.SETTING_INDEX_UUID); + Path indexPath = Path.of(String.valueOf(absolutePath), indexUUID, "/0/segments/metadata"); + assertEquals(numberOfIterations, getFileCount(indexPath)); + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java index 0ea87d106c14e..0e4774c1f3454 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java @@ -8,6 +8,7 @@ package org.opensearch.remotestore; +import org.junit.Before; import org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStats; import org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsRequestBuilder; import org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsResponse; @@ -28,6 +29,11 @@ public class RemoteStoreStatsIT extends RemoteStoreBaseIntegTestCase { private static final String INDEX_NAME = "remote-store-test-idx-1"; + @Before + public void setup() { + setupRepo(); + } + public void testStatsResponseFromAllNodes() { // Step 1 - We create a cluster, create an index, and then index documents into it.
We also do multiple refreshes/flushes diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/ReplicaToPrimaryPromotionIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/ReplicaToPrimaryPromotionIT.java index 712747f7479ae..0f3e041dd429a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/ReplicaToPrimaryPromotionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/ReplicaToPrimaryPromotionIT.java @@ -9,6 +9,7 @@ package org.opensearch.remotestore; import com.carrotsearch.randomizedtesting.RandomizedTest; +import org.junit.Before; import org.opensearch.action.admin.indices.close.CloseIndexResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.IndexMetadata; @@ -31,6 +32,11 @@ public class ReplicaToPrimaryPromotionIT extends RemoteStoreBaseIntegTestCase { private int shard_count = 5; + @Before + public void setup() { + setupRepo(); + } + @Override public Settings indexSettings() { return Settings.builder() diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreIT.java index 01fb91f83aa02..d39b30ada5ef7 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreIT.java @@ -64,7 +64,6 @@ public void teardown() { assertAcked(clusterAdmin().prepareDeleteRepository(REPOSITORY_NAME)); } - @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/7592") @Override public void testPressureServiceStats() throws Exception { super.testPressureServiceStats(); diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotIT.java index 5441dae9703ce..ce92a15026b70 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotIT.java @@ -38,11 +38,15 @@ import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.Client; import org.opensearch.cluster.SnapshotsInProgress; +import org.opensearch.cluster.metadata.RepositoryMetadata; +import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.IndexNotFoundException; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.RepositoryData; import org.opensearch.snapshots.mockstore.MockRepository; +import org.opensearch.test.FeatureFlagSetter; import org.opensearch.test.OpenSearchIntegTestCase; import java.util.ArrayList; @@ -64,6 +68,7 @@ import org.opensearch.repositories.blobstore.BlobStoreRepository; import java.nio.file.Path; +import java.util.concurrent.ExecutionException; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; @@ -153,6 +158,162 @@ public void testCloneSnapshotIndex() throws Exception { assertEquals(status1.getStats().getTotalSize(), status2.getStats().getTotalSize()); } + public void testCloneShallowSnapshotIndex() throws Exception { + disableRepoConsistencyCheck("This test uses remote store repository"); + 
FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); + internalCluster().startClusterManagerOnlyNode(); + internalCluster().startDataOnlyNode(); + + final String snapshotRepoName = "snapshot-repo-name"; + final Path snapshotRepoPath = randomRepoPath(); + createRepository(snapshotRepoName, "fs", snapshotRepoPath); + + final String shallowSnapshotRepoName = "shallow-snapshot-repo-name"; + final Path shallowSnapshotRepoPath = randomRepoPath(); + createRepository(shallowSnapshotRepoName, "fs", snapshotRepoSettingsForShallowCopy(shallowSnapshotRepoPath)); + + final Path remoteStoreRepoPath = randomRepoPath(); + final String remoteStoreRepoName = "remote-store-repo-name"; + createRepository(remoteStoreRepoName, "fs", remoteStoreRepoPath); + + final String indexName = "index-1"; + createIndexWithRandomDocs(indexName, randomIntBetween(5, 10)); + + final String remoteStoreEnabledIndexName = "remote-index-1"; + final Settings remoteStoreEnabledIndexSettings = getRemoteStoreBackedIndexSettings(remoteStoreRepoName); + createIndex(remoteStoreEnabledIndexName, remoteStoreEnabledIndexSettings); + indexRandomDocs(remoteStoreEnabledIndexName, randomIntBetween(5, 10)); + + final String snapshot = "snapshot"; + createFullSnapshot(snapshotRepoName, snapshot); + assert (getLockFilesInRemoteStore(remoteStoreEnabledIndexName, remoteStoreRepoName).length == 0); + + indexRandomDocs(indexName, randomIntBetween(20, 100)); + + final String shallowSnapshot = "shallow-snapshot"; + createFullSnapshot(shallowSnapshotRepoName, shallowSnapshot); + assert (getLockFilesInRemoteStore(remoteStoreEnabledIndexName, remoteStoreRepoName).length == 1); + + if (randomBoolean()) { + assertAcked(admin().indices().prepareDelete(indexName)); + } + + final String sourceSnapshot = shallowSnapshot; + final String targetSnapshot = "target-snapshot"; + assertAcked(startClone(shallowSnapshotRepoName, sourceSnapshot, targetSnapshot, indexName, remoteStoreEnabledIndexName).get()); + logger.info("Lock files count: {}", getLockFilesInRemoteStore(remoteStoreEnabledIndexName, remoteStoreRepoName).length); + assert (getLockFilesInRemoteStore(remoteStoreEnabledIndexName, remoteStoreRepoName).length == 2); + } + + public void testShallowCloneNameAvailability() throws Exception { + disableRepoConsistencyCheck("This test uses remote store repository"); + FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); + internalCluster().startClusterManagerOnlyNode(LARGE_SNAPSHOT_POOL_SETTINGS); + internalCluster().startDataOnlyNode(); + + final String shallowSnapshotRepoName = "shallow-snapshot-repo-name"; + final Path shallowSnapshotRepoPath = randomRepoPath(); + createRepository(shallowSnapshotRepoName, "fs", snapshotRepoSettingsForShallowCopy(shallowSnapshotRepoPath)); + + final Path remoteStoreRepoPath = randomRepoPath(); + final String remoteStoreRepoName = "remote-store-repo-name"; + createRepository(remoteStoreRepoName, "fs", remoteStoreRepoPath); + + final String indexName = "index-1"; + createIndexWithRandomDocs(indexName, randomIntBetween(5, 10)); + + final String remoteStoreEnabledIndexName = "remote-index-1"; + final Settings remoteStoreEnabledIndexSettings = getRemoteStoreBackedIndexSettings(remoteStoreRepoName); + createIndex(remoteStoreEnabledIndexName, remoteStoreEnabledIndexSettings); + indexRandomDocs(remoteStoreEnabledIndexName, randomIntBetween(5, 10)); + + final String shallowSnapshot1 = "snapshot1"; + createFullSnapshot(shallowSnapshotRepoName, shallowSnapshot1); + + final String shallowSnapshot2 = "snapshot2"; + 
createFullSnapshot(shallowSnapshotRepoName, shallowSnapshot2); + + ExecutionException ex = expectThrows( + ExecutionException.class, + () -> startClone(shallowSnapshotRepoName, shallowSnapshot1, shallowSnapshot2, indexName, remoteStoreEnabledIndexName).get() + ); + assertThat(ex.getMessage(), containsString("snapshot with the same name already exists")); + } + + public void testCloneAfterRepoShallowSettingEnabled() throws Exception { + disableRepoConsistencyCheck("This test uses remote store repository"); + FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); + internalCluster().startClusterManagerOnlyNode(); + internalCluster().startDataOnlyNode(); + + final String snapshotRepoName = "snapshot-repo-name"; + final Path snapshotRepoPath = randomRepoPath(); + createRepository(snapshotRepoName, "fs", snapshotRepoPath); + + final Path remoteStoreRepoPath = randomRepoPath(); + final String remoteStoreRepoName = "remote-store-repo-name"; + createRepository(remoteStoreRepoName, "fs", remoteStoreRepoPath); + + final String indexName = "index-1"; + createIndexWithRandomDocs(indexName, randomIntBetween(5, 10)); + + final String remoteStoreEnabledIndexName = "remote-index-1"; + final Settings remoteStoreEnabledIndexSettings = getRemoteStoreBackedIndexSettings(remoteStoreRepoName); + createIndex(remoteStoreEnabledIndexName, remoteStoreEnabledIndexSettings); + indexRandomDocs(remoteStoreEnabledIndexName, randomIntBetween(5, 10)); + + final String snapshot = "snapshot"; + createFullSnapshot(snapshotRepoName, snapshot); + assertEquals(getSnapshot(snapshotRepoName, snapshot).state(), SnapshotState.SUCCESS); + + // Updating the snapshot repository flag to enable shallow snapshots + createRepository(snapshotRepoName, "fs", snapshotRepoSettingsForShallowCopy(snapshotRepoPath)); + RepositoryMetadata updatedRepositoryMetadata = clusterAdmin().prepareGetRepositories(snapshotRepoName).get().repositories().get(0); + assertTrue(updatedRepositoryMetadata.settings().getAsBoolean(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), false)); + + final String targetSnapshot = "target-snapshot"; + assertAcked(startClone(snapshotRepoName, snapshot, targetSnapshot, indexName, remoteStoreEnabledIndexName).get()); + assert (getLockFilesInRemoteStore(remoteStoreEnabledIndexName, remoteStoreRepoName).length == 0); + assertEquals(getSnapshot(snapshotRepoName, targetSnapshot).isRemoteStoreIndexShallowCopyEnabled(), false); + } + + public void testCloneAfterRepoShallowSettingDisabled() throws Exception { + disableRepoConsistencyCheck("This test uses remote store repository"); + FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); + internalCluster().startClusterManagerOnlyNode(); + internalCluster().startDataOnlyNode(); + + final String snapshotRepoName = "snapshot-repo-name"; + final Path snapshotRepoPath = randomRepoPath(); + createRepository(snapshotRepoName, "fs", snapshotRepoSettingsForShallowCopy(snapshotRepoPath)); + + final Path remoteStoreRepoPath = randomRepoPath(); + final String remoteStoreRepoName = "remote-store-repo-name"; + createRepository(remoteStoreRepoName, "fs", remoteStoreRepoPath); + + final String indexName = "index-1"; + createIndexWithRandomDocs(indexName, randomIntBetween(5, 10)); + + final String remoteStoreEnabledIndexName = "remote-index-1"; + final Settings remoteStoreEnabledIndexSettings = getRemoteStoreBackedIndexSettings(remoteStoreRepoName); + createIndex(remoteStoreEnabledIndexName, remoteStoreEnabledIndexSettings); + indexRandomDocs(remoteStoreEnabledIndexName, randomIntBetween(5, 10)); + 
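+ + // Note (mechanism inferred from the lock-file assertions in these tests, not stated in the + // code): a shallow snapshot of a remote-store-backed index locks the segments in the remote + // store rather than copying them, so each shallow snapshot or clone adds one lock file.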
+ final String snapshot = "snapshot"; + createFullSnapshot(snapshotRepoName, snapshot); + assertEquals(getSnapshot(snapshotRepoName, snapshot).state(), SnapshotState.SUCCESS); + + // Updating the snapshot repository to disable shallow snapshots + createRepository(snapshotRepoName, "fs", snapshotRepoPath); + RepositoryMetadata updatedRepositoryMetadata = clusterAdmin().prepareGetRepositories(snapshotRepoName).get().repositories().get(0); + assertFalse(updatedRepositoryMetadata.settings().getAsBoolean(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), false)); + + final String targetSnapshot = "target-snapshot"; + assertAcked(startClone(snapshotRepoName, snapshot, targetSnapshot, indexName, remoteStoreEnabledIndexName).get()); + assert (getLockFilesInRemoteStore(remoteStoreEnabledIndexName, remoteStoreRepoName).length == 2); + assertEquals(getSnapshot(snapshotRepoName, targetSnapshot).isRemoteStoreIndexShallowCopyEnabled(), true); + } + public void testClonePreventsSnapshotDelete() throws Exception { final String clusterManagerName = internalCluster().startClusterManagerOnlyNode(); internalCluster().startDataOnlyNode(); diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/RestoreSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/RestoreSnapshotIT.java index 9f492bbaee01a..e362b7f61e8e6 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/RestoreSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/RestoreSnapshotIT.java @@ -183,8 +183,8 @@ public void testRestoreRemoteStoreIndicesWithoutRemoteTranslog() throws IOExcept public void testRestoreOperationsShallowCopyEnabled(boolean remoteTranslogEnabled) throws IOException, ExecutionException, InterruptedException { - internalCluster().startClusterManagerOnlyNode(); - final String primaryNode = internalCluster().startNode(); + String clusterManagerNode = internalCluster().startClusterManagerOnlyNode(); + String primary = internalCluster().startDataOnlyNode(); String indexName1 = "testindex1"; String indexName2 = "testindex2"; String snapshotRepoName = "test-restore-snapshot-repo"; @@ -216,7 +216,7 @@ public void testRestoreOperationsShallowCopyEnabled(boolean remoteTranslogEnable indexDocuments(client, indexName2, numDocsInIndex2); ensureGreen(indexName1, indexName2); - final String secondNode = internalCluster().startNode(); + internalCluster().startDataOnlyNode(); logger.info("--> snapshot"); CreateSnapshotResponse createSnapshotResponse = client.admin() .cluster() @@ -273,10 +273,12 @@ public void testRestoreOperationsShallowCopyEnabled(boolean remoteTranslogEnable assertDocsPresentInIndex(client, restoredIndexName2, numDocsInIndex2); // deleting data for restoredIndexName1 and restoring from remote store. - internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNodeName(restoredIndexName1))); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primary)); ensureRed(restoredIndexName1); - assertAcked(client().admin().indices().prepareClose(restoredIndexName1)); - client().admin() + // Re-initialize client to make sure we are not using client from stopped node.
+ client = client(clusterManagerNode); + assertAcked(client.admin().indices().prepareClose(restoredIndexName1)); + client.admin() .cluster() .restoreRemoteStore(new RestoreRemoteStoreRequest().indices(restoredIndexName1), PlainActionFuture.newFuture()); ensureYellowAndNoInitializingShards(restoredIndexName1); @@ -300,7 +302,7 @@ public void testRestoreOperationsShallowCopyEnabled(boolean remoteTranslogEnable assertEquals(restoreSnapshotResponse3.status(), RestStatus.ACCEPTED); ensureGreen(restoredIndexName1Seg); - GetIndexResponse getIndexResponse = client().admin() + GetIndexResponse getIndexResponse = client.admin() .indices() .getIndex(new GetIndexRequest().indices(restoredIndexName1Seg).includeDefaults(true)) .get(); @@ -331,7 +333,7 @@ public void testRestoreOperationsShallowCopyEnabled(boolean remoteTranslogEnable assertEquals(restoreSnapshotResponse4.status(), RestStatus.ACCEPTED); ensureGreen(restoredIndexName1Doc); - getIndexResponse = client().admin() + getIndexResponse = client.admin() .indices() .getIndex(new GetIndexRequest().indices(restoredIndexName1Doc).includeDefaults(true)) .get(); @@ -347,8 +349,8 @@ public void testRestoreOperationsShallowCopyEnabled(boolean remoteTranslogEnable } public void testRestoreInSameRemoteStoreEnabledIndex() throws IOException { - internalCluster().startClusterManagerOnlyNode(); - internalCluster().startNode(); + String clusterManagerNode = internalCluster().startClusterManagerOnlyNode(); + String primary = internalCluster().startDataOnlyNode(); String indexName1 = "testindex1"; String indexName2 = "testindex2"; String snapshotRepoName = "test-restore-snapshot-repo"; @@ -378,7 +380,7 @@ public void testRestoreInSameRemoteStoreEnabledIndex() throws IOException { indexDocuments(client, indexName2, numDocsInIndex2); ensureGreen(indexName1, indexName2); - final String secondNode = internalCluster().startNode(); + internalCluster().startDataOnlyNode(); logger.info("--> snapshot"); CreateSnapshotResponse createSnapshotResponse = client.admin() .cluster() @@ -435,10 +437,12 @@ public void testRestoreInSameRemoteStoreEnabledIndex() throws IOException { assertDocsPresentInIndex(client, restoredIndexName2, numDocsInIndex2); // deleting data for restoredIndexName1 and restoring from remote store. - internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNodeName(indexName1))); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primary)); ensureRed(indexName1); - assertAcked(client().admin().indices().prepareClose(indexName1)); - client().admin().cluster().restoreRemoteStore(new RestoreRemoteStoreRequest().indices(indexName1), PlainActionFuture.newFuture()); + // Re-initialize client to make sure we are not using client from stopped node. 
+ client = client(clusterManagerNode); + assertAcked(client.admin().indices().prepareClose(indexName1)); + client.admin().cluster().restoreRemoteStore(new RestoreRemoteStoreRequest().indices(indexName1), PlainActionFuture.newFuture()); ensureYellowAndNoInitializingShards(indexName1); ensureGreen(indexName1); assertDocsPresentInIndex(client(), indexName1, numDocsInIndex1); @@ -449,8 +453,8 @@ public void testRestoreInSameRemoteStoreEnabledIndex() throws IOException { } public void testRestoreShallowCopySnapshotWithDifferentRepo() throws IOException { - internalCluster().startClusterManagerOnlyNode(); - final String primaryNode = internalCluster().startNode(); + String clusterManagerNode = internalCluster().startClusterManagerOnlyNode(); + String primary = internalCluster().startDataOnlyNode(); String indexName1 = "testindex1"; String indexName2 = "testindex2"; String snapshotRepoName = "test-restore-snapshot-repo"; @@ -479,7 +483,7 @@ public void testRestoreShallowCopySnapshotWithDifferentRepo() throws IOException indexDocuments(client, indexName2, numDocsInIndex2); ensureGreen(indexName1, indexName2); - final String secondNode = internalCluster().startNode(); + internalCluster().startDataOnlyNode(); logger.info("--> snapshot"); CreateSnapshotResponse createSnapshotResponse = client.admin() @@ -513,9 +517,11 @@ public void testRestoreShallowCopySnapshotWithDifferentRepo() throws IOException assertDocsPresentInIndex(client(), restoredIndexName1, numDocsInIndex1); // deleting data for restoredIndexName1 and restoring from remote store. - internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNodeName(restoredIndexName1))); - assertAcked(client().admin().indices().prepareClose(restoredIndexName1)); - client().admin() + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primary)); + // Re-initialize client to make sure we are not using client from stopped node. + client = client(clusterManagerNode); + assertAcked(client.admin().indices().prepareClose(restoredIndexName1)); + client.admin() .cluster() .restoreRemoteStore(new RestoreRemoteStoreRequest().indices(restoredIndexName1), PlainActionFuture.newFuture()); ensureYellowAndNoInitializingShards(restoredIndexName1); diff --git a/server/src/main/java/org/opensearch/action/ActionModule.java b/server/src/main/java/org/opensearch/action/ActionModule.java index 902ae7cc54e3f..56d1758161ced 100644 --- a/server/src/main/java/org/opensearch/action/ActionModule.java +++ b/server/src/main/java/org/opensearch/action/ActionModule.java @@ -462,9 +462,9 @@ import java.util.ArrayList; import java.util.Collections; +import java.util.HashSet; import java.util.List; import java.util.Map; -import java.util.Optional; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentSkipListSet; @@ -1044,7 +1044,7 @@ public static class DynamicActionRegistry { // A dynamic registry to add or remove Route / RestSendToExtensionAction pairs // at times other than node bootstrap. - private final Map routeRegistry = new ConcurrentHashMap<>(); + private final Map routeRegistry = new ConcurrentHashMap<>(); private final Set registeredActionNames = new ConcurrentSkipListSet<>(); @@ -1112,26 +1112,37 @@ public boolean isActionRegistered(String actionName) { } /** - * Add a dynamic action to the registry. + * Adds a dynamic route to the registry. 
* * @param route The route instance to add * @param action The corresponding instance of RestSendToExtensionAction to execute */ - public void registerDynamicRoute(RestHandler.Route route, RestSendToExtensionAction action) { + public void registerDynamicRoute(NamedRoute route, RestSendToExtensionAction action) { requireNonNull(route, "route is required"); requireNonNull(action, "action is required"); - Optional routeName = Optional.empty(); - if (route instanceof NamedRoute) { - routeName = Optional.of(((NamedRoute) route).name()); - if (isActionRegistered(routeName.get()) || registeredActionNames.contains(routeName.get())) { - throw new IllegalArgumentException("route [" + route + "] already registered"); - } + + String routeName = route.name(); + requireNonNull(routeName, "route name is required"); + if (isActionRegistered(routeName)) { + throw new IllegalArgumentException("route [" + route + "] already registered"); } + + Set actionNames = route.actionNames(); + if (!Collections.disjoint(actionNames, registeredActionNames)) { + Set alreadyRegistered = new HashSet<>(registeredActionNames); + alreadyRegistered.retainAll(actionNames); + String acts = String.join(", ", alreadyRegistered); + throw new IllegalArgumentException( + "action" + (alreadyRegistered.size() > 1 ? "s [" : " [") + acts + "] already registered" + ); + } + if (routeRegistry.containsKey(route)) { throw new IllegalArgumentException("route [" + route + "] already registered"); } routeRegistry.put(route, action); - routeName.ifPresent(registeredActionNames::add); + registeredActionNames.add(routeName); + registeredActionNames.addAll(actionNames); } /** @@ -1139,14 +1150,14 @@ public void registerDynamicRoute(RestHandler.Route route, RestSendToExtensionAct * * @param route The route to remove */ - public void unregisterDynamicRoute(RestHandler.Route route) { + public void unregisterDynamicRoute(NamedRoute route) { requireNonNull(route, "route is required"); if (routeRegistry.remove(route) == null) { throw new IllegalArgumentException("action [" + route + "] was not registered"); } - if (route instanceof NamedRoute) { - registeredActionNames.remove(((NamedRoute) route).name()); - } + + registeredActionNames.remove(route.name()); + registeredActionNames.removeAll(route.actionNames()); } /** diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java index 6b8e06594acb7..4cdc54f9c7952 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java @@ -193,7 +193,7 @@ public NodeStats(StreamInput in) throws IOException { } else { taskCancellationStats = null; } - if (in.getVersion().onOrAfter(Version.V_3_0_0)) { // TODO Update to 2_9_0 when we backport to 2.x + if (in.getVersion().onOrAfter(Version.V_2_9_0)) { searchPipelineStats = in.readOptionalWriteable(SearchPipelineStats::new); } else { searchPipelineStats = null; @@ -427,7 +427,7 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getVersion().onOrAfter(Version.V_2_9_0)) { out.writeOptionalWriteable(taskCancellationStats); } - if (out.getVersion().onOrAfter(Version.V_3_0_0)) { // TODO: Update to 2_9_0 once we backport to 2.x + if (out.getVersion().onOrAfter(Version.V_2_9_0)) { out.writeOptionalWriteable(searchPipelineStats); } } diff --git a/server/src/main/java/org/opensearch/cluster/SnapshotsInProgress.java 
b/server/src/main/java/org/opensearch/cluster/SnapshotsInProgress.java index d827dc6409778..ffc99c34fcac5 100644 --- a/server/src/main/java/org/opensearch/cluster/SnapshotsInProgress.java +++ b/server/src/main/java/org/opensearch/cluster/SnapshotsInProgress.java @@ -167,7 +167,8 @@ public static Entry startClone( version, source, Map.of(), - false // TODO: need to pull this value from the original snapshot, use whatever we set during snapshot create. + false // initialising to false, will be updated in startCloning method of SnapshotsService while updating entry with + // clone jobs ); } @@ -453,6 +454,26 @@ public Entry withClones(final Map update ); } + public Entry withRemoteStoreIndexShallowCopy(final boolean remoteStoreIndexShallowCopy) { + return new Entry( + snapshot, + includeGlobalState, + partial, + state, + indices, + dataStreams, + startTime, + repositoryStateId, + shards, + failure, + userMetadata, + version, + source, + clones, + remoteStoreIndexShallowCopy + ); + } + /** * Create a new instance by aborting this instance. Moving all in-progress shards to {@link ShardState#ABORTED} if assigned to a * data node or to {@link ShardState#FAILED} if not assigned to any data node. diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java index 0a49feec61621..3fff6e8823e9e 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java @@ -72,7 +72,6 @@ import org.opensearch.common.settings.IndexScopedSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.core.common.Strings; import org.opensearch.core.xcontent.NamedXContentRegistry; @@ -137,7 +136,6 @@ import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REPLICATION_TYPE; import static org.opensearch.cluster.metadata.Metadata.DEFAULT_REPLICA_COUNT_SETTING; -import static org.opensearch.common.util.FeatureFlags.REMOTE_STORE; import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_REPOSITORY_SETTING; import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_TRANSLOG_REPOSITORY_SETTING; import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_ENABLED_SETTING; @@ -585,8 +583,7 @@ private ClusterState applyCreateIndexRequestWithV1Templates( settings, indexScopedSettings, shardLimitValidator, - indexSettingProviders, - systemIndices.validateSystemIndex(request.index()) + indexSettingProviders ); int routingNumShards = getIndexNumberOfRoutingShards(aggregatedIndexSettings, null); IndexMetadata tmpImd = buildAndValidateTemporaryIndexMetadata(currentState, aggregatedIndexSettings, request, routingNumShards); @@ -650,8 +647,7 @@ private ClusterState applyCreateIndexRequestWithV2Template( settings, indexScopedSettings, shardLimitValidator, - indexSettingProviders, - systemIndices.validateSystemIndex(request.index()) + indexSettingProviders ); int routingNumShards = getIndexNumberOfRoutingShards(aggregatedIndexSettings, null); IndexMetadata tmpImd = buildAndValidateTemporaryIndexMetadata(currentState, aggregatedIndexSettings, request, routingNumShards); @@ -731,8 +727,7 
@@ private ClusterState applyCreateIndexRequestWithExistingMetadata( settings, indexScopedSettings, shardLimitValidator, - indexSettingProviders, - sourceMetadata.isSystem() + indexSettingProviders ); final int routingNumShards = getIndexNumberOfRoutingShards(aggregatedIndexSettings, sourceMetadata); IndexMetadata tmpImd = buildAndValidateTemporaryIndexMetadata(currentState, aggregatedIndexSettings, request, routingNumShards); @@ -815,8 +810,7 @@ static Settings aggregateIndexSettings( Settings settings, IndexScopedSettings indexScopedSettings, ShardLimitValidator shardLimitValidator, - Set indexSettingProviders, - boolean isSystemIndex + Set indexSettingProviders ) { // Create builders for the template and request settings. We transform these into builders // because we may want settings to be "removed" from these prior to being set on the new @@ -900,18 +894,8 @@ static Settings aggregateIndexSettings( indexSettingsBuilder.put(IndexMetadata.SETTING_INDEX_PROVIDED_NAME, request.getProvidedName()); indexSettingsBuilder.put(SETTING_INDEX_UUID, UUIDs.randomBase64UUID()); - if (isSystemIndex || IndexMetadata.INDEX_HIDDEN_SETTING.get(request.settings())) { - logger.warn( - "Setting replication.type: DOCUMENT will be used for Index until Segment Replication supports System and Hidden indices" - ); - indexSettingsBuilder.put(SETTING_REPLICATION_TYPE, ReplicationType.DOCUMENT); - if (FeatureFlags.isEnabled(REMOTE_STORE)) { - indexSettingsBuilder.put(SETTING_REMOTE_STORE_ENABLED, false); - } - } else { - updateReplicationStrategy(indexSettingsBuilder, request.settings(), settings); - updateRemoteStoreSettings(indexSettingsBuilder, request.settings(), settings); - } + updateReplicationStrategy(indexSettingsBuilder, request.settings(), settings); + updateRemoteStoreSettings(indexSettingsBuilder, request.settings(), settings); if (sourceMetadata != null) { assert request.resizeType() != null; diff --git a/server/src/main/java/org/opensearch/common/blobstore/BlobContainer.java b/server/src/main/java/org/opensearch/common/blobstore/BlobContainer.java index ac38768c9f3d3..e626824e7e271 100644 --- a/server/src/main/java/org/opensearch/common/blobstore/BlobContainer.java +++ b/server/src/main/java/org/opensearch/common/blobstore/BlobContainer.java @@ -32,10 +32,14 @@ package org.opensearch.common.blobstore; +import org.opensearch.action.ActionListener; + import java.io.IOException; import java.io.InputStream; import java.nio.file.FileAlreadyExistsException; import java.nio.file.NoSuchFileException; +import java.util.ArrayList; +import java.util.Comparator; import java.util.List; import java.util.Map; @@ -191,4 +195,47 @@ default long readBlobPreferredLength() { * @throws IOException if there were any failures in reading from the blob container. */ Map listBlobsByPrefix(String blobNamePrefix) throws IOException; + + /** + * The type representing sort order of blob names + */ + enum BlobNameSortOrder { + + LEXICOGRAPHIC(Comparator.comparing(BlobMetadata::name)); + + final Comparator comparator; + + public Comparator comparator() { + return comparator; + } + + BlobNameSortOrder(final Comparator comparator) { + this.comparator = comparator; + } + } + + /** + * Lists all blobs in the container that match the specified prefix in lexicographic order + * @param blobNamePrefix The prefix to match against blob names in the container. 
+ * @param limit Limits the result size to min(limit, number of keys) + * @param blobNameSortOrder Comparator to sort keys with + * @param listener the listener to be notified upon request completion + */ + default void listBlobsByPrefixInSortedOrder( + String blobNamePrefix, + int limit, + BlobNameSortOrder blobNameSortOrder, + ActionListener> listener + ) { + if (limit < 0) { + throw new IllegalArgumentException("limit should not be a negative value"); + } + try { + List blobNames = new ArrayList<>(listBlobsByPrefix(blobNamePrefix).values()); + blobNames.sort(blobNameSortOrder.comparator()); + listener.onResponse(blobNames.subList(0, Math.min(blobNames.size(), limit))); + } catch (Exception e) { + listener.onFailure(e); + } + } } diff --git a/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java b/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java index 025fb7a36b684..9dd5d21a00231 100644 --- a/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java +++ b/server/src/main/java/org/opensearch/common/util/concurrent/ThreadContext.java @@ -149,7 +149,7 @@ public StoredContext stashContext() { * Otherwise when context is stash, it should be empty. */ - ThreadContextStruct threadContextStruct = DEFAULT_CONTEXT; + ThreadContextStruct threadContextStruct = DEFAULT_CONTEXT.putPersistent(context.persistentHeaders); if (context.requestHeaders.containsKey(Task.X_OPAQUE_ID)) { threadContextStruct = threadContextStruct.putHeaders( @@ -262,6 +262,7 @@ public StoredContext newStoredContext(boolean preserveResponseHeaders, Collectio originalContext.requestHeaders, originalContext.responseHeaders, newTransientHeaders, + originalContext.persistentHeaders, originalContext.isSystemContext, originalContext.warningHeadersSize ); @@ -337,7 +338,7 @@ public void setHeaders(Tuple, Map>> head if (requestHeaders.isEmpty() && responseHeaders.isEmpty()) { struct = ThreadContextStruct.EMPTY; } else { - struct = new ThreadContextStruct(requestHeaders, responseHeaders, Collections.emptyMap(), false); + struct = new ThreadContextStruct(requestHeaders, responseHeaders, Collections.emptyMap(), Collections.emptyMap(), false); } threadLocal.set(struct); } @@ -375,6 +376,13 @@ public String getHeader(String key) { return value; } + /** + * Returns the persistent header for the given key or null if not present - persistent headers cannot be stashed + */ + public Object getPersistent(String key) { + return threadLocal.get().persistentHeaders.get(key); + } + /** * Returns all of the request headers from the thread's context.
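As a usage sketch of the new default method shown above; the container instance is a hypothetical stand-in for any blob store implementation:

import java.util.List;
import org.opensearch.action.ActionListener;
import org.opensearch.common.blobstore.BlobContainer;
import org.opensearch.common.blobstore.BlobMetadata;

public final class SortedListingSketch {
    // Fetch at most five blob names under a prefix, smallest name first.
    static void listFirstFive(BlobContainer container) {
        container.listBlobsByPrefixInSortedOrder(
            "metadata",
            5,
            BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC,
            ActionListener.wrap(
                (List<BlobMetadata> blobs) -> blobs.forEach(b -> System.out.println(b.name())),
                e -> { throw new RuntimeException(e); }
            )
        );
    }
}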
* Be advised, headers might contain credentials. @@ -434,6 +442,20 @@ public void putHeader(Map header) { threadLocal.set(threadLocal.get().putHeaders(header)); } + /** + * Puts a persistent header into the context - persistent headers cannot be stashed + */ + public void putPersistent(String key, Object value) { + threadLocal.set(threadLocal.get().putPersistent(key, value)); + } + + /** + * Puts all of the given headers into this persistent context - persistent headers cannot be stashed + */ + public void putPersistent(Map persistentHeaders) { + threadLocal.set(threadLocal.get().putPersistent(persistentHeaders)); + } + /** * Puts a transient header object into this context */ @@ -566,12 +588,14 @@ private static final class ThreadContextStruct { Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap(), + Collections.emptyMap(), false ); private final Map requestHeaders; private final Map transientHeaders; private final Map> responseHeaders; + private final Map persistentHeaders; private final boolean isSystemContext; // saving current warning headers' size not to recalculate the size with every new warning header private final long warningHeadersSize; @@ -580,18 +604,20 @@ private ThreadContextStruct setSystemContext() { if (isSystemContext) { return this; } - return new ThreadContextStruct(requestHeaders, responseHeaders, transientHeaders, true); + return new ThreadContextStruct(requestHeaders, responseHeaders, transientHeaders, persistentHeaders, true); } private ThreadContextStruct( Map requestHeaders, Map> responseHeaders, Map transientHeaders, + Map persistentHeaders, boolean isSystemContext ) { this.requestHeaders = requestHeaders; this.responseHeaders = responseHeaders; this.transientHeaders = transientHeaders; + this.persistentHeaders = persistentHeaders; this.isSystemContext = isSystemContext; this.warningHeadersSize = 0L; } @@ -600,12 +626,14 @@ private ThreadContextStruct( Map requestHeaders, Map> responseHeaders, Map transientHeaders, + Map persistentHeaders, boolean isSystemContext, long warningHeadersSize ) { this.requestHeaders = requestHeaders; this.responseHeaders = responseHeaders; this.transientHeaders = transientHeaders; + this.persistentHeaders = persistentHeaders; this.isSystemContext = isSystemContext; this.warningHeadersSize = warningHeadersSize; } @@ -614,13 +642,13 @@ private ThreadContextStruct( * This represents the default context and it should only ever be called by {@link #DEFAULT_CONTEXT}. 
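A small sketch of the intended semantics of persistent headers, assuming a bare ThreadContext (the header keys are made up for illustration): request headers vanish inside a stashed context while persistent headers remain readable.

import org.opensearch.common.settings.Settings;
import org.opensearch.common.util.concurrent.ThreadContext;

public final class PersistentHeaderSketch {
    public static void main(String[] args) {
        ThreadContext threadContext = new ThreadContext(Settings.EMPTY);
        threadContext.putHeader("x-request-header", "cleared-on-stash");
        threadContext.putPersistent("x-persistent-header", "survives-stash");
        try (ThreadContext.StoredContext ignored = threadContext.stashContext()) {
            // Inside the stashed context the request header is gone...
            assert threadContext.getHeader("x-request-header") == null;
            // ...but the persistent header is carried over by stashContext().
            assert "survives-stash".equals(threadContext.getPersistent("x-persistent-header"));
        }
    }
}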
*/ private ThreadContextStruct() { - this(Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap(), false); + this(Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap(), false); } private ThreadContextStruct putRequest(String key, String value) { Map newRequestHeaders = new HashMap<>(this.requestHeaders); putSingleHeader(key, value, newRequestHeaders); - return new ThreadContextStruct(newRequestHeaders, responseHeaders, transientHeaders, isSystemContext); + return new ThreadContextStruct(newRequestHeaders, responseHeaders, transientHeaders, persistentHeaders, isSystemContext); } private static void putSingleHeader(String key, T value, Map newHeaders) { @@ -637,7 +665,25 @@ private ThreadContextStruct putHeaders(Map headers) { for (Map.Entry entry : headers.entrySet()) { putSingleHeader(entry.getKey(), entry.getValue(), newHeaders); } - return new ThreadContextStruct(newHeaders, responseHeaders, transientHeaders, isSystemContext); + return new ThreadContextStruct(newHeaders, responseHeaders, transientHeaders, persistentHeaders, isSystemContext); + } + } + + private ThreadContextStruct putPersistent(String key, Object value) { + Map newPersistentHeaders = new HashMap<>(this.persistentHeaders); + putSingleHeader(key, value, newPersistentHeaders); + return new ThreadContextStruct(requestHeaders, responseHeaders, transientHeaders, newPersistentHeaders, isSystemContext); + } + + private ThreadContextStruct putPersistent(Map headers) { + if (headers.isEmpty()) { + return this; + } else { + final Map newPersistentHeaders = new HashMap<>(this.persistentHeaders); + for (Map.Entry entry : headers.entrySet()) { + putSingleHeader(entry.getKey(), entry.getValue(), newPersistentHeaders); + } + return new ThreadContextStruct(requestHeaders, responseHeaders, transientHeaders, newPersistentHeaders, isSystemContext); } } @@ -658,7 +704,7 @@ private ThreadContextStruct putResponseHeaders(Map> headers) newResponseHeaders.put(key, entry.getValue()); } } - return new ThreadContextStruct(requestHeaders, newResponseHeaders, transientHeaders, isSystemContext); + return new ThreadContextStruct(requestHeaders, newResponseHeaders, transientHeaders, persistentHeaders, isSystemContext); } private ThreadContextStruct putResponse( @@ -695,6 +741,7 @@ private ThreadContextStruct putResponse( requestHeaders, responseHeaders, transientHeaders, + persistentHeaders, isSystemContext, newWarningHeaderSize ); @@ -730,7 +777,14 @@ private ThreadContextStruct putResponse( return this; } } - return new ThreadContextStruct(requestHeaders, newResponseHeaders, transientHeaders, isSystemContext, newWarningHeaderSize); + return new ThreadContextStruct( + requestHeaders, + newResponseHeaders, + transientHeaders, + persistentHeaders, + isSystemContext, + newWarningHeaderSize + ); } private ThreadContextStruct putTransient(Map values) { @@ -738,13 +792,13 @@ private ThreadContextStruct putTransient(Map values) { for (Map.Entry entry : values.entrySet()) { putSingleHeader(entry.getKey(), entry.getValue(), newTransient); } - return new ThreadContextStruct(requestHeaders, responseHeaders, newTransient, isSystemContext); + return new ThreadContextStruct(requestHeaders, responseHeaders, newTransient, persistentHeaders, isSystemContext); } private ThreadContextStruct putTransient(String key, Object value) { Map newTransient = new HashMap<>(this.transientHeaders); putSingleHeader(key, value, newTransient); - return new ThreadContextStruct(requestHeaders, responseHeaders, newTransient, 
isSystemContext); + return new ThreadContextStruct(requestHeaders, responseHeaders, newTransient, persistentHeaders, isSystemContext); } private ThreadContextStruct copyHeaders(Iterable> headers) { diff --git a/server/src/main/java/org/opensearch/extensions/ExtensionDependency.java b/server/src/main/java/org/opensearch/extensions/ExtensionDependency.java index 1423a30bbe307..56c9f0387b13e 100644 --- a/server/src/main/java/org/opensearch/extensions/ExtensionDependency.java +++ b/server/src/main/java/org/opensearch/extensions/ExtensionDependency.java @@ -16,10 +16,6 @@ import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.common.io.stream.Writeable; -import org.opensearch.core.common.Strings; -import org.opensearch.core.xcontent.XContentParser; - -import static org.opensearch.common.xcontent.XContentParserUtils.ensureExpectedToken; /** * This class handles the dependent extensions information @@ -60,39 +56,6 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVersion(version); } - public static ExtensionDependency parse(XContentParser parser) throws IOException { - String uniqueId = null; - Version version = null; - ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.currentToken(), parser); - while (parser.nextToken() != XContentParser.Token.END_OBJECT) { - String fieldName = parser.currentName(); - parser.nextToken(); - - switch (fieldName) { - case UNIQUE_ID: - uniqueId = parser.text(); - break; - case VERSION: - try { - version = Version.fromString(parser.text()); - } catch (IllegalArgumentException e) { - throw e; - } - break; - default: - parser.skipChildren(); - break; - } - } - if (Strings.isNullOrEmpty(uniqueId)) { - throw new IOException("Required field [uniqueId] is missing in the request for the dependent extension"); - } else if (version == null) { - throw new IOException("Required field [version] is missing in the request for the dependent extension"); - } - return new ExtensionDependency(uniqueId, version); - - } - /** * The uniqueId of the dependency extension * diff --git a/server/src/main/java/org/opensearch/extensions/ExtensionsManager.java b/server/src/main/java/org/opensearch/extensions/ExtensionsManager.java index 9987497b5fac0..cb22c8d864b1b 100644 --- a/server/src/main/java/org/opensearch/extensions/ExtensionsManager.java +++ b/server/src/main/java/org/opensearch/extensions/ExtensionsManager.java @@ -105,7 +105,7 @@ public static enum OpenSearchRequestType { /** * Instantiate a new ExtensionsManager object to handle requests and responses from extensions. This is called during Node bootstrap. * - * @param additionalSettings Additional settings to read in from extensions.yml + * @param additionalSettings Additional settings to read in from extension initialization request * @throws IOException If the extensions discovery file is not properly retrieved. 
*/ public ExtensionsManager(Set> additionalSettings) throws IOException { @@ -504,4 +504,8 @@ void setAddSettingsUpdateConsumerRequestHandler(AddSettingsUpdateConsumerRequest Settings getEnvironmentSettings() { return environmentSettings; } + + public Set> getAdditionalSettings() { + return this.additionalSettings; + } } diff --git a/server/src/main/java/org/opensearch/extensions/rest/RestInitializeExtensionAction.java b/server/src/main/java/org/opensearch/extensions/rest/RestInitializeExtensionAction.java index e0806f8172278..878673b77a4a9 100644 --- a/server/src/main/java/org/opensearch/extensions/rest/RestInitializeExtensionAction.java +++ b/server/src/main/java/org/opensearch/extensions/rest/RestInitializeExtensionAction.java @@ -8,27 +8,37 @@ package org.opensearch.extensions.rest; +import org.opensearch.Version; import org.opensearch.client.node.NodeClient; +import org.opensearch.common.collect.Tuple; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.xcontent.XContentHelper; +import org.opensearch.core.common.Strings; +import org.opensearch.core.xcontent.MediaType; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.core.xcontent.XContentParser; import org.opensearch.extensions.ExtensionDependency; import org.opensearch.extensions.ExtensionScopedSettings; import org.opensearch.extensions.ExtensionsManager; import org.opensearch.extensions.ExtensionsSettings.Extension; import org.opensearch.rest.BaseRestHandler; import org.opensearch.rest.BytesRestResponse; +import org.opensearch.rest.NamedRoute; import org.opensearch.rest.RestRequest; import org.opensearch.rest.RestStatus; import org.opensearch.transport.ConnectTransportException; import java.io.IOException; import java.util.ArrayList; -import java.util.Collections; +import java.util.Arrays; +import java.util.Collection; +import java.util.HashMap; import java.util.List; +import java.util.Map; +import java.util.Set; import java.util.concurrent.CompletionException; import java.util.concurrent.TimeoutException; +import java.util.stream.Collectors; -import static org.opensearch.common.xcontent.XContentParserUtils.ensureExpectedToken; import static org.opensearch.rest.RestRequest.Method.POST; /** @@ -45,7 +55,7 @@ public String getName() { @Override public List routes() { - return List.of(new Route(POST, "/_extensions/initialize")); + return List.of(new NamedRoute.Builder().method(POST).path("/_extensions/initialize").uniqueName("extensions:initialize").build()); } public RestInitializeExtensionAction(ExtensionsManager extensionsManager) { @@ -62,36 +72,79 @@ public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client String openSearchVersion = null; String minimumCompatibleVersion = null; List dependencies = new ArrayList<>(); + Set additionalSettingsKeys = extensionsManager.getAdditionalSettings() + .stream() + .map(s -> s.getKey()) + .collect(Collectors.toSet()); - try (XContentParser parser = request.contentParser()) { - parser.nextToken(); - ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.currentToken(), parser); - while (parser.nextToken() != XContentParser.Token.END_OBJECT) { - String currentFieldName = parser.currentName(); - parser.nextToken(); - if ("name".equals(currentFieldName)) { - name = parser.text(); - } else if ("uniqueId".equals(currentFieldName)) { - uniqueId = parser.text(); - } else if ("hostAddress".equals(currentFieldName)) { - hostAddress = parser.text(); - } else if ("port".equals(currentFieldName)) { - port = parser.text(); - 
} else if ("version".equals(currentFieldName)) { - version = parser.text(); - } else if ("opensearchVersion".equals(currentFieldName)) { - openSearchVersion = parser.text(); - } else if ("minimumCompatibleVersion".equals(currentFieldName)) { - minimumCompatibleVersion = parser.text(); - } else if ("dependencies".equals(currentFieldName)) { - ensureExpectedToken(XContentParser.Token.START_ARRAY, parser.currentToken(), parser); - while (parser.nextToken() != XContentParser.Token.END_ARRAY) { - dependencies.add(ExtensionDependency.parse(parser)); + Tuple> unreadExtensionTuple = XContentHelper.convertToMap( + request.content(), + false, + request.getXContentType().xContent().mediaType() + ); + Map extensionMap = unreadExtensionTuple.v2(); + + ExtensionScopedSettings extAdditionalSettings = new ExtensionScopedSettings(extensionsManager.getAdditionalSettings()); + + try { + // checking to see whether any required fields are missing from extension initialization request or not + String[] requiredFields = { + "name", + "uniqueId", + "hostAddress", + "port", + "version", + "opensearchVersion", + "minimumCompatibleVersion" }; + List missingFields = Arrays.stream(requiredFields) + .filter(field -> !extensionMap.containsKey(field)) + .collect(Collectors.toList()); + if (!missingFields.isEmpty()) { + throw new IOException("Extension is missing these required fields : " + missingFields); + } + + // Parse extension dependencies + List extensionDependencyList = new ArrayList(); + if (extensionMap.get("dependencies") != null) { + List> extensionDependencies = new ArrayList<>( + (Collection>) extensionMap.get("dependencies") + ); + for (HashMap dependency : extensionDependencies) { + if (Strings.isNullOrEmpty((String) dependency.get("uniqueId"))) { + throw new IOException("Required field [uniqueId] is missing in the request for the dependent extension"); + } else if (dependency.get("version") == null) { + throw new IOException("Required field [version] is missing in the request for the dependent extension"); } + extensionDependencyList.add( + new ExtensionDependency( + dependency.get("uniqueId").toString(), + Version.fromString(dependency.get("version").toString()) + ) + ); } } + + Map additionalSettingsMap = extensionMap.entrySet() + .stream() + .filter(kv -> additionalSettingsKeys.contains(kv.getKey())) + .collect(Collectors.toMap(map -> map.getKey(), map -> map.getValue())); + + Settings.Builder output = Settings.builder(); + output.loadFromMap(additionalSettingsMap); + extAdditionalSettings.applySettings(output.build()); + + // Create extension read from initialization request + name = extensionMap.get("name").toString(); + uniqueId = extensionMap.get("uniqueId").toString(); + hostAddress = extensionMap.get("hostAddress").toString(); + port = extensionMap.get("port").toString(); + version = extensionMap.get("version").toString(); + openSearchVersion = extensionMap.get("opensearchVersion").toString(); + minimumCompatibleVersion = extensionMap.get("minimumCompatibleVersion").toString(); + dependencies = extensionDependencyList; } catch (IOException e) { - throw new IOException("Missing attribute", e); + logger.warn("loading extension has been failed because of exception : " + e.getMessage()); + return channel -> channel.sendResponse(new BytesRestResponse(RestStatus.INTERNAL_SERVER_ERROR, e.getMessage())); } Extension extension = new Extension( @@ -103,8 +156,7 @@ public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client openSearchVersion, minimumCompatibleVersion, dependencies, - // 
TODO add this to the API (https://github.com/opensearch-project/OpenSearch/issues/8032) - new ExtensionScopedSettings(Collections.emptySet()) + extAdditionalSettings ); try { extensionsManager.loadExtension(extension); @@ -136,6 +188,5 @@ public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client channel.sendResponse(new BytesRestResponse(RestStatus.ACCEPTED, builder)); } }; - } } diff --git a/server/src/main/java/org/opensearch/extensions/rest/RestSendToExtensionAction.java b/server/src/main/java/org/opensearch/extensions/rest/RestSendToExtensionAction.java index 073b3f3f45818..3dd6056bb36cf 100644 --- a/server/src/main/java/org/opensearch/extensions/rest/RestSendToExtensionAction.java +++ b/server/src/main/java/org/opensearch/extensions/rest/RestSendToExtensionAction.java @@ -33,9 +33,9 @@ import java.nio.charset.StandardCharsets; import java.security.Principal; import java.util.ArrayList; +import java.util.HashSet; import java.util.List; import java.util.Map; -import java.util.Optional; import java.util.Set; import java.util.stream.Collectors; import java.util.concurrent.CompletableFuture; @@ -90,33 +90,43 @@ public RestSendToExtensionAction( List restActionsAsRoutes = new ArrayList<>(); for (String restAction : restActionsRequest.getRestActions()) { - Optional name = Optional.empty(); + + // TODO Find a better way to parse these to avoid code-smells + + String name; + Set actionNames = new HashSet<>(); String[] parts = restAction.split(" "); - if (parts.length < 2) { - throw new IllegalArgumentException("REST action must contain at least a REST method and route"); + if (parts.length < 3) { + throw new IllegalArgumentException("REST action must contain at least a REST method, a route and a unique name"); } try { method = RestRequest.Method.valueOf(parts[0].trim()); path = pathPrefix + parts[1].trim(); - if (parts.length > 2) { - name = Optional.of(parts[2].trim()); + name = parts[2].trim(); + + // comma-separated action names + if (parts.length > 3) { + String[] actions = parts[3].split(","); + for (String action : actions) { + String trimmed = action.trim(); + if (!trimmed.isEmpty()) { + actionNames.add(trimmed); + } + } } } catch (IndexOutOfBoundsException | IllegalArgumentException e) { throw new IllegalArgumentException(restAction + " does not begin with a valid REST method"); } - logger.info("Registering: " + method + " " + path); - if (name.isPresent()) { - NamedRoute nr = new NamedRoute(method, path, name.get()); - restActionsAsRoutes.add(nr); - dynamicActionRegistry.registerDynamicRoute(nr, this); - } else { - Route r = new Route(method, path); - restActionsAsRoutes.add(r); - dynamicActionRegistry.registerDynamicRoute(r, this); - } + logger.info("Registering: " + method + " " + path + " " + name); + + // All extension routes being registered must have a unique name associated with them + NamedRoute nr = new NamedRoute.Builder().method(method).path(path).uniqueName(name).legacyActionNames(actionNames).build(); + restActionsAsRoutes.add(nr); + dynamicActionRegistry.registerDynamicRoute(nr, this); } this.routes = unmodifiableList(restActionsAsRoutes); + // TODO: Modify {@link NamedRoute} to support deprecated route registration List restActionsAsDeprecatedRoutes = new ArrayList<>(); // Iterate in pairs of route / deprecation message List deprecatedActions = restActionsRequest.getDeprecatedRestActions(); diff --git a/server/src/main/java/org/opensearch/extensions/rest/RouteHandler.java b/server/src/main/java/org/opensearch/extensions/rest/RouteHandler.java 
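The handler above validates a plain map instead of streaming parser tokens; a self-contained sketch of that check follows, with a sample payload that mirrors an /_extensions/initialize body (all values are hypothetical):

import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

public final class RequiredFieldsSketch {
    public static void main(String[] args) throws IOException {
        // "minimumCompatibleVersion" is deliberately omitted to trip the check.
        Map<String, Object> extensionMap = Map.of(
            "name", "hello-world",
            "uniqueId", "hello-world",
            "hostAddress", "127.0.0.1",
            "port", "4500",
            "version", "1.0",
            "opensearchVersion", "3.0.0"
        );
        String[] requiredFields = {
            "name", "uniqueId", "hostAddress", "port", "version", "opensearchVersion", "minimumCompatibleVersion" };
        List<String> missingFields = Arrays.stream(requiredFields)
            .filter(field -> !extensionMap.containsKey(field))
            .collect(Collectors.toList());
        if (!missingFields.isEmpty()) {
            throw new IOException("Extension is missing these required fields: " + missingFields);
        }
    }
}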
deleted file mode 100644 index 189d67c120189..0000000000000 --- a/server/src/main/java/org/opensearch/extensions/rest/RouteHandler.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.extensions.rest; - -import java.util.function.Function; - -import org.opensearch.rest.RestHandler.Route; -import org.opensearch.rest.RestRequest; -import org.opensearch.rest.RestRequest.Method; - -/** - * A subclass of {@link Route} that includes a handler method for that route. - */ -public class RouteHandler extends Route { - - private final String name; - - private final Function responseHandler; - - /** - * Handle the method and path with the specified handler. - * - * @param method The {@link Method} to handle. - * @param path The path to handle. - * @param handler The method which handles the method and path. - */ - public RouteHandler(Method method, String path, Function handler) { - super(method, path); - this.responseHandler = handler; - this.name = null; - } - - /** - * Handle the method and path with the specified handler. - * - * @param name The name of the handler. - * @param method The {@link Method} to handle. - * @param path The path to handle. - * @param handler The method which handles the method and path. - */ - public RouteHandler(String name, Method method, String path, Function handler) { - super(method, path); - this.responseHandler = handler; - this.name = name; - } - - /** - * Executes the handler for this route. - * - * @param request The request to handle - * @return the {@link ExtensionRestResponse} result from the handler for this route. - */ - public ExtensionRestResponse handleRequest(RestRequest request) { - return responseHandler.apply(request); - } - - /** - * The name of the RouteHandler. Must be unique across route handlers. - */ - public String name() { - return this.name; - } -} diff --git a/server/src/main/java/org/opensearch/index/codec/customcodecs/Lucene95CustomCodec.java b/server/src/main/java/org/opensearch/index/codec/customcodecs/Lucene95CustomCodec.java index 3c570f9d0566c..8aa422a47a073 100644 --- a/server/src/main/java/org/opensearch/index/codec/customcodecs/Lucene95CustomCodec.java +++ b/server/src/main/java/org/opensearch/index/codec/customcodecs/Lucene95CustomCodec.java @@ -23,7 +23,7 @@ * @opensearch.internal */ public abstract class Lucene95CustomCodec extends FilterCodec { - public static final int DEFAULT_COMPRESSION_LEVEL = 6; + public static final int DEFAULT_COMPRESSION_LEVEL = 3; /** Each mode represents a compression algorithm. 
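For reference, a sketch of the space-delimited registration format that the parser in RestSendToExtensionAction above now expects: METHOD, path, unique name, then optional comma-separated legacy action names. The sample string is hypothetical:

import java.util.HashSet;
import java.util.Set;

public final class RouteStringSketch {
    public static void main(String[] args) {
        String restAction = "GET /hello hw:greet cluster:admin/opensearch/hw/greet,cluster:admin/opensearch/hw/salute";
        String[] parts = restAction.split(" ");
        if (parts.length < 3) {
            throw new IllegalArgumentException("REST action must contain at least a REST method, a route and a unique name");
        }
        String method = parts[0].trim(); // GET
        String path = parts[1].trim();   // /hello
        String name = parts[2].trim();   // hw:greet
        Set<String> actionNames = new HashSet<>();
        if (parts.length > 3) {
            for (String action : parts[3].split(",")) {
                String trimmed = action.trim();
                if (!trimmed.isEmpty()) {
                    actionNames.add(trimmed);
                }
            }
        }
        System.out.println(method + " " + path + " " + name + " " + actionNames);
    }
}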
*/ public enum Mode { diff --git a/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java index e0b37df5c1734..36e0adbbf057f 100644 --- a/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/FlatObjectFieldMapper.java @@ -85,7 +85,7 @@ public static class Defaults { @Override public MappedFieldType keyedFieldType(String key) { - return new FlatObjectFieldType(this.name() + DOT_SYMBOL + key); + return new FlatObjectFieldType(this.name() + DOT_SYMBOL + key, this.name()); } /** @@ -186,6 +186,8 @@ public static final class FlatObjectFieldType extends StringFieldType { private final int ignoreAbove; private final String nullValue; + private final String mappedFieldTypeName; + private KeywordFieldMapper.KeywordFieldType valueFieldType; private KeywordFieldMapper.KeywordFieldType valueAndPathFieldType; @@ -195,10 +197,7 @@ public FlatObjectFieldType(String name, boolean isSearchable, boolean hasDocValu setIndexAnalyzer(Lucene.KEYWORD_ANALYZER); this.ignoreAbove = Integer.MAX_VALUE; this.nullValue = null; - } - - public FlatObjectFieldType(String name) { - this(name, true, true, Collections.emptyMap()); + this.mappedFieldTypeName = null; } public FlatObjectFieldType(String name, FieldType fieldType) { @@ -212,12 +211,28 @@ public FlatObjectFieldType(String name, FieldType fieldType) { ); this.ignoreAbove = Integer.MAX_VALUE; this.nullValue = null; + this.mappedFieldTypeName = null; } public FlatObjectFieldType(String name, NamedAnalyzer analyzer) { super(name, true, false, true, new TextSearchInfo(Defaults.FIELD_TYPE, null, analyzer, analyzer), Collections.emptyMap()); this.ignoreAbove = Integer.MAX_VALUE; this.nullValue = null; + this.mappedFieldTypeName = null; + } + + public FlatObjectFieldType(String name, String mappedFieldTypeName) { + super( + name, + true, + false, + true, + new TextSearchInfo(Defaults.FIELD_TYPE, null, Lucene.KEYWORD_ANALYZER, Lucene.KEYWORD_ANALYZER), + Collections.emptyMap() + ); + this.ignoreAbove = Integer.MAX_VALUE; + this.nullValue = null; + this.mappedFieldTypeName = mappedFieldTypeName; } void setValueFieldType(KeywordFieldMapper.KeywordFieldType valueFieldType) { @@ -356,22 +371,21 @@ public Query termsQuery(List values, QueryShardContext context) { * @return directedSubFieldName */ public String directSubfield() { - if (name().contains(DOT_SYMBOL)) { - String[] dotPathList = name().split("\\."); - return dotPathList[0] + VALUE_AND_PATH_SUFFIX; + if (mappedFieldTypeName == null) { + return new StringBuilder().append(this.name()).append(VALUE_SUFFIX).toString(); } else { - return this.valueFieldType.name(); + return new StringBuilder().append(this.mappedFieldTypeName).append(VALUE_AND_PATH_SUFFIX).toString(); } } /** - * If the search key is assigned with value, - * the dot path was used in search query, then - * rewrite the searchValueString as the format "dotpath=value", + * If the search key has mappedFieldTypeName as prefix, + * then the dot path was used in search query, + * then rewrite the searchValueString as the format "dotpath=value", * @return rewriteSearchValue */ public String rewriteValue(String searchValueString) { - if (!name().contains(DOT_SYMBOL)) { + if (!hasMappedFieldTyeNameInQueryFieldName(name())) { return searchValueString; } else { String rewriteSearchValue = new StringBuilder().append(name()).append(EQUAL_SYMBOL).append(searchValueString).toString(); @@ -380,6 
+394,23 @@ public String rewriteValue(String searchValueString) { } + private boolean hasMappedFieldTyeNameInQueryFieldName(String input) { + String prefix = this.mappedFieldTypeName; + if (prefix == null) { + return false; + } + if (!input.startsWith(prefix)) { + return false; + } + String rest = input.substring(prefix.length()); + + if (rest.isEmpty()) { + return false; + } else { + return true; + } + } + private String inputToString(Object inputValue) { if (inputValue instanceof Integer) { String inputToString = Integer.toString((Integer) inputValue); @@ -460,15 +491,15 @@ public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower } /** - * if there is dot path. query the field name in flatObject parent field. + * if there is dot path. query the field name in flatObject parent field (mappedFieldTypeName). * else query in _field_names system field */ @Override public Query existsQuery(QueryShardContext context) { String searchKey; String searchField; - if (name().contains(DOT_SYMBOL)) { - searchKey = name().split("\\.")[0]; + if (hasMappedFieldTyeNameInQueryFieldName(name())) { + searchKey = this.mappedFieldTypeName; searchField = name(); } else { searchKey = FieldNamesFieldMapper.NAME; diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java b/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java new file mode 100644 index 0000000000000..1eeadfe228a45 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/remote/RemoteStoreUtils.java @@ -0,0 +1,51 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.remote; + +import java.util.Arrays; + +/** + * Utils for remote store + * + * @opensearch.internal + */ +public class RemoteStoreUtils { + public static final int LONG_MAX_LENGTH = String.valueOf(Long.MAX_VALUE).length(); + + /** + * This method subtracts given numbers from Long.MAX_VALUE and returns a string representation of the result. + * The resultant string is guaranteed to be of the same length that of Long.MAX_VALUE. If shorter, we add left padding + * of 0s to the string. 
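A worked example of the inversion, computed directly from the definition above (Long.MAX_VALUE is 9223372036854775807, 19 digits, so every result is 19 characters):

import org.opensearch.index.remote.RemoteStoreUtils;

public final class InvertLongSketch {
    public static void main(String[] args) {
        System.out.println(RemoteStoreUtils.invertLong(0)); // 9223372036854775807
        System.out.println(RemoteStoreUtils.invertLong(5)); // 9223372036854775802
        // Bigger inputs yield lexicographically smaller strings, so an ascending
        // listing of inverted values returns the largest original value first.
        assert RemoteStoreUtils.invertLong(6).compareTo(RemoteStoreUtils.invertLong(5)) < 0;
        // The string overload round-trips back to the original number.
        assert RemoteStoreUtils.invertLong(RemoteStoreUtils.invertLong(5)) == 5;
    }
}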
+ * @param num number to get the inverted long string for + * @return String value of Long.MAX_VALUE - num + */ + public static String invertLong(long num) { + if (num < 0) { + throw new IllegalArgumentException("Negative long values are not allowed"); + } + String invertedLong = String.valueOf(Long.MAX_VALUE - num); + char[] characterArray = new char[LONG_MAX_LENGTH - invertedLong.length()]; + Arrays.fill(characterArray, '0'); + + return new String(characterArray) + invertedLong; + } + + /** + * This method converts the given string into long and subtracts it from Long.MAX_VALUE + * @param str long in string format to be inverted + * @return long value of the invert result + */ + public static long invertLong(String str) { + long num = Long.parseLong(str); + if (num < 0) { + throw new IllegalArgumentException("Strings representing negative long values are not allowed"); + } + return Long.MAX_VALUE - num; + } +} diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index c2bbead2d9430..80478264049c6 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -2632,7 +2632,7 @@ public void restoreFromSnapshotAndRemoteStore( assert recoveryState.getRecoverySource().getType() == RecoverySource.Type.SNAPSHOT : "invalid recovery type: " + recoveryState.getRecoverySource(); StoreRecovery storeRecovery = new StoreRecovery(shardId, logger); - storeRecovery.recoverFromSnapshotAndRemoteStore(this, repository, repositoriesService, listener); + storeRecovery.recoverFromSnapshotAndRemoteStore(this, repository, repositoriesService, listener, threadPool); } catch (Exception e) { listener.onFailure(e); } diff --git a/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java b/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java index f3b82b40faa6f..af86a108a670b 100644 --- a/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java +++ b/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java @@ -30,6 +30,7 @@ import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.index.store.RemoteSegmentStoreDirectory; import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadata; +import org.opensearch.index.translog.Translog; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.threadpool.Scheduler; @@ -85,7 +86,7 @@ public final class RemoteStoreRefreshListener implements ReferenceManager.Refres // Visible for testing static final Set EXCLUDE_FILES = Set.of("write.lock"); // Visible for testing - static final int LAST_N_METADATA_FILES_TO_KEEP = 10; + public static final int LAST_N_METADATA_FILES_TO_KEEP = 10; private final IndexShard indexShard; private final Directory storeDirectory; @@ -202,9 +203,8 @@ private synchronized void syncSegments(boolean isRetry) { // if a new segments_N file is present in local that is not uploaded to remote store yet, it // is considered as a first refresh post commit. A cleanup of stale commit files is triggered. // This is done to avoid delete post each refresh. - // Ideally, we want this to be done in async flow. 
(GitHub issue #4315) if (isRefreshAfterCommit()) { - deleteStaleCommits(); + remoteDirectory.deleteStaleSegmentsAsync(LAST_N_METADATA_FILES_TO_KEEP); } try (GatedCloseable segmentInfosGatedCloseable = indexShard.getSegmentInfosSnapshot()) { @@ -226,7 +226,17 @@ private synchronized void syncSegments(boolean isRetry) { // Each metadata file in the remote segment store represents a commit and the following // statement keeps sure that each metadata will always contain all the segments from last commit + refreshed // segments. - localSegmentsPostRefresh.addAll(SegmentInfos.readCommit(storeDirectory, latestSegmentInfos.get()).files(true)); + SegmentInfos segmentCommitInfos; + try { + segmentCommitInfos = SegmentInfos.readCommit(storeDirectory, latestSegmentInfos.get()); + } catch (Exception e) { + // Seeing discrepancy in segment infos and files on disk. SegmentInfosSnapshot is returning + // a segment_N file which does not exist on local disk. + logger.error("Exception occurred while SegmentInfos.readCommit(..)", e); + logger.error("segmentInfosFiles={} diskFiles={}", localSegmentsPostRefresh, storeDirectory.listAll()); + throw e; + } + localSegmentsPostRefresh.addAll(segmentCommitInfos.files(true)); segmentInfosFiles.stream() .filter(file -> !file.equals(latestSegmentInfos.get())) .forEach(localSegmentsPostRefresh::remove); @@ -351,12 +361,19 @@ void uploadMetadata(Collection localSegmentsPostRefresh, SegmentInfos se userData.put(SequenceNumbers.MAX_SEQ_NO, Long.toString(maxSeqNo)); segmentInfosSnapshot.setUserData(userData, false); - remoteDirectory.uploadMetadata( - localSegmentsPostRefresh, - segmentInfosSnapshot, - storeDirectory, - indexShard.getOperationPrimaryTerm() - ); + Translog.TranslogGeneration translogGeneration = indexShard.getEngine().translogManager().getTranslogGeneration(); + if (translogGeneration == null) { + throw new UnsupportedOperationException("Encountered null TranslogGeneration while uploading metadata to remote segment store"); + } else { + long translogFileGeneration = translogGeneration.translogFileGeneration; + remoteDirectory.uploadMetadata( + localSegmentsPostRefresh, + segmentInfosSnapshot, + storeDirectory, + indexShard.getOperationPrimaryTerm(), + translogFileGeneration + ); + } } private boolean uploadNewSegments(Collection localSegmentsPostRefresh) throws IOException { @@ -382,14 +399,6 @@ private String getChecksumOfLocalFile(String file) throws IOException { return localSegmentChecksumMap.get(file); } - private void deleteStaleCommits() { - try { - remoteDirectory.deleteStaleSegments(LAST_N_METADATA_FILES_TO_KEEP); - } catch (IOException e) { - logger.info("Exception while deleting stale commits from remote segment store, will retry delete post next commit", e); - } - } - /** * Updates the last refresh time and refresh seq no which is seen by local store. 
*/ diff --git a/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java b/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java index 119524e8caf8a..da4e9113143af 100644 --- a/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java +++ b/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java @@ -69,6 +69,7 @@ import org.opensearch.repositories.IndexId; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.Repository; +import org.opensearch.threadpool.ThreadPool; import java.io.IOException; import java.nio.channels.FileChannel; @@ -356,7 +357,8 @@ void recoverFromSnapshotAndRemoteStore( final IndexShard indexShard, Repository repository, RepositoriesService repositoriesService, - ActionListener listener + ActionListener listener, + ThreadPool threadPool ) { try { if (canRecover(indexShard)) { @@ -384,7 +386,10 @@ void recoverFromSnapshotAndRemoteStore( remoteStoreRepository = shallowCopyShardMetadata.getRemoteStoreRepository(); } - RemoteSegmentStoreDirectoryFactory directoryFactory = new RemoteSegmentStoreDirectoryFactory(() -> repositoriesService); + RemoteSegmentStoreDirectoryFactory directoryFactory = new RemoteSegmentStoreDirectoryFactory( + () -> repositoriesService, + threadPool + ); RemoteSegmentStoreDirectory sourceRemoteDirectory = (RemoteSegmentStoreDirectory) directoryFactory.newDirectory( remoteStoreRepository, indexUUID, diff --git a/server/src/main/java/org/opensearch/index/snapshots/blobstore/RemoteStoreShardShallowCopySnapshot.java b/server/src/main/java/org/opensearch/index/snapshots/blobstore/RemoteStoreShardShallowCopySnapshot.java index 8e6ed870c904f..8cb9fd3cd3c63 100644 --- a/server/src/main/java/org/opensearch/index/snapshots/blobstore/RemoteStoreShardShallowCopySnapshot.java +++ b/server/src/main/java/org/opensearch/index/snapshots/blobstore/RemoteStoreShardShallowCopySnapshot.java @@ -408,4 +408,29 @@ private void verifyParameters( throw new IllegalArgumentException(exceptionStr); } } + + /** + * Creates a new instance which has a different name and zero incremental file counts but is identical to this instance in terms of the files + * it references. 
+ * + * @param targetSnapshotName target snapshot name + * @param startTime time the clone operation on the repository was started + * @param time time it took to create the clone + */ + public RemoteStoreShardShallowCopySnapshot asClone(String targetSnapshotName, long startTime, long time) { + return new RemoteStoreShardShallowCopySnapshot( + targetSnapshotName, + indexVersion, + primaryTerm, + commitGeneration, + startTime, + time, + totalFileCount, + totalSize, + indexUUID, + remoteStoreRepository, + repositoryBasePath, + fileNames + ); + } } diff --git a/server/src/main/java/org/opensearch/index/store/RemoteDirectory.java b/server/src/main/java/org/opensearch/index/store/RemoteDirectory.java index be4b4e910bb4d..8782808c070ab 100644 --- a/server/src/main/java/org/opensearch/index/store/RemoteDirectory.java +++ b/server/src/main/java/org/opensearch/index/store/RemoteDirectory.java @@ -13,6 +13,8 @@ import org.apache.lucene.store.IndexInput; import org.apache.lucene.store.IndexOutput; import org.apache.lucene.store.Lock; +import org.opensearch.action.ActionListener; +import org.opensearch.action.LatchedActionListener; import org.opensearch.common.blobstore.BlobContainer; import org.opensearch.common.blobstore.BlobMetadata; @@ -20,10 +22,15 @@ import java.io.IOException; import java.io.InputStream; import java.nio.file.NoSuchFileException; +import java.util.ArrayList; import java.util.Collection; import java.util.Collections; +import java.util.List; import java.util.Map; import java.util.Set; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicReference; +import java.util.stream.Collectors; /** * A {@code RemoteDirectory} provides an abstraction layer for storing a list of files to a remote store. @@ -61,6 +68,40 @@ public Collection listFilesByPrefix(String filenamePrefix) throws IOExce return blobContainer.listBlobsByPrefix(filenamePrefix).keySet(); } + public List listFilesByPrefixInLexicographicOrder(String filenamePrefix, int limit) throws IOException { + List sortedBlobList = new ArrayList<>(); + AtomicReference exception = new AtomicReference<>(); + final CountDownLatch latch = new CountDownLatch(1); + LatchedActionListener> actionListener = new LatchedActionListener<>(new ActionListener<>() { + @Override + public void onResponse(List blobMetadata) { + sortedBlobList.addAll(blobMetadata.stream().map(BlobMetadata::name).collect(Collectors.toList())); + } + + @Override + public void onFailure(Exception e) { + exception.set(e); + } + }, latch); + + try { + blobContainer.listBlobsByPrefixInSortedOrder( + filenamePrefix, + limit, + BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC, + actionListener + ); + latch.await(); + } catch (InterruptedException e) { + throw new IOException("Exception in listFilesByPrefixInLexicographicOrder with prefix: " + filenamePrefix, e); + } + if (exception.get() != null) { + throw new IOException(exception.get()); + } else { + return sortedBlobList; + } + } + /** * Removes an existing file in the directory. 
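The listing method above turns the async BlobContainer call into a blocking one; here is a generic sketch of that latch-and-listener pattern, under the assumption that any async API accepting an ActionListener can be adapted the same way:

import java.io.IOException;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Consumer;
import org.opensearch.action.ActionListener;
import org.opensearch.action.LatchedActionListener;

public final class BlockOnAsyncSketch {
    static <T> T blockingGet(Consumer<ActionListener<T>> asyncCall) throws IOException {
        AtomicReference<T> result = new AtomicReference<>();
        AtomicReference<Exception> failure = new AtomicReference<>();
        CountDownLatch latch = new CountDownLatch(1);
        // The latch is counted down after the wrapped listener fires.
        asyncCall.accept(new LatchedActionListener<>(ActionListener.wrap(result::set, failure::set), latch));
        try {
            latch.await();
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new IOException("Interrupted while waiting for async result", e);
        }
        if (failure.get() != null) {
            throw new IOException(failure.get());
        }
        return result.get();
    }
}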
* diff --git a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java index addd8a24af9c5..e7602203440d2 100644 --- a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java +++ b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java @@ -22,26 +22,27 @@ import org.opensearch.common.UUIDs; import org.opensearch.common.io.VersionedCodecStreamWrapper; import org.opensearch.common.lucene.store.ByteArrayIndexInput; +import org.opensearch.index.remote.RemoteStoreUtils; import org.opensearch.index.store.lockmanager.FileLockInfo; import org.opensearch.index.store.lockmanager.RemoteStoreCommitLevelLockManager; import org.opensearch.index.store.lockmanager.RemoteStoreLockManager; import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadata; import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadataHandler; +import org.opensearch.threadpool.ThreadPool; import java.io.FileNotFoundException; import java.io.IOException; import java.nio.file.NoSuchFileException; import java.util.Collection; import java.util.Collections; -import java.util.Comparator; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; -import java.util.Optional; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicLong; import java.util.stream.Collectors; /** @@ -61,9 +62,6 @@ public final class RemoteSegmentStoreDirectory extends FilterDirectory implement */ public static final String SEGMENT_NAME_UUID_SEPARATOR = "__"; - public static final MetadataFilenameUtils.MetadataFilenameComparator METADATA_FILENAME_COMPARATOR = - new MetadataFilenameUtils.MetadataFilenameComparator(); - /** * remoteDataDirectory is used to store segment files at path: cluster_UUID/index_UUID/shardId/segments/data */ @@ -75,11 +73,7 @@ public final class RemoteSegmentStoreDirectory extends FilterDirectory implement private final RemoteStoreLockManager mdLockManager; - /** - * To prevent explosion of refresh metadata files, we replace refresh files for the given primary term and generation - * This is achieved by uploading refresh metadata file with the same UUID suffix. - */ - private String commonFilenameSuffix; + private final ThreadPool threadPool; /** * Keeps track of local segment filename to uploaded filename along with other attributes like checksum. @@ -96,15 +90,25 @@ public final class RemoteSegmentStoreDirectory extends FilterDirectory implement private static final Logger logger = LogManager.getLogger(RemoteSegmentStoreDirectory.class); + /** + * AtomicBoolean that ensures only one staleCommitDeletion activity is scheduled at a time. 
+ * Visible for testing + */ + protected final AtomicBoolean canDeleteStaleCommits = new AtomicBoolean(true); + + private final AtomicLong metadataUploadCounter = new AtomicLong(0); + public RemoteSegmentStoreDirectory( RemoteDirectory remoteDataDirectory, RemoteDirectory remoteMetadataDirectory, - RemoteStoreLockManager mdLockManager + RemoteStoreLockManager mdLockManager, + ThreadPool threadPool ) throws IOException { super(remoteDataDirectory); this.remoteDataDirectory = remoteDataDirectory; this.remoteMetadataDirectory = remoteMetadataDirectory; this.mdLockManager = mdLockManager; + this.threadPool = threadPool; init(); } @@ -116,7 +120,6 @@ public RemoteSegmentStoreDirectory( * @throws IOException if there were any failures in reading the metadata file */ public RemoteSegmentMetadata init() throws IOException { - this.commonFilenameSuffix = UUIDs.base64UUID(); RemoteSegmentMetadata remoteSegmentMetadata = readLatestMetadataFile(); if (remoteSegmentMetadata != null) { this.segmentsUploadedToRemoteStore = new ConcurrentHashMap<>(remoteSegmentMetadata.getMetadata()); @@ -159,12 +162,15 @@ public RemoteSegmentMetadata initializeToSpecificCommit(long primaryTerm, long c public RemoteSegmentMetadata readLatestMetadataFile() throws IOException { RemoteSegmentMetadata remoteSegmentMetadata = null; - Collection metadataFiles = remoteMetadataDirectory.listFilesByPrefix(MetadataFilenameUtils.METADATA_PREFIX); - Optional latestMetadataFile = metadataFiles.stream().max(METADATA_FILENAME_COMPARATOR); + List metadataFiles = remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder( + MetadataFilenameUtils.METADATA_PREFIX, + 1 + ); - if (latestMetadataFile.isPresent()) { - logger.info("Reading latest Metadata file {}", latestMetadataFile.get()); - remoteSegmentMetadata = readMetadataFile(latestMetadataFile.get()); + if (metadataFiles.isEmpty() == false) { + String latestMetadataFile = metadataFiles.get(0); + logger.info("Reading latest Metadata file {}", latestMetadataFile); + remoteSegmentMetadata = readMetadataFile(latestMetadataFile); } else { logger.info("No metadata file found, this can happen for new index with no data uploaded to remote segment store"); } @@ -176,8 +182,7 @@ private RemoteSegmentMetadata readMetadataFile(String metadataFilename) throws I try (IndexInput indexInput = remoteMetadataDirectory.openInput(metadataFilename, IOContext.DEFAULT)) { byte[] metadataBytes = new byte[(int) indexInput.length()]; indexInput.readBytes(metadataBytes, 0, (int) indexInput.length()); - RemoteSegmentMetadata metadata = metadataStreamWrapper.readStream(new ByteArrayIndexInput(metadataFilename, metadataBytes)); - return metadata; + return metadataStreamWrapper.readStream(new ByteArrayIndexInput(metadataFilename, metadataBytes)); } } @@ -231,56 +236,43 @@ static class MetadataFilenameUtils { public static final String SEPARATOR = "__"; public static final String METADATA_PREFIX = "metadata"; - /** - * Comparator to sort the metadata filenames. The order of sorting is: Primary Term, Generation, UUID - * Even though UUID sort does not provide any info on recency, it provides a consistent way to sort the filenames. 
- */ - static class MetadataFilenameComparator implements Comparator { - @Override - public int compare(String first, String second) { - String[] firstTokens = first.split(SEPARATOR); - String[] secondTokens = second.split(SEPARATOR); - if (!firstTokens[0].equals(secondTokens[0])) { - return firstTokens[0].compareTo(secondTokens[0]); - } - long firstPrimaryTerm = getPrimaryTerm(firstTokens); - long secondPrimaryTerm = getPrimaryTerm(secondTokens); - if (firstPrimaryTerm != secondPrimaryTerm) { - return firstPrimaryTerm > secondPrimaryTerm ? 1 : -1; - } else { - long firstGeneration = getGeneration(firstTokens); - long secondGeneration = getGeneration(secondTokens); - if (firstGeneration != secondGeneration) { - return firstGeneration > secondGeneration ? 1 : -1; - } else { - return getUuid(firstTokens).compareTo(getUuid(secondTokens)); - } - } - } - } - static String getMetadataFilePrefixForCommit(long primaryTerm, long generation) { - return String.join(SEPARATOR, METADATA_PREFIX, Long.toString(primaryTerm), Long.toString(generation, Character.MAX_RADIX)); + return String.join( + SEPARATOR, + METADATA_PREFIX, + RemoteStoreUtils.invertLong(primaryTerm), + RemoteStoreUtils.invertLong(generation) + ); } // Visible for testing - static String getMetadataFilename(long primaryTerm, long generation, String uuid) { - return String.join(SEPARATOR, getMetadataFilePrefixForCommit(primaryTerm, generation), uuid); + static String getMetadataFilename( + long primaryTerm, + long generation, + long translogGeneration, + long uploadCounter, + int metadataVersion + ) { + return String.join( + SEPARATOR, + METADATA_PREFIX, + RemoteStoreUtils.invertLong(primaryTerm), + RemoteStoreUtils.invertLong(generation), + RemoteStoreUtils.invertLong(translogGeneration), + RemoteStoreUtils.invertLong(uploadCounter), + RemoteStoreUtils.invertLong(System.currentTimeMillis()), + String.valueOf(metadataVersion) + ); } // Visible for testing static long getPrimaryTerm(String[] filenameTokens) { - return Long.parseLong(filenameTokens[1]); + return RemoteStoreUtils.invertLong(filenameTokens[1]); } // Visible for testing static long getGeneration(String[] filenameTokens) { - return Long.parseLong(filenameTokens[2], Character.MAX_RADIX); - } - - // Visible for testing - static String getUuid(String[] filenameTokens) { - return filenameTokens[3]; + return RemoteStoreUtils.invertLong(filenameTokens[2]); } } @@ -368,7 +360,6 @@ public IndexInput openInput(String name, IOContext context) throws IOException { @Override public void acquireLock(long primaryTerm, long generation, String acquirerId) throws IOException { String metadataFile = getMetadataFileForCommit(primaryTerm, generation); - mdLockManager.acquire(FileLockInfo.getLockInfoBuilder().withFileToLock(metadataFile).withAcquirerId(acquirerId).build()); } @@ -397,13 +388,19 @@ public void releaseLock(long primaryTerm, long generation, String acquirerId) th @Override public Boolean isLockAcquired(long primaryTerm, long generation) throws IOException { String metadataFile = getMetadataFileForCommit(primaryTerm, generation); + return isLockAcquired(metadataFile); + } + + // Visible for testing + Boolean isLockAcquired(String metadataFile) throws IOException { return mdLockManager.isAcquired(FileLockInfo.getLockInfoBuilder().withFileToLock(metadataFile).build()); } // Visible for testing String getMetadataFileForCommit(long primaryTerm, long generation) throws IOException { - Collection metadataFiles = remoteMetadataDirectory.listFilesByPrefix( - 
MetadataFilenameUtils.getMetadataFilePrefixForCommit(primaryTerm, generation) + List metadataFiles = remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder( + MetadataFilenameUtils.getMetadataFilePrefixForCommit(primaryTerm, generation), + 1 ); if (metadataFiles.isEmpty()) { @@ -421,33 +418,24 @@ String getMetadataFileForCommit(long primaryTerm, long generation) throws IOExce + metadataFiles.size() ); } - return metadataFiles.iterator().next(); + return metadataFiles.get(0); } - public void copyFrom(Directory from, String src, String dest, IOContext context, boolean useCommonSuffix, String checksum) - throws IOException { + public void copyFrom(Directory from, String src, String dest, IOContext context, String checksum) throws IOException { String remoteFilename; - if (useCommonSuffix) { - remoteFilename = dest + SEGMENT_NAME_UUID_SEPARATOR + this.commonFilenameSuffix; - } else { - remoteFilename = getNewRemoteSegmentFilename(dest); - } + remoteFilename = getNewRemoteSegmentFilename(dest); remoteDataDirectory.copyFrom(from, src, remoteFilename, context); UploadedSegmentMetadata segmentMetadata = new UploadedSegmentMetadata(src, remoteFilename, checksum, from.fileLength(src)); segmentsUploadedToRemoteStore.put(src, segmentMetadata); } - public void copyFrom(Directory from, String src, String dest, IOContext context, boolean useCommonSuffix) throws IOException { - copyFrom(from, src, dest, context, useCommonSuffix, getChecksumOfLocalFile(from, src)); - } - /** * Copies an existing src file from directory from to a non-existent file dest in this directory. * Once the segment is uploaded to remote segment store, update the cache accordingly. */ @Override public void copyFrom(Directory from, String src, String dest, IOContext context) throws IOException { - copyFrom(from, src, dest, context, false); + copyFrom(from, src, dest, context, getChecksumOfLocalFile(from, src)); } /** @@ -475,13 +463,16 @@ public void uploadMetadata( Collection segmentFiles, SegmentInfos segmentInfosSnapshot, Directory storeDirectory, - long primaryTerm + long primaryTerm, + long translogGeneration ) throws IOException { synchronized (this) { String metadataFilename = MetadataFilenameUtils.getMetadataFilename( primaryTerm, segmentInfosSnapshot.getGeneration(), - this.commonFilenameSuffix + translogGeneration, + metadataUploadCounter.incrementAndGet(), + RemoteSegmentMetadata.CURRENT_VERSION ); try { IndexOutput indexOutput = storeDirectory.createOutput(metadataFilename, IOContext.DEFAULT); @@ -558,15 +549,6 @@ public Map getSegmentsUploadedToRemoteStore() { return Collections.unmodifiableMap(this.segmentsUploadedToRemoteStore); } - public Map getSegmentsUploadedToRemoteStore(long primaryTerm, long generation) throws IOException { - String metadataFile = getMetadataFileForCommit(primaryTerm, generation); - - Map segmentsUploadedToRemoteStore = new ConcurrentHashMap<>( - readMetadataFile(metadataFile).getMetadata() - ); - return Collections.unmodifiableMap(segmentsUploadedToRemoteStore); - } - /** * Delete stale segment and metadata files * One metadata file is kept per commit (refresh updates the same file). 
To read segments uploaded to remote store, @@ -575,8 +557,10 @@ public Map getSegmentsUploadedToRemoteStore(lon * @throws IOException in case of I/O error while reading from / writing to remote segment store */ public void deleteStaleSegments(int lastNMetadataFilesToKeep) throws IOException { - Collection metadataFiles = remoteMetadataDirectory.listFilesByPrefix(MetadataFilenameUtils.METADATA_PREFIX); - List sortedMetadataFileList = metadataFiles.stream().sorted(METADATA_FILENAME_COMPARATOR).collect(Collectors.toList()); + List sortedMetadataFileList = remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder( + MetadataFilenameUtils.METADATA_PREFIX, + Integer.MAX_VALUE + ); if (sortedMetadataFileList.size() <= lastNMetadataFilesToKeep) { logger.info( "Number of commits in remote segment store={}, lastNMetadataFilesToKeep={}", @@ -587,21 +571,12 @@ public void deleteStaleSegments(int lastNMetadataFilesToKeep) throws IOException } List metadataFilesEligibleToDelete = sortedMetadataFileList.subList( - 0, - sortedMetadataFileList.size() - lastNMetadataFilesToKeep + lastNMetadataFilesToKeep, + sortedMetadataFileList.size() ); List metadataFilesToBeDeleted = metadataFilesEligibleToDelete.stream().filter(metadataFile -> { try { - // TODO: add snapshot interop feature flag here as that will be the first feature to use lock - // manager. - boolean lockManagerEnabled = false; - if (!lockManagerEnabled) { - return true; - } - return !isLockAcquired( - MetadataFilenameUtils.getPrimaryTerm(metadataFile.split(MetadataFilenameUtils.SEPARATOR)), - MetadataFilenameUtils.getGeneration(metadataFile.split(MetadataFilenameUtils.SEPARATOR)) - ); + return !isLockAcquired(metadataFile); } catch (IOException e) { logger.error( "skipping metadata file (" @@ -656,12 +631,42 @@ public void deleteStaleSegments(int lastNMetadataFilesToKeep) throws IOException } } + /** + * Delete stale segment and metadata files asynchronously. + * This method calls {@link RemoteSegmentStoreDirectory#deleteStaleSegments(int)} in an async manner. 
+ * @param lastNMetadataFilesToKeep number of metadata files to keep + */ + public void deleteStaleSegmentsAsync(int lastNMetadataFilesToKeep) { + if (canDeleteStaleCommits.compareAndSet(true, false)) { + try { + threadPool.executor(ThreadPool.Names.REMOTE_PURGE).execute(() -> { + try { + deleteStaleSegments(lastNMetadataFilesToKeep); + } catch (Exception e) { + logger.info( + "Exception while deleting stale commits from remote segment store, will retry deletion after the next commit", + e + ); + } finally { + canDeleteStaleCommits.set(true); + } + }); + } catch (Exception e) { + logger.info("Exception occurred while scheduling deleteStaleCommits", e); + canDeleteStaleCommits.set(true); + } + } + } + /* Tries to delete the shard-level directory if it is empty. Returns true if it was deleted successfully */ private boolean deleteIfEmpty() throws IOException { - Collection metadataFiles = remoteMetadataDirectory.listFilesByPrefix(MetadataFilenameUtils.METADATA_PREFIX); + Collection metadataFiles = remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder( + MetadataFilenameUtils.METADATA_PREFIX, + 1 + ); if (metadataFiles.size() != 0) { logger.info("Remote directory still has files, not deleting the path"); return false; } @@ -680,7 +685,7 @@ private boolean deleteIfEmpty() throws IOException { } public void close() throws IOException { - deleteStaleSegments(0); + deleteStaleSegmentsAsync(0); deleteIfEmpty(); } } diff --git a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java index 03995d5913fb3..3bec84f287ce4 100644 --- a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java +++ b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java @@ -20,6 +20,7 @@ import org.opensearch.repositories.Repository; import org.opensearch.repositories.RepositoryMissingException; import org.opensearch.repositories.blobstore.BlobStoreRepository; +import org.opensearch.threadpool.ThreadPool; import java.io.IOException; import java.util.function.Supplier; @@ -34,8 +35,11 @@ public class RemoteSegmentStoreDirectoryFactory implements IndexStorePlugin.Dire private final Supplier repositoriesService; - public RemoteSegmentStoreDirectoryFactory(Supplier repositoriesService) { + private final ThreadPool threadPool; + + public RemoteSegmentStoreDirectoryFactory(Supplier repositoriesService, ThreadPool threadPool) { this.repositoriesService = repositoriesService; + this.threadPool = threadPool; } @Override @@ -62,7 +66,7 @@ public Directory newDirectory(String repositoryName, String indexUUID, String sh shardId ); - return new RemoteSegmentStoreDirectory(dataDirectory, metadataDirectory, mdLockManager); + return new RemoteSegmentStoreDirectory(dataDirectory, metadataDirectory, mdLockManager, threadPool); } catch (RepositoryMissingException e) { throw new IllegalArgumentException("Repository should be created before creating index with remote_store enabled setting", e); } diff --git a/server/src/main/java/org/opensearch/index/store/lockmanager/FileLockInfo.java b/server/src/main/java/org/opensearch/index/store/lockmanager/FileLockInfo.java index a8fb7bf20c393..24f42743e1a04 100644 --- a/server/src/main/java/org/opensearch/index/store/lockmanager/FileLockInfo.java +++ b/server/src/main/java/org/opensearch/index/store/lockmanager/FileLockInfo.java @@ -8,6 +8,7 @@ package org.opensearch.index.store.lockmanager; +import
java.nio.file.NoSuchFileException; import java.util.Arrays; import java.util.List; import java.util.stream.Collectors; @@ -50,13 +51,21 @@ String getLockPrefix() { return fileToLock + RemoteStoreLockManagerUtils.SEPARATOR; } - List getLocksForAcquirer(String[] lockFiles) { + String getLockForAcquirer(String[] lockFiles) throws NoSuchFileException { if (acquirerId == null || acquirerId.isBlank()) { throw new IllegalArgumentException("Acquirer ID should be provided"); } - return Arrays.stream(lockFiles) + List locksForAcquirer = Arrays.stream(lockFiles) .filter(lockFile -> acquirerId.equals(LockFileUtils.getAcquirerIdFromLock(lockFile))) .collect(Collectors.toList()); + + if (locksForAcquirer.isEmpty()) { + throw new NoSuchFileException("No lock file found for the acquirer: " + acquirerId); + } + if (locksForAcquirer.size() != 1) { + throw new IllegalStateException("Expected single lock file but found [" + locksForAcquirer.size() + "] lock files"); + } + return locksForAcquirer.get(0); } public static LockInfoBuilder getLockInfoBuilder() { diff --git a/server/src/main/java/org/opensearch/index/store/lockmanager/RemoteStoreLockManager.java b/server/src/main/java/org/opensearch/index/store/lockmanager/RemoteStoreLockManager.java index c30be082b4795..9eb066d9e955e 100644 --- a/server/src/main/java/org/opensearch/index/store/lockmanager/RemoteStoreLockManager.java +++ b/server/src/main/java/org/opensearch/index/store/lockmanager/RemoteStoreLockManager.java @@ -39,6 +39,16 @@ public interface RemoteStoreLockManager { */ Boolean isAcquired(LockInfo lockInfo) throws IOException; + /** + * Acquires the lock on the file mentioned in originalLockInfo for the acquirer mentioned in clonedLockInfo. + * A race condition can occur if the original file is deleted before it can be used to acquire a lock for the new acquirer. Until there is a + * fix on the LockManager side, implementors must ensure thread safety for this operation. + * @param originalLockInfo lock info instance for the original lock. + * @param clonedLockInfo lock info instance for which the lock needs to be cloned. + * @throws IOException if the original resource does not hold any lock. + */ + void cloneLock(LockInfo originalLockInfo, LockInfo clonedLockInfo) throws IOException; + /* Deletes all lock related files and directories */ diff --git a/server/src/main/java/org/opensearch/index/store/lockmanager/RemoteStoreMetadataLockManager.java b/server/src/main/java/org/opensearch/index/store/lockmanager/RemoteStoreMetadataLockManager.java index 7df20cae10664..fd7906729e314 100644 --- a/server/src/main/java/org/opensearch/index/store/lockmanager/RemoteStoreMetadataLockManager.java +++ b/server/src/main/java/org/opensearch/index/store/lockmanager/RemoteStoreMetadataLockManager.java @@ -15,8 +15,9 @@ import org.opensearch.index.store.RemoteBufferedOutputDirectory; import java.io.IOException; +import java.nio.file.NoSuchFileException; import java.util.Collection; -import java.util.List; +import java.util.Objects; /** * A Class that implements Remote Store Lock Manager by creating lock files for the remote store files that needs to @@ -48,6 +49,7 @@ public void acquire(LockInfo lockInfo) throws IOException { /** * Releases Locks acquired by a given acquirer which is passed in LockInfo Instance. + * If the lock file doesn't exist for the acquirer, release will be a no-op.
* Right now this method is only used to release locks for a given acquirer. * This can be extended in the future to handle other cases as well, like: * - release lock for given fileToLock and AcquirerId @@ -59,15 +61,12 @@ public void acquire(LockInfo lockInfo) throws IOException { public void release(LockInfo lockInfo) throws IOException { assert lockInfo instanceof FileLockInfo : "lockInfo should be instance of FileLockInfo"; String[] lockFiles = lockDirectory.listAll(); - - // ideally there should be only one lock per acquirer, but just to handle any stale locks, - // we try to release all the locks for the acquirer. - List locksToRelease = ((FileLockInfo) lockInfo).getLocksForAcquirer(lockFiles); - if (locksToRelease.size() > 1) { - logger.warn(locksToRelease.size() + " locks found for acquirer " + ((FileLockInfo) lockInfo).getAcquirerId()); - } - for (String lock : locksToRelease) { - lockDirectory.deleteFile(lock); + try { + String lockToRelease = ((FileLockInfo) lockInfo).getLockForAcquirer(lockFiles); + lockDirectory.deleteFile(lockToRelease); + } catch (NoSuchFileException e) { + // Ignoring if the file to be deleted is not present. + logger.info("No lock file found for acquirerId: {}", ((FileLockInfo) lockInfo).getAcquirerId()); } } @@ -84,6 +83,27 @@ public Boolean isAcquired(LockInfo lockInfo) throws IOException { return !lockFiles.isEmpty(); } + /** + * Acquires the lock on the file mentioned in originalLockInfo for the acquirer mentioned in clonedLockInfo. + * The snapshot layer enforces thread safety by having checks in place to ensure that the source snapshot is not being deleted before proceeding + * with the clone operation. Hence, the original lock file will always be present while acquiring the lock for the cloned snapshot. + * @param originalLockInfo lock info instance for the original lock. + * @param clonedLockInfo lock info instance for which the lock needs to be cloned. + * @throws IOException if the original resource does not hold any lock.
+ */ + @Override + public void cloneLock(LockInfo originalLockInfo, LockInfo clonedLockInfo) throws IOException { + assert originalLockInfo instanceof FileLockInfo : "originalLockInfo should be instance of FileLockInfo"; + assert clonedLockInfo instanceof FileLockInfo : "clonedLockInfo should be instance of FileLockInfo"; + String originalResourceId = Objects.requireNonNull(((FileLockInfo) originalLockInfo).getAcquirerId()); + String clonedResourceId = Objects.requireNonNull(((FileLockInfo) clonedLockInfo).getAcquirerId()); + assert originalResourceId != null && clonedResourceId != null : "provided resourceIds should not be null"; + String[] lockFiles = lockDirectory.listAll(); + String lockNameForAcquirer = ((FileLockInfo) originalLockInfo).getLockForAcquirer(lockFiles); + String fileToLockName = FileLockInfo.LockFileUtils.getFileToLockNameFromLock(lockNameForAcquirer); + acquire(FileLockInfo.getLockInfoBuilder().withFileToLock(fileToLockName).withAcquirerId(clonedResourceId).build()); + } + public void delete() throws IOException { lockDirectory.delete(); } diff --git a/server/src/main/java/org/opensearch/index/translog/InternalTranslogManager.java b/server/src/main/java/org/opensearch/index/translog/InternalTranslogManager.java index 7eaab67ddb5a5..9b3240823f368 100644 --- a/server/src/main/java/org/opensearch/index/translog/InternalTranslogManager.java +++ b/server/src/main/java/org/opensearch/index/translog/InternalTranslogManager.java @@ -300,6 +300,11 @@ public void onDelete() { translog.onDelete(); } + @Override + public Translog.TranslogGeneration getTranslogGeneration() { + return translog.getGeneration(); + } + /** * Reads operations from the translog * @param location location of translog diff --git a/server/src/main/java/org/opensearch/index/translog/NoOpTranslogManager.java b/server/src/main/java/org/opensearch/index/translog/NoOpTranslogManager.java index 58ee8c0fd39e7..dd5593b6d79cd 100644 --- a/server/src/main/java/org/opensearch/index/translog/NoOpTranslogManager.java +++ b/server/src/main/java/org/opensearch/index/translog/NoOpTranslogManager.java @@ -122,4 +122,9 @@ public Translog.Snapshot newChangesSnapshot(long fromSeqNo, long toSeqNo, boolea } public void onDelete() {} + + @Override + public Translog.TranslogGeneration getTranslogGeneration() { + return null; + } } diff --git a/server/src/main/java/org/opensearch/index/translog/TranslogManager.java b/server/src/main/java/org/opensearch/index/translog/TranslogManager.java index 420d6cdc43bbf..78aaa1bc13a00 100644 --- a/server/src/main/java/org/opensearch/index/translog/TranslogManager.java +++ b/server/src/main/java/org/opensearch/index/translog/TranslogManager.java @@ -131,4 +131,6 @@ public interface TranslogManager { Clean up if any needed on deletion of index */ void onDelete(); + + Translog.TranslogGeneration getTranslogGeneration(); } diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index 3742c817118da..aecf5659de7fe 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -717,7 +717,8 @@ protected Node( clusterService.setRerouteService(rerouteService); final IndexStorePlugin.DirectoryFactory remoteDirectoryFactory = new RemoteSegmentStoreDirectoryFactory( - repositoriesServiceReference::get + repositoriesServiceReference::get, + threadPool ); final IndicesService indicesService = new IndicesService( @@ -1500,7 +1501,7 @@ public synchronized void close() throws IOException { 
toClose.add(injector.getInstance(NodeEnvironment.class)); toClose.add(stopWatch::stop); if (FeatureFlags.isEnabled(TELEMETRY)) { - toClose.add(() -> injector.getInstance(TracerFactory.class)); + toClose.add(injector.getInstance(TracerFactory.class)); } if (logger.isTraceEnabled()) { diff --git a/server/src/main/java/org/opensearch/plugins/SearchPipelinePlugin.java b/server/src/main/java/org/opensearch/plugins/SearchPipelinePlugin.java index 3d76bab93a60c..d2ef2b65c5944 100644 --- a/server/src/main/java/org/opensearch/plugins/SearchPipelinePlugin.java +++ b/server/src/main/java/org/opensearch/plugins/SearchPipelinePlugin.java @@ -8,13 +8,24 @@ package org.opensearch.plugins; +import org.opensearch.client.Client; +import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.env.Environment; +import org.opensearch.index.analysis.AnalysisRegistry; +import org.opensearch.script.ScriptService; import org.opensearch.search.pipeline.Processor; import org.opensearch.search.pipeline.SearchPhaseResultsProcessor; +import org.opensearch.search.pipeline.SearchPipelineService; import org.opensearch.search.pipeline.SearchRequestProcessor; import org.opensearch.search.pipeline.SearchResponseProcessor; +import org.opensearch.threadpool.Scheduler; import java.util.Collections; import java.util.Map; +import java.util.function.BiFunction; +import java.util.function.Consumer; +import java.util.function.LongSupplier; /** * An extension point for {@link Plugin} implementation to add custom search pipeline processors. @@ -29,7 +40,7 @@ public interface SearchPipelinePlugin { * in pipeline configurations, and the value is a {@link org.opensearch.search.pipeline.Processor.Factory} * to create the processor from a given pipeline configuration. */ - default Map> getRequestProcessors(Processor.Parameters parameters) { + default Map> getRequestProcessors(Parameters parameters) { return Collections.emptyMap(); } @@ -40,7 +51,7 @@ default Map> getRequestProcess * in pipeline configurations, and the value is a {@link org.opensearch.search.pipeline.Processor.Factory} * to create the processor from a given pipeline configuration. */ - default Map> getResponseProcessors(Processor.Parameters parameters) { + default Map> getResponseProcessors(Parameters parameters) { return Collections.emptyMap(); } @@ -51,7 +62,78 @@ default Map> getResponseProce * in pipeline configurations, and the value is a {@link org.opensearch.search.pipeline.Processor.Factory} * to create the processor from a given pipeline configuration. */ - default Map> getSearchPhaseResultsProcessors(Processor.Parameters parameters) { + default Map> getSearchPhaseResultsProcessors(Parameters parameters) { return Collections.emptyMap(); } + + /** + * Infrastructure class that holds services that can be used by processor factories to create processor instances + * and that gets passed around to all {@link SearchPipelinePlugin}s. + */ + class Parameters { + + /** + * Useful to provide access to the node's environment like config directory to processor factories. + */ + public final Environment env; + + /** + * Provides processors script support. + */ + public final ScriptService scriptService; + + /** + * Provide analyzer support + */ + public final AnalysisRegistry analysisRegistry; + + /** + * Allows processors to read headers set by {@link org.opensearch.action.support.ActionFilter} + * instances that have run while handling the current search. 
+ */ + public final ThreadContext threadContext; + + public final LongSupplier relativeTimeSupplier; + + public final SearchPipelineService searchPipelineService; + + public final Consumer genericExecutor; + + public final NamedXContentRegistry namedXContentRegistry; + + /** + * Provides scheduler support + */ + public final BiFunction scheduler; + + /** + * Provides access to the node's cluster client + */ + public final Client client; + + public Parameters( + Environment env, + ScriptService scriptService, + AnalysisRegistry analysisRegistry, + ThreadContext threadContext, + LongSupplier relativeTimeSupplier, + BiFunction scheduler, + SearchPipelineService searchPipelineService, + Client client, + Consumer genericExecutor, + NamedXContentRegistry namedXContentRegistry + ) { + this.env = env; + this.scriptService = scriptService; + this.threadContext = threadContext; + this.analysisRegistry = analysisRegistry; + this.relativeTimeSupplier = relativeTimeSupplier; + this.scheduler = scheduler; + this.searchPipelineService = searchPipelineService; + this.client = client; + this.genericExecutor = genericExecutor; + this.namedXContentRegistry = namedXContentRegistry; + } + + } } diff --git a/server/src/main/java/org/opensearch/repositories/FilterRepository.java b/server/src/main/java/org/opensearch/repositories/FilterRepository.java index b108e2da1ab04..764f36df6d337 100644 --- a/server/src/main/java/org/opensearch/repositories/FilterRepository.java +++ b/server/src/main/java/org/opensearch/repositories/FilterRepository.java @@ -47,6 +47,7 @@ import org.opensearch.index.snapshots.IndexShardSnapshotStatus; import org.opensearch.index.snapshots.blobstore.RemoteStoreShardShallowCopySnapshot; import org.opensearch.index.store.Store; +import org.opensearch.index.store.lockmanager.RemoteStoreLockManagerFactory; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.snapshots.SnapshotId; import org.opensearch.snapshots.SnapshotInfo; @@ -248,6 +249,18 @@ public void executeConsistentStateUpdate( in.executeConsistentStateUpdate(createUpdateTask, source, onFailure); } + @Override + public void cloneRemoteStoreIndexShardSnapshot( + SnapshotId source, + SnapshotId target, + RepositoryShardId shardId, + String shardGeneration, + RemoteStoreLockManagerFactory remoteStoreLockManagerFactory, + ActionListener listener + ) { + in.cloneRemoteStoreIndexShardSnapshot(source, target, shardId, shardGeneration, remoteStoreLockManagerFactory, listener); + } + @Override public void cloneShardSnapshot( SnapshotId source, diff --git a/server/src/main/java/org/opensearch/repositories/Repository.java b/server/src/main/java/org/opensearch/repositories/Repository.java index c08369b79452d..793b3d317e1bd 100644 --- a/server/src/main/java/org/opensearch/repositories/Repository.java +++ b/server/src/main/java/org/opensearch/repositories/Repository.java @@ -48,6 +48,7 @@ import org.opensearch.index.snapshots.IndexShardSnapshotStatus; import org.opensearch.index.snapshots.blobstore.RemoteStoreShardShallowCopySnapshot; import org.opensearch.index.store.Store; +import org.opensearch.index.store.lockmanager.RemoteStoreLockManagerFactory; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.snapshots.SnapshotId; import org.opensearch.snapshots.SnapshotInfo; @@ -375,6 +376,27 @@ void cloneShardSnapshot( ActionListener listener ); + /** + * Clones a remote store index shard snapshot. 
+ * + * @param source source snapshot + * @param target target snapshot + * @param shardId shard id + * @param shardGeneration shard generation in repo + * @param remoteStoreLockManagerFactory remoteStoreLockManagerFactory for cloning metadata lock file + * @param listener listener to complete with new shard generation once clone has completed + */ + default void cloneRemoteStoreIndexShardSnapshot( + SnapshotId source, + SnapshotId target, + RepositoryShardId shardId, + @Nullable String shardGeneration, + RemoteStoreLockManagerFactory remoteStoreLockManagerFactory, + ActionListener listener + ) { + throw new UnsupportedOperationException(); + } + /** * Hook that allows a repository to filter the user supplied snapshot metadata in {@link SnapshotsInProgress.Entry#userMetadata()} * during snapshot initialization. diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java index f04bf83c2f1d1..be5fbf2ab6a51 100644 --- a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java @@ -114,6 +114,9 @@ import org.opensearch.index.snapshots.blobstore.SnapshotFiles; import org.opensearch.index.store.Store; import org.opensearch.index.store.StoreFileMetadata; +import org.opensearch.index.store.lockmanager.FileLockInfo; +import org.opensearch.index.store.lockmanager.RemoteStoreLockManagerFactory; +import org.opensearch.index.store.lockmanager.RemoteStoreMetadataLockManager; import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.repositories.IndexId; @@ -506,8 +509,8 @@ public void cloneShardSnapshot( executor.execute(ActionRunnable.supply(listener, () -> { final long startTime = threadPool.absoluteTimeInMillis(); final BlobContainer shardContainer = shardContainer(index, shardNum); - final BlobStoreIndexShardSnapshots existingSnapshots; final String newGen; + final BlobStoreIndexShardSnapshots existingSnapshots; final String existingShardGen; if (shardGeneration == null) { Tuple tuple = buildBlobStoreIndexShardSnapshots( @@ -560,6 +563,9 @@ public void cloneShardSnapshot( + "]. A snapshot by that name already exists for this shard." ); } + // We don't need to check whether a shallow snapshot with the same name exists: the check performed against the repository + // data before starting the clone operation already ensures that the snapshot name is available. Also, the new clone snapshot + // will have a different UUID and hence a new unique snap-N file will be created.
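+ // Illustration (hypothetical UUIDs; assumes the snap-<uuid>.dat naming used by INDEX_SHARD_SNAPSHOT_FORMAT): cloning snapshot A (uuid aaa) to B (uuid bbb) reads snap-aaa.dat below and writes snap-bbb.dat pointing at the same segment files, so no segment data blobs are copied.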
final BlobStoreIndexShardSnapshot sourceMeta = loadShardSnapshot(shardContainer, source); logger.trace("[{}] [{}] writing shard snapshot file for clone", shardId, target); INDEX_SHARD_SNAPSHOT_FORMAT.write( @@ -578,6 +584,50 @@ public void cloneShardSnapshot( })); } + @Override + public void cloneRemoteStoreIndexShardSnapshot( + SnapshotId source, + SnapshotId target, + RepositoryShardId shardId, + @Nullable String shardGeneration, + RemoteStoreLockManagerFactory remoteStoreLockManagerFactory, + ActionListener listener + ) { + if (isReadOnly()) { + listener.onFailure(new RepositoryException(metadata.name(), "cannot clone shard snapshot on a readonly repository")); + return; + } + final IndexId index = shardId.index(); + final int shardNum = shardId.shardId(); + final Executor executor = threadPool.executor(ThreadPool.Names.SNAPSHOT); + executor.execute(ActionRunnable.supply(listener, () -> { + final long startTime = threadPool.relativeTimeInMillis(); + final BlobContainer shardContainer = shardContainer(index, shardNum); + // We don't need to check whether a shallow/full copy snapshot with the same name exists: the check performed against the + // repository data before starting the clone operation already ensures that the snapshot name is available. Also, the new clone + // shallow snapshot will have a different UUID and hence a new unique shallow-snap-N file will be created. + RemoteStoreShardShallowCopySnapshot remStoreBasedShardMetadata = loadShallowCopyShardSnapshot(shardContainer, source); + String indexUUID = remStoreBasedShardMetadata.getIndexUUID(); + String remoteStoreRepository = remStoreBasedShardMetadata.getRemoteStoreRepository(); + RemoteStoreMetadataLockManager remoteStoreMetadataLockManager = remoteStoreLockManagerFactory.newLockManager( + remoteStoreRepository, + indexUUID, + String.valueOf(shardId.shardId()) + ); + remoteStoreMetadataLockManager.cloneLock( + FileLockInfo.getLockInfoBuilder().withAcquirerId(source.getUUID()).build(), + FileLockInfo.getLockInfoBuilder().withAcquirerId(target.getUUID()).build() + ); + REMOTE_STORE_SHARD_SHALLOW_COPY_SNAPSHOT_FORMAT.write( + remStoreBasedShardMetadata.asClone(target.getName(), startTime, threadPool.absoluteTimeInMillis() - startTime), + shardContainer, + target.getUUID(), + compressor + ); + return shardGeneration; + })); + } + // Inspects all cluster state elements that contain a hint about what the current repository generation is and updates // #latestKnownRepoGen if a newer than currently known generation is found @Override diff --git a/server/src/main/java/org/opensearch/rest/NamedRoute.java b/server/src/main/java/org/opensearch/rest/NamedRoute.java index f5eaafcd04056..109f688a4924e 100644 --- a/server/src/main/java/org/opensearch/rest/NamedRoute.java +++ b/server/src/main/java/org/opensearch/rest/NamedRoute.java @@ -9,6 +9,13 @@ package org.opensearch.rest; import org.opensearch.OpenSearchException; +import org.opensearch.transport.TransportService; + +import java.util.HashSet; +import java.util.Set; +import java.util.function.Function; + +import static java.util.Objects.requireNonNull; /** * A named Route * @opensearch.internal */ public class NamedRoute extends RestHandler.Route { + private static final String VALID_ACTION_NAME_PATTERN = "^[a-zA-Z0-9:/*_]*$"; static final int MAX_LENGTH_OF_ACTION_NAME = 250; - private final String name; + private final String uniqueName; + private final Set actionNames; - public boolean isValidRouteName(String routeName) { - if (routeName == null ||
routeName.isBlank() || routeName.length() > MAX_LENGTH_OF_ACTION_NAME) { - return false; + private Function handler; + + /** + * Builder class for constructing instances of {@link NamedRoute}. + */ + public static class Builder { + private RestRequest.Method method; + private String path; + private String uniqueName; + private final Set legacyActionNames = new HashSet<>(); + private Function handler; + + /** + * Sets the REST method for the route. + * + * @param method the REST method for the route + * @return the builder instance + */ + public Builder method(RestRequest.Method method) { + requireNonNull(method, "REST method must not be null."); + this.method = method; + return this; + } + + /** + * Sets the URL path for the route. + * + * @param path the URL path for the route + * @return the builder instance + */ + public Builder path(String path) { + requireNonNull(path, "REST path must not be null."); + this.path = path; + return this; + } + + /** + * Sets the name for the route. + * + * @param name the name for the route + * @return the builder instance + */ + public Builder uniqueName(String name) { + requireNonNull(name, "REST route name must not be null."); + this.uniqueName = name; + return this; + } + + /** + * Sets the legacy action names for the route. + * + * @param legacyActionNames the legacy action names for the route + * @return the builder instance + */ + public Builder legacyActionNames(Set legacyActionNames) { + this.legacyActionNames.addAll(validateLegacyActionNames(legacyActionNames)); + return this; + } + + /** + * Sets the handler for this route + * + * @param handler the handler for this route + * @return the builder instance + */ + public Builder handler(Function handler) { + requireNonNull(handler, "Route handler must not be null."); + this.handler = handler; + return this; + } + + /** + * Builds a new instance of {@link NamedRoute} based on the provided parameters. + * + * @return a new instance of {@link NamedRoute} + * @throws OpenSearchException if the route name is invalid + */ + public NamedRoute build() { + checkIfFieldsAreSet(); + return new NamedRoute(this); + } + + /** + * Checks if all builder fields are set before creating a new NamedRoute object + */ + private void checkIfFieldsAreSet() { + if (method == null || path == null || uniqueName == null) { + throw new IllegalStateException("REST method, path and uniqueName are required."); + } + } + + private Set validateLegacyActionNames(Set legacyActionNames) { + if (legacyActionNames == null) { + return new HashSet<>(); + } + for (String actionName : legacyActionNames) { + if (!TransportService.isValidActionName(actionName)) { + throw new OpenSearchException( + "Invalid action name [" + actionName + "]. It must start with one of: " + TransportService.VALID_ACTION_PREFIXES + ); + } + } + return legacyActionNames; } - return routeName.matches(VALID_ACTION_NAME_PATTERN); + } - public NamedRoute(RestRequest.Method method, String path, String name) { - super(method, path); - if (!isValidRouteName(name)) { + private NamedRoute(Builder builder) { + super(builder.method, builder.path); + if (!isValidRouteName(builder.uniqueName)) { throw new OpenSearchException( "Invalid route name specified. 
The route name may include the following characters" + " 'a-z', 'A-Z', '0-9', ':', '/', '*', '_' and be no more than " @@ -38,18 +147,43 @@ public NamedRoute(RestRequest.Method method, String path, String name) { + " characters" ); } - this.name = name; + this.uniqueName = builder.uniqueName; + this.actionNames = Set.copyOf(builder.legacyActionNames); + this.handler = builder.handler; + } + + public boolean isValidRouteName(String routeName) { + return routeName != null + && !routeName.isBlank() + && routeName.length() <= MAX_LENGTH_OF_ACTION_NAME + && routeName.matches(VALID_ACTION_NAME_PATTERN); } /** * The name of the Route. Must be unique across Route. */ public String name() { - return this.name; + return this.uniqueName; + } + + /** + * The legacy transport action names to match against this route to support authorization in the REST layer. + * Each MUST be unique across all routes. + */ + public Set actionNames() { + return this.actionNames; + } + + /** + * The handler associated with this route + * @return the handler associated with this route + */ + public Function handler() { + return handler; } @Override public String toString() { - return "NamedRoute [method=" + method + ", path=" + path + ", name=" + name + "]"; + return "NamedRoute [method=" + method + ", path=" + path + ", name=" + uniqueName + ", actionNames=" + actionNames + "]"; } } diff --git a/server/src/main/java/org/opensearch/search/SearchService.java b/server/src/main/java/org/opensearch/search/SearchService.java index 7d67c6c3b45f4..9daad9112e473 100644 --- a/server/src/main/java/org/opensearch/search/SearchService.java +++ b/server/src/main/java/org/opensearch/search/SearchService.java @@ -1550,7 +1550,9 @@ private CanMatchResponse canMatch(ShardSearchRequest request, boolean checkRefre } public static boolean canMatchSearchAfter(FieldDoc searchAfter, MinAndMax minMax, FieldSortBuilder primarySortField) { - if (searchAfter != null && minMax != null && primarySortField != null) { + // Check that sort.missing == null: for sort queries that define missing values, a segment/shard whose min/max lies + // outside the search_after range may still contain matching documents, so it must not be skipped.
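+ // Worked example (hypothetical values): with an ascending sort on "ts", search_after = 25 and a segment whose max is 20, no document in the segment can sort after 25, so it is skipped. If the sort defines missing("_last"), documents lacking "ts" sort after every indexed value and may still match, so the segment must not be skipped.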
+ if (searchAfter != null && minMax != null && primarySortField != null && primarySortField.missing() == null) { final Object searchAfterPrimary = searchAfter.fields[0]; if (primarySortField.order() == SortOrder.DESC) { if (minMax.compareMin(searchAfterPrimary) > 0) { diff --git a/server/src/main/java/org/opensearch/search/pipeline/PipelineWithMetrics.java b/server/src/main/java/org/opensearch/search/pipeline/PipelineWithMetrics.java index 612e979e56070..060894a37e5ed 100644 --- a/server/src/main/java/org/opensearch/search/pipeline/PipelineWithMetrics.java +++ b/server/src/main/java/org/opensearch/search/pipeline/PipelineWithMetrics.java @@ -77,19 +77,28 @@ static PipelineWithMetrics create( Map> phaseResultsProcessorFactories, NamedWriteableRegistry namedWriteableRegistry, OperationMetrics totalRequestProcessingMetrics, - OperationMetrics totalResponseProcessingMetrics + OperationMetrics totalResponseProcessingMetrics, + Processor.PipelineContext pipelineContext ) throws Exception { String description = ConfigurationUtils.readOptionalStringProperty(null, null, config, DESCRIPTION_KEY); Integer version = ConfigurationUtils.readIntProperty(null, null, config, VERSION_KEY, null); List> requestProcessorConfigs = ConfigurationUtils.readOptionalList(null, null, config, REQUEST_PROCESSORS_KEY); - List requestProcessors = readProcessors(requestProcessorFactories, requestProcessorConfigs); + List requestProcessors = readProcessors( + requestProcessorFactories, + requestProcessorConfigs, + pipelineContext + ); List> responseProcessorConfigs = ConfigurationUtils.readOptionalList( null, null, config, RESPONSE_PROCESSORS_KEY ); - List responseProcessors = readProcessors(responseProcessorFactories, responseProcessorConfigs); + List responseProcessors = readProcessors( + responseProcessorFactories, + responseProcessorConfigs, + pipelineContext + ); List> phaseResultsProcessorConfigs = ConfigurationUtils.readOptionalList( null, null, @@ -98,7 +107,8 @@ static PipelineWithMetrics create( ); List phaseResultsProcessors = readProcessors( phaseResultsProcessorFactories, - phaseResultsProcessorConfigs + phaseResultsProcessorConfigs, + pipelineContext ); if (config.isEmpty() == false) { throw new OpenSearchParseException( @@ -125,7 +135,8 @@ static PipelineWithMetrics create( private static List readProcessors( Map> processorFactories, - List> requestProcessorConfigs + List> requestProcessorConfigs, + Processor.PipelineContext pipelineContext ) throws Exception { List processors = new ArrayList<>(); if (requestProcessorConfigs == null) { @@ -140,7 +151,19 @@ private static List readProcessors( Map config = (Map) entry.getValue(); String tag = ConfigurationUtils.readOptionalStringProperty(null, null, config, TAG_KEY); String description = ConfigurationUtils.readOptionalStringProperty(null, tag, config, DESCRIPTION_KEY); - processors.add(processorFactories.get(type).create(processorFactories, tag, description, config)); + processors.add(processorFactories.get(type).create(processorFactories, tag, description, config, pipelineContext)); + if (config.isEmpty() == false) { + String processorName = type; + if (tag != null) { + processorName = processorName + ":" + tag; + } + throw new OpenSearchParseException( + "processor [" + + processorName + + "] doesn't support one or more provided configuration parameters: " + + Arrays.toString(config.keySet().toArray()) + ); + } } } return Collections.unmodifiableList(processors); diff --git a/server/src/main/java/org/opensearch/search/pipeline/Processor.java 
b/server/src/main/java/org/opensearch/search/pipeline/Processor.java index ee28db1cc334d..cc96132479c74 100644 --- a/server/src/main/java/org/opensearch/search/pipeline/Processor.java +++ b/server/src/main/java/org/opensearch/search/pipeline/Processor.java @@ -8,19 +8,7 @@ package org.opensearch.search.pipeline; -import org.opensearch.client.Client; -import org.opensearch.common.util.concurrent.ThreadContext; -import org.opensearch.core.xcontent.NamedXContentRegistry; -import org.opensearch.env.Environment; -import org.opensearch.index.analysis.AnalysisRegistry; -import org.opensearch.plugins.SearchPipelinePlugin; -import org.opensearch.script.ScriptService; -import org.opensearch.threadpool.Scheduler; - import java.util.Map; -import java.util.function.BiFunction; -import java.util.function.Consumer; -import java.util.function.LongSupplier; /** * A processor implementation may modify the request or response from a search call. @@ -33,6 +21,12 @@ * @opensearch.internal */ public interface Processor { + /** + * Processor configuration key to let the factory know the context for pipeline creation. + *

+ * See {@link PipelineSource}. + */ + String PIPELINE_SOURCE = "pipeline_source"; /** * Gets the type of processor @@ -61,81 +55,45 @@ interface Factory { * @param tag The tag for the processor * @param description A short description of what this processor does * @param config The configuration for the processor - * * Note: Implementations are responsible for removing the used configuration * keys, so that after creation the config map should be empty. + * @param pipelineContext Contextual information about the enclosing pipeline. */ - T create(Map> processorFactories, String tag, String description, Map config) throws Exception; + T create( + Map> processorFactories, + String tag, + String description, + Map config, + PipelineContext pipelineContext + ) throws Exception; } /** - * Infrastructure class that holds services that can be used by processor factories to create processor instances - * and that gets passed around to all {@link SearchPipelinePlugin}s. + * Contextual information about the enclosing pipeline. A processor factory may change processor initialization behavior or + * pass this information to the created processor instance. */ - class Parameters { + class PipelineContext { + private final PipelineSource pipelineSource; - /** - * Useful to provide access to the node's environment like config directory to processor factories. - */ - public final Environment env; - - /** - * Provides processors script support. - */ - public final ScriptService scriptService; - - /** - * Provide analyzer support - */ - public final AnalysisRegistry analysisRegistry; - - /** - * Allows processors to read headers set by {@link org.opensearch.action.support.ActionFilter} - * instances that have run while handling the current search. - */ - public final ThreadContext threadContext; - - public final LongSupplier relativeTimeSupplier; - - public final SearchPipelineService searchPipelineService; - - public final Consumer genericExecutor; - - public final NamedXContentRegistry namedXContentRegistry; - - /** - * Provides scheduler support - */ - public final BiFunction scheduler; - - /** - * Provides access to the node's cluster client - */ - public final Client client; + public PipelineContext(PipelineSource pipelineSource) { + this.pipelineSource = pipelineSource; + } - public Parameters( - Environment env, - ScriptService scriptService, - AnalysisRegistry analysisRegistry, - ThreadContext threadContext, - LongSupplier relativeTimeSupplier, - BiFunction scheduler, - SearchPipelineService searchPipelineService, - Client client, - Consumer genericExecutor, - NamedXContentRegistry namedXContentRegistry - ) { - this.env = env; - this.scriptService = scriptService; - this.threadContext = threadContext; - this.analysisRegistry = analysisRegistry; - this.relativeTimeSupplier = relativeTimeSupplier; - this.scheduler = scheduler; - this.searchPipelineService = searchPipelineService; - this.client = client; - this.genericExecutor = genericExecutor; - this.namedXContentRegistry = namedXContentRegistry; + public PipelineSource getPipelineSource() { + return pipelineSource; } + } + /** + * A processor factory may change the processor initialization behavior based on the creation context (e.g. avoiding + * creating expensive resources during validation or in a request-scoped pipeline.) 
+ */ + enum PipelineSource { + // A named pipeline is being created or updated + UPDATE_PIPELINE, + // Pipeline is defined within a search request + SEARCH_REQUEST, + // A named pipeline is being validated before being written to cluster state + VALIDATE_PIPELINE } } diff --git a/server/src/main/java/org/opensearch/search/pipeline/SearchPipelineService.java b/server/src/main/java/org/opensearch/search/pipeline/SearchPipelineService.java index 70dc8546a077f..83a7a0564467e 100644 --- a/server/src/main/java/org/opensearch/search/pipeline/SearchPipelineService.java +++ b/server/src/main/java/org/opensearch/search/pipeline/SearchPipelineService.java @@ -103,7 +103,7 @@ public SearchPipelineService( this.scriptService = scriptService; this.threadPool = threadPool; this.namedWriteableRegistry = namedWriteableRegistry; - Processor.Parameters parameters = new Processor.Parameters( + SearchPipelinePlugin.Parameters parameters = new SearchPipelinePlugin.Parameters( env, scriptService, analysisRegistry, @@ -189,7 +189,8 @@ void innerUpdatePipelines(SearchPipelineMetadata newSearchPipelineMetadata) { phaseInjectorProcessorFactories, namedWriteableRegistry, totalRequestProcessingMetrics, - totalResponseProcessingMetrics + totalResponseProcessingMetrics, + new Processor.PipelineContext(Processor.PipelineSource.UPDATE_PIPELINE) ); newPipelines.put(newConfiguration.getId(), new PipelineHolder(newConfiguration, newPipeline)); @@ -289,7 +290,8 @@ void validatePipeline(Map searchPipelineInfos phaseInjectorProcessorFactories, namedWriteableRegistry, new OperationMetrics(), // Use ephemeral metrics for validation - new OperationMetrics() + new OperationMetrics(), + new Processor.PipelineContext(Processor.PipelineSource.VALIDATE_PIPELINE) ); List exceptions = new ArrayList<>(); for (SearchRequestProcessor processor : pipeline.getSearchRequestProcessors()) { @@ -388,7 +390,8 @@ public PipelinedRequest resolvePipeline(SearchRequest searchRequest) { phaseInjectorProcessorFactories, namedWriteableRegistry, totalRequestProcessingMetrics, - totalResponseProcessingMetrics + totalResponseProcessingMetrics, + new Processor.PipelineContext(Processor.PipelineSource.SEARCH_REQUEST) ); } catch (Exception e) { throw new SearchPipelineProcessingException(e); diff --git a/server/src/main/java/org/opensearch/snapshots/SnapshotInfo.java b/server/src/main/java/org/opensearch/snapshots/SnapshotInfo.java index 1619f36738f7b..6bdbcfee29a9a 100644 --- a/server/src/main/java/org/opensearch/snapshots/SnapshotInfo.java +++ b/server/src/main/java/org/opensearch/snapshots/SnapshotInfo.java @@ -532,6 +532,7 @@ public Boolean includeGlobalState() { return includeGlobalState; } + @Nullable public Boolean isRemoteStoreIndexShallowCopyEnabled() { return remoteStoreIndexShallowCopy; } diff --git a/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java b/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java index b523c1ba12b05..0a455c376f62d 100644 --- a/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java +++ b/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java @@ -88,8 +88,10 @@ import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.concurrent.AbstractRunnable; import org.opensearch.index.Index; import org.opensearch.index.shard.ShardId; +import org.opensearch.index.store.lockmanager.RemoteStoreLockManagerFactory; import org.opensearch.repositories.IndexId; import 
org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.Repository; @@ -151,6 +153,8 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus private final RepositoriesService repositoriesService; + private final RemoteStoreLockManagerFactory remoteStoreLockManagerFactory; + private final ThreadPool threadPool; private final Map>>> snapshotCompletionListeners = @@ -206,6 +210,7 @@ public SnapshotsService( this.clusterService = clusterService; this.indexNameExpressionResolver = indexNameExpressionResolver; this.repositoriesService = repositoriesService; + this.remoteStoreLockManagerFactory = new RemoteStoreLockManagerFactory(() -> repositoriesService); this.threadPool = transportService.getThreadPool(); this.transportService = transportService; @@ -621,7 +626,10 @@ public ClusterState execute(ClusterState currentState) { } } } - updatedEntry = cloneEntry.withClones(clonesBuilder); + updatedEntry = cloneEntry.withClones(clonesBuilder) + .withRemoteStoreIndexShallowCopy( + Boolean.TRUE.equals(snapshotInfoListener.result().isRemoteStoreIndexShallowCopyEnabled()) + ); updatedEntries.set(i, updatedEntry); changed = true; break; @@ -649,7 +658,8 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS continue; } final RepositoryShardId repoShardId = indexClone.getKey(); - runReadyClone(target, sourceSnapshot, shardStatusBefore, repoShardId, repository); + final boolean remoteStoreIndexShallowCopy = Boolean.TRUE.equals(updatedEntry.remoteStoreIndexShallowCopy()); + runReadyClone(target, sourceSnapshot, shardStatusBefore, repoShardId, repository, remoteStoreIndexShallowCopy); } } else { // Extremely unlikely corner case of cluster-manager failing over between starting the clone and @@ -667,60 +677,112 @@ private void runReadyClone( SnapshotId sourceSnapshot, ShardSnapshotStatus shardStatusBefore, RepositoryShardId repoShardId, - Repository repository + Repository repository, + boolean remoteStoreIndexShallowCopy ) { - final SnapshotId targetSnapshot = target.getSnapshotId(); - final String localNodeId = clusterService.localNode().getId(); - if (currentlyCloning.add(repoShardId)) { - repository.cloneShardSnapshot( - sourceSnapshot, - targetSnapshot, - repoShardId, - shardStatusBefore.generation(), - ActionListener.wrap( - generation -> innerUpdateSnapshotState( - new ShardSnapshotUpdate(target, repoShardId, new ShardSnapshotStatus(localNodeId, ShardState.SUCCESS, generation)), - ActionListener.runBefore( - ActionListener.wrap( - v -> logger.trace( - "Marked [{}] as successfully cloned from [{}] to [{}]", + final Executor executor = threadPool.executor(ThreadPool.Names.SNAPSHOT); + executor.execute(new AbstractRunnable() { + @Override + public void onFailure(Exception e) { + logger.warn( + "Failed to get repository data while cloning shard [{}] from [{}] to [{}]", + repoShardId, + sourceSnapshot, + target.getSnapshotId() + ); + failCloneShardAndUpdateClusterState(target, sourceSnapshot, repoShardId); + } + + @Override + protected void doRun() { + final String localNodeId = clusterService.localNode().getId(); + repository.getRepositoryData(ActionListener.wrap(repositoryData -> { + try { + final IndexMetadata indexMetadata = repository.getSnapshotIndexMetaData( + repositoryData, + sourceSnapshot, + repoShardId.index() + ); + final boolean cloneRemoteStoreIndexShardSnapshot = remoteStoreIndexShallowCopy + && indexMetadata.getSettings().getAsBoolean(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, false); + final
SnapshotId targetSnapshot = target.getSnapshotId(); + final ActionListener listener = ActionListener.wrap( + generation -> innerUpdateSnapshotState( + new ShardSnapshotUpdate( + target, repoShardId, - sourceSnapshot, - targetSnapshot + new ShardSnapshotStatus(localNodeId, ShardState.SUCCESS, generation) ), - e -> { - logger.warn("Cluster state update after successful shard clone [{}] failed", repoShardId); - failAllListenersOnMasterFailOver(e); - } + ActionListener.runBefore( + ActionListener.wrap( + v -> logger.trace( + "Marked [{}] as successfully cloned from [{}] to [{}]", + repoShardId, + sourceSnapshot, + targetSnapshot + ), + e -> { + logger.warn("Cluster state update after successful shard clone [{}] failed", repoShardId); + failAllListenersOnMasterFailOver(e); + } + ), + () -> currentlyCloning.remove(repoShardId) + ) ), - () -> currentlyCloning.remove(repoShardId) - ) - ), - e -> innerUpdateSnapshotState( - new ShardSnapshotUpdate( - target, - repoShardId, - new ShardSnapshotStatus(localNodeId, ShardState.FAILED, "failed to clone shard snapshot", null) - ), - ActionListener.runBefore( - ActionListener.wrap( - v -> logger.trace( - "Marked [{}] as failed clone from [{}] to [{}]", + e -> { + logger.warn("Exception [{}] while trying to clone shard [{}]", e, repoShardId); + failCloneShardAndUpdateClusterState(target, sourceSnapshot, repoShardId); + } + ); + if (currentlyCloning.add(repoShardId)) { + if (cloneRemoteStoreIndexShardSnapshot) { + repository.cloneRemoteStoreIndexShardSnapshot( + sourceSnapshot, + targetSnapshot, repoShardId, + shardStatusBefore.generation(), + remoteStoreLockManagerFactory, + listener + ); + } else { + repository.cloneShardSnapshot( sourceSnapshot, - targetSnapshot - ), - ex -> { - logger.warn("Cluster state update after failed shard clone [{}] failed", repoShardId); - failAllListenersOnMasterFailOver(ex); - } - ), - () -> currentlyCloning.remove(repoShardId) - ) - ) - ) - ); - } + targetSnapshot, + repoShardId, + shardStatusBefore.generation(), + listener + ); + } + } + } catch (IOException e) { + logger.warn("Failed to get index-metadata from repository data for index [{}]", repoShardId.index().getName()); + failCloneShardAndUpdateClusterState(target, sourceSnapshot, repoShardId); + } + }, this::onFailure)); + } + }); + } + + private void failCloneShardAndUpdateClusterState(Snapshot target, SnapshotId sourceSnapshot, RepositoryShardId repoShardId) { + // Stale blobs/lock-files will be cleaned up during delete/cleanup operation. 
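+ // For instance (assumed sequence): if the clone failed after cloneLock() had already created a lock file for target.getUUID(), that lock file is left in place here; deleting the failed clone snapshot later releases it via the snapshot delete/cleanup flow.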
+ final String localNodeId = clusterService.localNode().getId(); + innerUpdateSnapshotState( + new ShardSnapshotUpdate( + target, + repoShardId, + new ShardSnapshotStatus(localNodeId, ShardState.FAILED, "failed to clone shard snapshot", null) + ), + ActionListener.runBefore( + ActionListener.wrap( + v -> logger.trace("Marked [{}] as failed clone from [{}] to [{}]", repoShardId, sourceSnapshot, target.getSnapshotId()), + ex -> { + logger.warn("Cluster state update after failed shard clone [{}] failed", repoShardId); + failAllListenersOnMasterFailOver(ex); + } + ), + () -> currentlyCloning.remove(repoShardId) + ) + ); } private void ensureBelowConcurrencyLimit( @@ -3077,12 +3139,14 @@ private void startExecutableClones(SnapshotsInProgress snapshotsInProgress, @Nul // this is a clone, see if new work is ready for (final Map.Entry clone : entry.clones().entrySet()) { if (clone.getValue().state() == ShardState.INIT) { + final boolean remoteStoreIndexShallowCopy = Boolean.TRUE.equals(entry.remoteStoreIndexShallowCopy()); runReadyClone( entry.snapshot(), entry.source(), clone.getValue(), clone.getKey(), - repositoriesService.repository(entry.repository()) + repositoriesService.repository(entry.repository()), + remoteStoreIndexShallowCopy ); } } diff --git a/server/src/test/java/org/opensearch/action/DynamicActionRegistryTests.java b/server/src/test/java/org/opensearch/action/DynamicActionRegistryTests.java index 963d47df3baff..17e424ee81e7e 100644 --- a/server/src/test/java/org/opensearch/action/DynamicActionRegistryTests.java +++ b/server/src/test/java/org/opensearch/action/DynamicActionRegistryTests.java @@ -26,6 +26,7 @@ import java.io.IOException; import java.util.Collections; import java.util.Map; +import java.util.Set; import static org.mockito.Mockito.mock; @@ -80,8 +81,8 @@ public void testDynamicActionRegistry() { public void testDynamicActionRegistryWithNamedRoutes() { RestSendToExtensionAction action = mock(RestSendToExtensionAction.class); RestSendToExtensionAction action2 = mock(RestSendToExtensionAction.class); - NamedRoute r1 = new NamedRoute(RestRequest.Method.GET, "/foo", "foo"); - NamedRoute r2 = new NamedRoute(RestRequest.Method.GET, "/bar", "bar"); + NamedRoute r1 = new NamedRoute.Builder().method(RestRequest.Method.GET).path("/foo").uniqueName("foo").build(); + NamedRoute r2 = new NamedRoute.Builder().method(RestRequest.Method.PUT).path("/bar").uniqueName("bar").build(); DynamicActionRegistry registry = new DynamicActionRegistry(); registry.registerDynamicRoute(r1, action); @@ -89,22 +90,38 @@ public void testDynamicActionRegistryWithNamedRoutes() { assertTrue(registry.isActionRegistered("foo")); assertTrue(registry.isActionRegistered("bar")); + + registry.unregisterDynamicRoute(r2); + + assertTrue(registry.isActionRegistered("foo")); + assertFalse(registry.isActionRegistered("bar")); } - public void testDynamicActionRegistryRegisterAndUnregisterWithNamedRoutes() { + public void testDynamicActionRegistryWithNamedRoutesAndLegacyActionNames() { RestSendToExtensionAction action = mock(RestSendToExtensionAction.class); RestSendToExtensionAction action2 = mock(RestSendToExtensionAction.class); - NamedRoute r1 = new NamedRoute(RestRequest.Method.GET, "/foo", "foo"); - NamedRoute r2 = new NamedRoute(RestRequest.Method.GET, "/bar", "bar"); + NamedRoute r1 = new NamedRoute.Builder().method(RestRequest.Method.GET) + .path("/foo") + .uniqueName("foo") + .legacyActionNames(Set.of("cluster:admin/opensearch/abc/foo")) + .build(); + NamedRoute r2 = new 
NamedRoute.Builder().method(RestRequest.Method.PUT) + .path("/bar") + .uniqueName("bar") + .legacyActionNames(Set.of("cluster:admin/opensearch/xyz/bar")) + .build(); DynamicActionRegistry registry = new DynamicActionRegistry(); registry.registerDynamicRoute(r1, action); registry.registerDynamicRoute(r2, action2); + assertTrue(registry.isActionRegistered("cluster:admin/opensearch/abc/foo")); + assertTrue(registry.isActionRegistered("cluster:admin/opensearch/xyz/bar")); + registry.unregisterDynamicRoute(r2); - assertTrue(registry.isActionRegistered("foo")); - assertFalse(registry.isActionRegistered("bar")); + assertTrue(registry.isActionRegistered("cluster:admin/opensearch/abc/foo")); + assertFalse(registry.isActionRegistered("cluster:admin/opensearch/xyz/bar")); } private static final class TestAction extends ActionType { diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java index 94a1ca9992cb6..c70fedbaf24bd 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java @@ -817,8 +817,7 @@ public void testAggregateSettingsAppliesSettingsFromTemplatesAndRequest() { Settings.EMPTY, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, randomShardLimitService(), - Collections.emptySet(), - false + Collections.emptySet() ); assertThat(aggregatedIndexSettings.get("template_setting"), equalTo("value1")); @@ -880,8 +879,7 @@ public void testRequestDataHavePriorityOverTemplateData() throws Exception { Settings.EMPTY, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, randomShardLimitService(), - Collections.emptySet(), - false + Collections.emptySet() ); assertThat(resolvedAliases.get(0).getSearchRouting(), equalTo("fromRequest")); @@ -903,8 +901,7 @@ public void testDefaultSettings() { Settings.EMPTY, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, randomShardLimitService(), - Collections.emptySet(), - false + Collections.emptySet() ); assertThat(aggregatedIndexSettings.get(SETTING_NUMBER_OF_SHARDS), equalTo("1")); @@ -919,8 +916,7 @@ public void testSettingsFromClusterState() { Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 15).build(), IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, randomShardLimitService(), - Collections.emptySet(), - false + Collections.emptySet() ); assertThat(aggregatedIndexSettings.get(SETTING_NUMBER_OF_SHARDS), equalTo("15")); @@ -957,8 +953,7 @@ public void testTemplateOrder() throws Exception { Settings.EMPTY, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, randomShardLimitService(), - Collections.emptySet(), - false + Collections.emptySet() ); List resolvedAliases = resolveAndValidateAliases( request.index(), @@ -997,8 +992,7 @@ public void testAggregateIndexSettingsIgnoresTemplatesOnCreateFromSourceIndex() Settings.EMPTY, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, randomShardLimitService(), - Collections.emptySet(), - false + Collections.emptySet() ); assertThat(aggregatedIndexSettings.get("templateSetting"), is(nullValue())); @@ -1220,8 +1214,7 @@ public void testRemoteStoreNoUserOverrideConflictingReplicationTypeIndexSettings settings, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, randomShardLimitService(), - Collections.emptySet(), - false + Collections.emptySet() ) ); assertThat( @@ -1252,8 +1245,7 @@ public void testRemoteStoreNoUserOverrideExceptReplicationTypeSegmentIndexSettin settings, 
IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, randomShardLimitService(), - Collections.emptySet(), - false + Collections.emptySet() ); verifyRemoteStoreIndexSettings( indexSettings, @@ -1285,8 +1277,7 @@ public void testRemoteStoreNoUserOverrideIndexSettings() { settings, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, randomShardLimitService(), - Collections.emptySet(), - false + Collections.emptySet() ); verifyRemoteStoreIndexSettings( indexSettings, @@ -1320,8 +1311,7 @@ public void testRemoteStoreDisabledByUserIndexSettings() { settings, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, randomShardLimitService(), - Collections.emptySet(), - false + Collections.emptySet() ); verifyRemoteStoreIndexSettings( indexSettings, @@ -1355,8 +1345,7 @@ public void testRemoteStoreTranslogDisabledByUserIndexSettings() { settings, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, randomShardLimitService(), - Collections.emptySet(), - false + Collections.emptySet() ); verifyRemoteStoreIndexSettings( indexSettings, @@ -1393,8 +1382,7 @@ public void testRemoteStoreOverrideSegmentRepoIndexSettings() { settings, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, randomShardLimitService(), - Collections.emptySet(), - false + Collections.emptySet() ); verifyRemoteStoreIndexSettings( indexSettings, @@ -1428,8 +1416,7 @@ public void testRemoteStoreOverrideTranslogRepoIndexSettings() { settings, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, randomShardLimitService(), - Collections.emptySet(), - false + Collections.emptySet() ); verifyRemoteStoreIndexSettings( indexSettings, @@ -1463,8 +1450,7 @@ public void testRemoteStoreOverrideReplicationTypeIndexSettings() { settings, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, randomShardLimitService(), - Collections.emptySet(), - false + Collections.emptySet() ); verifyRemoteStoreIndexSettings( indexSettings, @@ -1546,8 +1532,7 @@ public void testSoftDeletesDisabledIsRejected() { Settings.EMPTY, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, randomShardLimitService(), - Collections.emptySet(), - false + Collections.emptySet() ); }); assertThat( @@ -1576,8 +1561,7 @@ public void testValidateTranslogRetentionSettings() { Settings.EMPTY, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, randomShardLimitService(), - Collections.emptySet(), - false + Collections.emptySet() ); assertWarnings( "Translog retention settings [index.translog.retention.age] " @@ -1624,8 +1608,7 @@ public void testDeprecatedSimpleFSStoreSettings() { Settings.EMPTY, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, randomShardLimitService(), - Collections.emptySet(), - false + Collections.emptySet() ); assertWarnings( "[simplefs] is deprecated and will be removed in 2.0. 
Use [niofs], which offers equal " @@ -1644,8 +1627,7 @@ public void testClusterReplicationSetting() { settings, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, randomShardLimitService(), - Collections.emptySet(), - false + Collections.emptySet() ); assertEquals(ReplicationType.SEGMENT.toString(), indexSettings.get(SETTING_REPLICATION_TYPE)); } @@ -1665,86 +1647,12 @@ public void testIndexSettingOverridesClusterReplicationSetting() { settings, IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, randomShardLimitService(), - Collections.emptySet(), - false + Collections.emptySet() ); // Verify if index setting overrides cluster replication setting assertEquals(ReplicationType.DOCUMENT.toString(), indexSettings.get(SETTING_REPLICATION_TYPE)); } - public void testHiddenIndexUsesDocumentReplication() { - Settings settings = Settings.builder().put(CLUSTER_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.SEGMENT).build(); - request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test"); - final Settings.Builder requestSettings = Settings.builder(); - // Set index setting replication type as DOCUMENT - requestSettings.put("index.hidden", true); - request.settings(requestSettings.build()); - Settings indexSettings = aggregateIndexSettings( - ClusterState.EMPTY_STATE, - request, - Settings.EMPTY, - null, - settings, - IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, - randomShardLimitService(), - Collections.emptySet(), - false - ); - // Verify replication type is Document Replication - assertEquals(ReplicationType.DOCUMENT.toString(), indexSettings.get(SETTING_REPLICATION_TYPE)); - } - - public void testSystemIndexUsesDocumentReplication() { - Settings settings = Settings.builder().put(CLUSTER_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.SEGMENT).build(); - request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test"); - final Settings.Builder requestSettings = Settings.builder(); - request.settings(requestSettings.build()); - // set isSystemIndex parameter as true - Settings indexSettings = aggregateIndexSettings( - ClusterState.EMPTY_STATE, - request, - Settings.EMPTY, - null, - settings, - IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, - randomShardLimitService(), - Collections.emptySet(), - true - ); - // Verify replication type is Document Replication - assertEquals(ReplicationType.DOCUMENT.toString(), indexSettings.get(SETTING_REPLICATION_TYPE)); - } - - public void testRemoteStoreDisabledForSystemIndices() { - Settings settings = Settings.builder() - .put(CLUSTER_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.SEGMENT) - .put(CLUSTER_REMOTE_STORE_ENABLED_SETTING.getKey(), true) - .put(CLUSTER_REMOTE_STORE_REPOSITORY_SETTING.getKey(), "my-segment-repo-1") - .put(CLUSTER_REMOTE_TRANSLOG_STORE_ENABLED_SETTING.getKey(), true) - .put(CLUSTER_REMOTE_TRANSLOG_REPOSITORY_SETTING.getKey(), "my-translog-repo-1") - .build(); - FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); - - request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test"); - final Settings.Builder requestSettings = Settings.builder(); - request.settings(requestSettings.build()); - // set isSystemIndex parameter as true - Settings indexSettings = aggregateIndexSettings( - ClusterState.EMPTY_STATE, - request, - Settings.EMPTY, - null, - settings, - IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, - randomShardLimitService(), - Collections.emptySet(), - true - ); - // Verify that remote store is disabled. 
- assertEquals(indexSettings.get(SETTING_REMOTE_STORE_ENABLED), "false"); - assertEquals(ReplicationType.DOCUMENT.toString(), indexSettings.get(SETTING_REPLICATION_TYPE)); - } - private IndexTemplateMetadata addMatchingTemplate(Consumer configurator) { IndexTemplateMetadata.Builder builder = templateMetadataBuilder("template1", "te*"); configurator.accept(builder); diff --git a/server/src/test/java/org/opensearch/common/blobstore/fs/FsBlobContainerTests.java b/server/src/test/java/org/opensearch/common/blobstore/fs/FsBlobContainerTests.java index 6c36368bfe446..4a2eeabeb7e58 100644 --- a/server/src/test/java/org/opensearch/common/blobstore/fs/FsBlobContainerTests.java +++ b/server/src/test/java/org/opensearch/common/blobstore/fs/FsBlobContainerTests.java @@ -34,6 +34,9 @@ import org.apache.lucene.tests.mockfile.FilterFileSystemProvider; import org.apache.lucene.tests.mockfile.FilterSeekableByteChannel; import org.apache.lucene.tests.util.LuceneTestCase; +import org.opensearch.action.ActionListener; +import org.opensearch.common.blobstore.BlobContainer; +import org.opensearch.common.blobstore.BlobMetadata; import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.io.PathUtils; import org.opensearch.common.io.PathUtilsForTesting; @@ -54,10 +57,14 @@ import java.nio.file.Path; import java.nio.file.attribute.FileAttribute; import java.nio.file.spi.FileSystemProvider; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; import java.util.Locale; import java.util.Set; import java.util.concurrent.atomic.AtomicLong; import java.util.function.Consumer; +import java.util.stream.Collectors; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -118,6 +125,79 @@ public void testIsTempBlobName() { assertThat(FsBlobContainer.isTempBlobName(tempBlobName), is(true)); } + private void testListBlobsByPrefixInSortedOrder(int limit, BlobContainer.BlobNameSortOrder blobNameSortOrder) throws IOException { + + final Path path = PathUtils.get(createTempDir().toString()); + + List blobsInFileSystem = new ArrayList<>(); + for (int i = 0; i < 10; i++) { + final String blobName = randomAlphaOfLengthBetween(10, 20).toLowerCase(Locale.ROOT); + final byte[] blobData = randomByteArrayOfLength(randomIntBetween(1, frequently() ? 
512 : 1 << 20)); // rarely up to 1mb + Files.write(path.resolve(blobName), blobData); + blobsInFileSystem.add(blobName); + } + + final FsBlobContainer container = new FsBlobContainer( + new FsBlobStore(randomIntBetween(1, 8) * 1024, path, false), + BlobPath.cleanPath(), + path + ); + + if (limit >= 0) { + container.listBlobsByPrefixInSortedOrder(null, limit, blobNameSortOrder, new ActionListener<>() { + @Override + public void onResponse(List blobMetadata) { + int actualLimit = Math.min(limit, 10); + assertEquals(actualLimit, blobMetadata.size()); + + if (blobNameSortOrder == BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC) { + blobsInFileSystem.sort(String::compareTo); + } else { + blobsInFileSystem.sort(Collections.reverseOrder(String::compareTo)); + } + List keys = blobsInFileSystem.subList(0, actualLimit); + assertEquals(keys, blobMetadata.stream().map(BlobMetadata::name).collect(Collectors.toList())); + } + + @Override + public void onFailure(Exception e) { + fail("blobContainer.listBlobsByPrefixInSortedOrder failed with exception: " + e.getMessage()); + } + }); + } else { + assertThrows( + IllegalArgumentException.class, + () -> container.listBlobsByPrefixInSortedOrder(null, limit, blobNameSortOrder, new ActionListener<>() { + @Override + public void onResponse(List blobMetadata) {} + + @Override + public void onFailure(Exception e) {} + }) + ); + } + } + + public void testListBlobsByPrefixInLexicographicOrderWithNegativeLimit() throws IOException { + testListBlobsByPrefixInSortedOrder(-5, BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC); + } + + public void testListBlobsByPrefixInLexicographicOrderWithZeroLimit() throws IOException { + testListBlobsByPrefixInSortedOrder(0, BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC); + } + + public void testListBlobsByPrefixInLexicographicOrderWithLimitLessThanNumberOfRecords() throws IOException { + testListBlobsByPrefixInSortedOrder(8, BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC); + } + + public void testListBlobsByPrefixInLexicographicOrderWithLimitNumberOfRecords() throws IOException { + testListBlobsByPrefixInSortedOrder(10, BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC); + } + + public void testListBlobsByPrefixInLexicographicOrderWithLimitGreaterThanNumberOfRecords() throws IOException { + testListBlobsByPrefixInSortedOrder(12, BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC); + } + static class MockFileSystemProvider extends FilterFileSystemProvider { final Consumer onRead; diff --git a/server/src/test/java/org/opensearch/common/util/concurrent/ThreadContextTests.java b/server/src/test/java/org/opensearch/common/util/concurrent/ThreadContextTests.java index 64286e47b4966..dfa239757513e 100644 --- a/server/src/test/java/org/opensearch/common/util/concurrent/ThreadContextTests.java +++ b/server/src/test/java/org/opensearch/common/util/concurrent/ThreadContextTests.java @@ -71,6 +71,35 @@ public void testStashContext() { assertEquals("1", threadContext.getHeader("default")); } + public void testStashContextWithPersistentHeaders() { + Settings build = Settings.builder().put("request.headers.default", "1").build(); + ThreadContext threadContext = new ThreadContext(build); + threadContext.putHeader("foo", "bar"); + threadContext.putTransient("ctx.foo", 1); + threadContext.putPersistent("persistent_foo", "baz"); + threadContext.putPersistent("ctx.persistent_foo", 10); + assertEquals("bar", threadContext.getHeader("foo")); + assertEquals(Integer.valueOf(1), threadContext.getTransient("ctx.foo")); + assertEquals("1", 
threadContext.getHeader("default")); + try (ThreadContext.StoredContext ctx = threadContext.stashContext()) { + assertNull(threadContext.getHeader("foo")); + assertNull(threadContext.getTransient("ctx.foo")); + assertEquals("1", threadContext.getHeader("default")); + + assertEquals("baz", threadContext.getPersistent("persistent_foo")); + assertEquals(Integer.valueOf(10), threadContext.getPersistent("ctx.persistent_foo")); + assertNull(threadContext.getPersistent("default")); + } + + assertEquals("bar", threadContext.getHeader("foo")); + assertEquals(Integer.valueOf(1), threadContext.getTransient("ctx.foo")); + assertEquals("1", threadContext.getHeader("default")); + + assertEquals("baz", threadContext.getPersistent("persistent_foo")); + assertEquals(Integer.valueOf(10), threadContext.getPersistent("ctx.persistent_foo")); + assertNull(threadContext.getPersistent("default")); + } + public void testNewContextWithClearedTransients() { ThreadContext threadContext = new ThreadContext(Settings.EMPTY); threadContext.putTransient("foo", "bar"); diff --git a/server/src/test/java/org/opensearch/extensions/ExtensionsManagerTests.java b/server/src/test/java/org/opensearch/extensions/ExtensionsManagerTests.java index f8ec138d8eff2..3f61d01166fb9 100644 --- a/server/src/test/java/org/opensearch/extensions/ExtensionsManagerTests.java +++ b/server/src/test/java/org/opensearch/extensions/ExtensionsManagerTests.java @@ -45,8 +45,6 @@ import org.opensearch.client.node.NodeClient; import org.opensearch.cluster.ClusterSettingsResponse; import org.opensearch.common.util.FeatureFlags; -import org.opensearch.common.xcontent.json.JsonXContent; -import org.opensearch.core.xcontent.XContentParser; import org.opensearch.env.EnvironmentSettingsResponse; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.node.DiscoveryNode; @@ -398,16 +396,6 @@ public void testExtensionDependency() throws Exception { } } - public void testParseExtensionDependency() throws Exception { - XContentParser parser = createParser(JsonXContent.jsonXContent, "{\"uniqueId\": \"test1\", \"version\": \"2.0.0\"}"); - - assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken()); - ExtensionDependency dependency = ExtensionDependency.parse(parser); - - assertEquals("test1", dependency.getUniqueId()); - assertEquals(Version.fromString("2.0.0"), dependency.getVersion()); - } - public void testInitialize() throws Exception { ExtensionsManager extensionsManager = new ExtensionsManager(Set.of()); @@ -455,8 +443,8 @@ public void testHandleRegisterRestActionsRequest() throws Exception { initialize(extensionsManager); String uniqueIdStr = "uniqueid1"; - List actionsList = List.of("GET /foo", "PUT /bar", "POST /baz"); - List deprecatedActionsList = List.of("GET /deprecated/foo", "It's deprecated!"); + List actionsList = List.of("GET /foo foo", "PUT /bar bar", "POST /baz baz"); + List deprecatedActionsList = List.of("GET /deprecated/foo foo_deprecated", "It's deprecated!"); RegisterRestActionsRequest registerActionsRequest = new RegisterRestActionsRequest(uniqueIdStr, actionsList, deprecatedActionsList); TransportResponse response = extensionsManager.getRestActionsRequestHandler() .handleRegisterRestActionsRequest(registerActionsRequest, actionModule.getDynamicActionRegistry()); diff --git a/server/src/test/java/org/opensearch/extensions/rest/RestInitializeExtensionActionTests.java b/server/src/test/java/org/opensearch/extensions/rest/RestInitializeExtensionActionTests.java index 8d027b7fca9c2..7dd616c678e74 100644 --- 
a/server/src/test/java/org/opensearch/extensions/rest/RestInitializeExtensionActionTests.java +++ b/server/src/test/java/org/opensearch/extensions/rest/RestInitializeExtensionActionTests.java @@ -9,24 +9,33 @@ package org.opensearch.extensions.rest; import java.util.Collections; +import java.util.List; +import java.util.Optional; import java.util.Set; import java.util.concurrent.TimeUnit; +import java.util.function.Function; import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; +import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.when; import org.junit.After; import org.junit.Before; +import org.mockito.Mockito; import org.opensearch.Version; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.bytes.BytesArray; import org.opensearch.common.io.stream.NamedWriteableRegistry; import org.opensearch.common.network.NetworkService; +import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.PageCacheRecycler; import org.opensearch.common.xcontent.XContentType; import org.opensearch.extensions.ExtensionsManager; +import org.opensearch.extensions.ExtensionsSettings; import org.opensearch.indices.breaker.NoneCircuitBreakerService; import org.opensearch.rest.RestRequest; import org.opensearch.rest.RestStatus; @@ -88,8 +97,12 @@ public void testRestInitializeExtensionActionResponse() throws Exception { ExtensionsManager extensionsManager = mock(ExtensionsManager.class); RestInitializeExtensionAction restInitializeExtensionAction = new RestInitializeExtensionAction(extensionsManager); final String content = "{\"name\":\"ad-extension\",\"uniqueId\":\"ad-extension\",\"hostAddress\":\"127.0.0.1\"," - + "\"port\":\"4532\",\"version\":\"1.0\",\"opensearchVersion\":\"3.0.0\"," - + "\"minimumCompatibleVersion\":\"3.0.0\"}"; + + "\"port\":\"4532\",\"version\":\"1.0\",\"opensearchVersion\":\"" + + Version.CURRENT.toString() + + "\"," + + "\"minimumCompatibleVersion\":\"" + + Version.CURRENT.minimumCompatibilityVersion().toString() + + "\"}"; RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withContent(new BytesArray(content), XContentType.JSON) .withMethod(RestRequest.Method.POST) .build(); @@ -106,8 +119,12 @@ public void testRestInitializeExtensionActionFailure() throws Exception { RestInitializeExtensionAction restInitializeExtensionAction = new RestInitializeExtensionAction(extensionsManager); final String content = "{\"name\":\"ad-extension\",\"uniqueId\":\"\",\"hostAddress\":\"127.0.0.1\"," - + "\"port\":\"4532\",\"version\":\"1.0\",\"opensearchVersion\":\"3.0.0\"," - + "\"minimumCompatibleVersion\":\"3.0.0\"}"; + + "\"port\":\"4532\",\"version\":\"1.0\",\"opensearchVersion\":\"" + + Version.CURRENT.toString() + + "\"," + + "\"minimumCompatibleVersion\":\"" + + Version.CURRENT.minimumCompatibilityVersion().toString() + + "\"}"; RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withContent(new BytesArray(content), XContentType.JSON) .withMethod(RestRequest.Method.POST) .build(); @@ -121,4 +138,98 @@ public void testRestInitializeExtensionActionFailure() throws Exception { ); } + public void testRestInitializeExtensionActionResponseWithAdditionalSettings() throws Exception { + Setting boolSetting = Setting.boolSetting("boolSetting", false, Setting.Property.ExtensionScope); + Setting stringSetting = 
Setting.simpleString("stringSetting", "default", Setting.Property.ExtensionScope); + Setting intSetting = Setting.intSetting("intSetting", 0, Setting.Property.ExtensionScope); + Setting listSetting = Setting.listSetting( + "listSetting", + List.of("first", "second", "third"), + Function.identity(), + Setting.Property.ExtensionScope + ); + ExtensionsManager extensionsManager = new ExtensionsManager(Set.of(boolSetting, stringSetting, intSetting, listSetting)); + ExtensionsManager spy = spy(extensionsManager); + + // stub the spy: keep the real additional-settings and loadExtension logic, but make initialize() a no-op: + when(spy.getAdditionalSettings()).thenCallRealMethod(); + Mockito.doCallRealMethod().when(spy).loadExtension(any(ExtensionsSettings.Extension.class)); + Mockito.doNothing().when(spy).initialize(); + RestInitializeExtensionAction restInitializeExtensionAction = new RestInitializeExtensionAction(spy); + final String content = "{\"name\":\"ad-extension\",\"uniqueId\":\"ad-extension\",\"hostAddress\":\"127.0.0.1\"," + + "\"port\":\"4532\",\"version\":\"1.0\",\"opensearchVersion\":\"" + + Version.CURRENT.toString() + + "\"," + + "\"minimumCompatibleVersion\":\"" + + Version.CURRENT.minimumCompatibilityVersion().toString() + + "\",\"boolSetting\":true,\"stringSetting\":\"customSetting\",\"intSetting\":5,\"listSetting\":[\"one\",\"two\",\"three\"]}"; + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withContent(new BytesArray(content), XContentType.JSON) + .withMethod(RestRequest.Method.POST) + .build(); + + FakeRestChannel channel = new FakeRestChannel(request, false, 0); + restInitializeExtensionAction.handleRequest(request, channel, null); + + assertEquals(channel.capturedResponse().status(), RestStatus.ACCEPTED); + assertTrue(channel.capturedResponse().content().utf8ToString().contains("A request to initialize an extension has been sent.")); + + Optional extension = spy.lookupExtensionSettingsById("ad-extension"); + assertTrue(extension.isPresent()); + assertEquals(true, extension.get().getAdditionalSettings().get(boolSetting)); + assertEquals("customSetting", extension.get().getAdditionalSettings().get(stringSetting)); + assertEquals(5, extension.get().getAdditionalSettings().get(intSetting)); + + List listSettingValue = (List) extension.get().getAdditionalSettings().get(listSetting); + assertTrue(listSettingValue.contains("one")); + assertTrue(listSettingValue.contains("two")); + assertTrue(listSettingValue.contains("three")); + } + + public void testRestInitializeExtensionActionResponseWithAdditionalSettingsUsingDefault() throws Exception { + Setting boolSetting = Setting.boolSetting("boolSetting", false, Setting.Property.ExtensionScope); + Setting stringSetting = Setting.simpleString("stringSetting", "default", Setting.Property.ExtensionScope); + Setting intSetting = Setting.intSetting("intSetting", 0, Setting.Property.ExtensionScope); + Setting listSetting = Setting.listSetting( + "listSetting", + List.of("first", "second", "third"), + Function.identity(), + Setting.Property.ExtensionScope + ); + ExtensionsManager extensionsManager = new ExtensionsManager(Set.of(boolSetting, stringSetting, intSetting, listSetting)); + ExtensionsManager spy = spy(extensionsManager); + + // stub the spy: keep the real additional-settings and loadExtension logic, but make initialize() a no-op: + when(spy.getAdditionalSettings()).thenCallRealMethod(); + Mockito.doCallRealMethod().when(spy).loadExtension(any(ExtensionsSettings.Extension.class)); + Mockito.doNothing().when(spy).initialize(); + RestInitializeExtensionAction restInitializeExtensionAction = new RestInitializeExtensionAction(spy); 
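+ // The request body below sets none of the additional settings, so each registered setting should resolve to its declared default.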
+ final String content = "{\"name\":\"ad-extension\",\"uniqueId\":\"ad-extension\",\"hostAddress\":\"127.0.0.1\"," + + "\"port\":\"4532\",\"version\":\"1.0\",\"opensearchVersion\":\"" + + Version.CURRENT.toString() + + "\"," + + "\"minimumCompatibleVersion\":\"" + + Version.CURRENT.minimumCompatibilityVersion().toString() + + "\"}"; + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withContent(new BytesArray(content), XContentType.JSON) + .withMethod(RestRequest.Method.POST) + .build(); + + FakeRestChannel channel = new FakeRestChannel(request, false, 0); + restInitializeExtensionAction.handleRequest(request, channel, null); + + assertEquals(channel.capturedResponse().status(), RestStatus.ACCEPTED); + assertTrue(channel.capturedResponse().content().utf8ToString().contains("A request to initialize an extension has been sent.")); + + Optional extension = spy.lookupExtensionSettingsById("ad-extension"); + assertTrue(extension.isPresent()); + assertEquals(false, extension.get().getAdditionalSettings().get(boolSetting)); + assertEquals("default", extension.get().getAdditionalSettings().get(stringSetting)); + assertEquals(0, extension.get().getAdditionalSettings().get(intSetting)); + + List listSettingValue = (List) extension.get().getAdditionalSettings().get(listSetting); + assertTrue(listSettingValue.contains("first")); + assertTrue(listSettingValue.contains("second")); + assertTrue(listSettingValue.contains("third")); + } + } diff --git a/server/src/test/java/org/opensearch/extensions/rest/RestSendToExtensionActionTests.java b/server/src/test/java/org/opensearch/extensions/rest/RestSendToExtensionActionTests.java index fe8792b36f048..23a9169b91e21 100644 --- a/server/src/test/java/org/opensearch/extensions/rest/RestSendToExtensionActionTests.java +++ b/server/src/test/java/org/opensearch/extensions/rest/RestSendToExtensionActionTests.java @@ -135,8 +135,8 @@ public void tearDown() throws Exception { public void testRestSendToExtensionAction() throws Exception { RegisterRestActionsRequest registerRestActionRequest = new RegisterRestActionsRequest( "uniqueid1", - List.of("GET /foo", "PUT /bar", "POST /baz"), - List.of("GET /deprecated/foo", "It's deprecated!") + List.of("GET /foo foo", "PUT /bar bar", "POST /baz baz"), + List.of("GET /deprecated/foo foo_deprecated", "Its deprecated") ); RestSendToExtensionAction restSendToExtensionAction = new RestSendToExtensionAction( registerRestActionRequest, @@ -180,9 +180,70 @@ public void testRestSendToExtensionActionWithNamedRoute() throws Exception { assertEquals("send_to_extension_action", restSendToExtensionAction.getName()); List expected = new ArrayList<>(); String uriPrefix = "/_extensions/_uniqueid1"; - expected.add(new NamedRoute(Method.GET, uriPrefix + "/foo", "foo")); - expected.add(new NamedRoute(Method.PUT, uriPrefix + "/bar", "bar")); - expected.add(new NamedRoute(Method.POST, uriPrefix + "/baz", "baz")); + NamedRoute nr1 = new NamedRoute.Builder().method(Method.GET).path(uriPrefix + "/foo").uniqueName("foo").build(); + + NamedRoute nr2 = new NamedRoute.Builder().method(Method.PUT).path(uriPrefix + "/bar").uniqueName("bar").build(); + + NamedRoute nr3 = new NamedRoute.Builder().method(Method.POST).path(uriPrefix + "/baz").uniqueName("baz").build(); + + expected.add(nr1); + expected.add(nr2); + expected.add(nr3); + + List routes = restSendToExtensionAction.routes(); + assertEquals(expected.size(), routes.size()); + List expectedPaths = expected.stream().map(Route::getPath).collect(Collectors.toList()); + List paths = 
routes.stream().map(Route::getPath).collect(Collectors.toList()); + List expectedMethods = expected.stream().map(Route::getMethod).collect(Collectors.toList()); + List methods = routes.stream().map(Route::getMethod).collect(Collectors.toList()); + List expectedNames = expected.stream().map(NamedRoute::name).collect(Collectors.toList()); + List names = routes.stream().map(r -> ((NamedRoute) r).name()).collect(Collectors.toList()); + assertTrue(paths.containsAll(expectedPaths)); + assertTrue(expectedPaths.containsAll(paths)); + assertTrue(methods.containsAll(expectedMethods)); + assertTrue(expectedMethods.containsAll(methods)); + assertTrue(expectedNames.containsAll(names)); + } + + public void testRestSendToExtensionActionWithNamedRouteAndLegacyActionName() throws Exception { + RegisterRestActionsRequest registerRestActionRequest = new RegisterRestActionsRequest( + "uniqueid1", + List.of( + "GET /foo foo cluster:admin/opensearch/abc/foo", + "PUT /bar bar cluster:admin/opensearch/jkl/bar,cluster:admin/opendistro/mno/bar*", + "POST /baz baz cluster:admin/opensearch/xyz/baz" + ), + List.of("GET /deprecated/foo foo_deprecated cluster:admin/opensearch/abc/foo_deprecated", "It's deprecated!") + ); + RestSendToExtensionAction restSendToExtensionAction = new RestSendToExtensionAction( + registerRestActionRequest, + discoveryExtensionNode, + transportService, + dynamicActionRegistry + ); + + assertEquals("send_to_extension_action", restSendToExtensionAction.getName()); + List expected = new ArrayList<>(); + String uriPrefix = "/_extensions/_uniqueid1"; + NamedRoute nr1 = new NamedRoute.Builder().method(Method.GET) + .path(uriPrefix + "/foo") + .uniqueName("foo") + .legacyActionNames(Set.of("cluster:admin/opensearch/abc/foo")) + .build(); + NamedRoute nr2 = new NamedRoute.Builder().method(Method.PUT) + .path(uriPrefix + "/bar") + .uniqueName("bar") + .legacyActionNames(Set.of("cluster:admin/opensearch/jkl/bar", "cluster:admin/opendistro/mno/bar*")) + .build(); + NamedRoute nr3 = new NamedRoute.Builder().method(Method.POST) + .path(uriPrefix + "/baz") + .uniqueName("baz") + .legacyActionNames(Set.of("cluster:admin/opensearch/xyz/baz")) + .build(); + + expected.add(nr1); + expected.add(nr2); + expected.add(nr3); List routes = restSendToExtensionAction.routes(); assertEquals(expected.size(), routes.size()); @@ -192,11 +253,26 @@ public void testRestSendToExtensionActionWithNamedRoute() throws Exception { List methods = routes.stream().map(Route::getMethod).collect(Collectors.toList()); List expectedNames = expected.stream().map(NamedRoute::name).collect(Collectors.toList()); List names = routes.stream().map(r -> ((NamedRoute) r).name()).collect(Collectors.toList()); + Set expectedActionNames = expected.stream().flatMap(nr -> nr.actionNames().stream()).collect(Collectors.toSet()); + Set actionNames = routes.stream().flatMap(nr -> ((NamedRoute) nr).actionNames().stream()).collect(Collectors.toSet()); assertTrue(paths.containsAll(expectedPaths)); assertTrue(expectedPaths.containsAll(paths)); assertTrue(methods.containsAll(expectedMethods)); assertTrue(expectedMethods.containsAll(methods)); assertTrue(expectedNames.containsAll(names)); + assertTrue(expectedActionNames.containsAll(actionNames)); + } + + public void testRestSendToExtensionActionWithoutUniqueNameShouldFail() { + RegisterRestActionsRequest registerRestActionRequest = new RegisterRestActionsRequest( + "uniqueid1", + List.of("GET /foo", "PUT /bar"), + List.of() + ); + expectThrows( + IllegalArgumentException.class, + () -> new 
RestSendToExtensionAction(registerRestActionRequest, discoveryExtensionNode, transportService, dynamicActionRegistry) + ); } public void testRestSendToExtensionMultipleNamedRoutesWithSameName() throws Exception { @@ -211,6 +287,18 @@ public void testRestSendToExtensionMultipleNamedRoutesWithSameName() throws Exce ); } + public void testRestSendToExtensionMultipleNamedRoutesWithSameLegacyActionName() throws Exception { + RegisterRestActionsRequest registerRestActionRequest = new RegisterRestActionsRequest( + "uniqueid1", + List.of("GET /foo foo cluster:admin/opensearch/abc/foo", "PUT /bar bar cluster:admin/opensearch/abc/foo"), + List.of() + ); + expectThrows( + IllegalArgumentException.class, + () -> new RestSendToExtensionAction(registerRestActionRequest, discoveryExtensionNode, transportService, dynamicActionRegistry) + ); + } + public void testRestSendToExtensionMultipleRoutesWithSameMethodAndPath() throws Exception { RegisterRestActionsRequest registerRestActionRequest = new RegisterRestActionsRequest( "uniqueid1", @@ -226,7 +314,7 @@ public void testRestSendToExtensionMultipleRoutesWithSameMethodAndPath() throws public void testRestSendToExtensionMultipleRoutesWithSameMethodAndPathWithDifferentPathParams() throws Exception { RegisterRestActionsRequest registerRestActionRequest = new RegisterRestActionsRequest( "uniqueid1", - List.of("GET /foo/{path_param1}", "GET /foo/{path_param2}"), + List.of("GET /foo/{path_param1} fooWithParam", "GET /foo/{path_param2} listFooWithParam"), List.of() ); expectThrows( @@ -235,12 +323,13 @@ public void testRestSendToExtensionMultipleRoutesWithSameMethodAndPathWithDiffer ); } - public void testRestSendToExtensionMultipleRoutesWithSameMethodAndPathWithPathParams() throws Exception { + public void testRestSendToExtensionMultipleRoutesWithSameMethodAndPathWithPathParams() { RegisterRestActionsRequest registerRestActionRequest = new RegisterRestActionsRequest( "uniqueid1", - List.of("GET /foo/{path_param}", "GET /foo/{path_param}/list"), + List.of("GET /foo/{path_param} fooWithParam", "GET /foo/{path_param}/list listFooWithParam"), List.of() ); + try { new RestSendToExtensionAction(registerRestActionRequest, discoveryExtensionNode, transportService, dynamicActionRegistry); } catch (IllegalArgumentException e) { @@ -285,8 +374,8 @@ public void testRestSendToExtensionWithNamedRouteCollidingWithNativeTransportAct public void testRestSendToExtensionActionFilterHeaders() throws Exception { RegisterRestActionsRequest registerRestActionRequest = new RegisterRestActionsRequest( "uniqueid1", - List.of("GET /foo", "PUT /bar", "POST /baz"), - List.of("GET /deprecated/foo", "It's deprecated!") + List.of("GET /foo foo", "PUT /bar bar", "POST /baz baz"), + List.of("GET /deprecated/foo foo_deprecated", "It's deprecated!") ); RestSendToExtensionAction restSendToExtensionAction = new RestSendToExtensionAction( registerRestActionRequest, diff --git a/server/src/test/java/org/opensearch/extensions/rest/RouteHandlerTests.java b/server/src/test/java/org/opensearch/extensions/rest/RouteHandlerTests.java deleted file mode 100644 index 855296b2038f0..0000000000000 --- a/server/src/test/java/org/opensearch/extensions/rest/RouteHandlerTests.java +++ /dev/null @@ -1,36 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -package org.opensearch.extensions.rest; - -import org.opensearch.rest.RestRequest; -import org.opensearch.rest.RestStatus; -import org.opensearch.test.OpenSearchTestCase; - -public class RouteHandlerTests extends OpenSearchTestCase { - public void testUnnamedRouteHandler() { - RouteHandler rh = new RouteHandler( - RestRequest.Method.GET, - "/foo/bar", - req -> new ExtensionRestResponse(req, RestStatus.OK, "content") - ); - - assertEquals(null, rh.name()); - } - - public void testNamedRouteHandler() { - RouteHandler rh = new RouteHandler( - "foo", - RestRequest.Method.GET, - "/foo/bar", - req -> new ExtensionRestResponse(req, RestStatus.OK, "content") - ); - - assertEquals("foo", rh.name()); - } -} diff --git a/server/src/test/java/org/opensearch/index/IndexModuleTests.java b/server/src/test/java/org/opensearch/index/IndexModuleTests.java index d9d87196ca289..32b8fb5a4dc62 100644 --- a/server/src/test/java/org/opensearch/index/IndexModuleTests.java +++ b/server/src/test/java/org/opensearch/index/IndexModuleTests.java @@ -252,7 +252,7 @@ private IndexService newIndexService(IndexModule module) throws IOException { writableRegistry(), () -> false, null, - new RemoteSegmentStoreDirectoryFactory(() -> repositoriesService), + new RemoteSegmentStoreDirectoryFactory(() -> repositoriesService, threadPool), translogFactorySupplier ); } diff --git a/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldTypeTests.java new file mode 100644 index 0000000000000..9ec053dc59d10 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/mapper/FlatObjectFieldTypeTests.java @@ -0,0 +1,134 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.mapper; + +import org.apache.lucene.index.Term; +import org.apache.lucene.search.TermQuery; +import org.opensearch.Version; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.settings.Settings; +import org.opensearch.index.analysis.AnalyzerScope; +import org.opensearch.index.analysis.NamedAnalyzer; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +public class FlatObjectFieldTypeTests extends FieldTypeTestCase { + private static MappedFieldType getFlatParentFieldType(String fieldName) { + Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT.id).build(); + Mapper.BuilderContext context = new Mapper.BuilderContext(settings, new ContentPath()); + MappedFieldType flatParentFieldType = new FlatObjectFieldMapper.Builder(fieldName).build(context).fieldType(); + return flatParentFieldType; + } + + public void testFetchSourceValue() throws IOException { + MappedFieldType mapper = getFlatParentFieldType("field"); + + Map jsonPoint = new HashMap<>(); + jsonPoint.put("type", "flat_object"); + jsonPoint.put("coordinates", Arrays.asList(42.0, 27.1)); + Map otherJsonPoint = new HashMap<>(); + otherJsonPoint.put("type", "Point"); + otherJsonPoint.put("coordinates", Arrays.asList(30.0, 50.0)); + + ArrayList jsonPointList = new ArrayList<>(); + jsonPointList.add(jsonPoint.toString()); + + ArrayList otherJsonPointList = new ArrayList<>(); + otherJsonPointList.add(otherJsonPoint.toString()); + + assertEquals(jsonPointList, fetchSourceValue(mapper, jsonPoint, null)); + assertEquals(otherJsonPointList, fetchSourceValue(mapper, otherJsonPoint, null)); + + } + + public void testDirectSubfield() { + { + MappedFieldType flatParentFieldType = getFlatParentFieldType("field"); + + // when searching for "foo" in "field", the directSubfield is field._value field + String searchFieldName = ((FlatObjectFieldMapper.FlatObjectFieldType) flatParentFieldType).directSubfield(); + assertEquals("field._value", searchFieldName); + + MappedFieldType dynamicMappedFieldType = new FlatObjectFieldMapper.FlatObjectFieldType("bar", flatParentFieldType.name()); + // when searching for "foo" in "field.bar", the directSubfield is field._valueAndPath field + String searchFieldNameDocPath = ((FlatObjectFieldMapper.FlatObjectFieldType) dynamicMappedFieldType).directSubfield(); + assertEquals("field._valueAndPath", searchFieldNameDocPath); + } + { + NamedAnalyzer analyzer = new NamedAnalyzer("default", AnalyzerScope.INDEX, null); + MappedFieldType ft = new FlatObjectFieldMapper.FlatObjectFieldType("field", analyzer); + assertEquals("field._value", ((FlatObjectFieldMapper.FlatObjectFieldType) ft).directSubfield()); + } + } + + public void testRewriteValue() { + MappedFieldType flatParentFieldType = getFlatParentFieldType("field"); + + // when searching for "foo" in "field", the rewrite value is "foo" + String searchValues = ((FlatObjectFieldMapper.FlatObjectFieldType) flatParentFieldType).rewriteValue("foo"); + assertEquals("foo", searchValues); + + MappedFieldType dynamicMappedFieldType = new FlatObjectFieldMapper.FlatObjectFieldType("field.bar", flatParentFieldType.name()); + + // when searching for "foo" in "field.bar", the rewrite value is "field.bar=foo" + String searchFieldNameDocPath = ((FlatObjectFieldMapper.FlatObjectFieldType) dynamicMappedFieldType).directSubfield(); + String searchValuesDocPath = 
((FlatObjectFieldMapper.FlatObjectFieldType) dynamicMappedFieldType).rewriteValue("foo"); + assertEquals("field.bar=foo", searchValuesDocPath); + } + + public void testTermQuery() { + + MappedFieldType flatParentFieldType = getFlatParentFieldType("field"); + + // when searching for "foo" in "field", the term query is directed to search "foo" in field._value field + String searchFieldName = ((FlatObjectFieldMapper.FlatObjectFieldType) flatParentFieldType).directSubfield(); + String searchValues = ((FlatObjectFieldMapper.FlatObjectFieldType) flatParentFieldType).rewriteValue("foo"); + assertEquals("foo", searchValues); + assertEquals(new TermQuery(new Term(searchFieldName, searchValues)), flatParentFieldType.termQuery(searchValues, null)); + + MappedFieldType dynamicMappedFieldType = new FlatObjectFieldMapper.FlatObjectFieldType("field.bar", flatParentFieldType.name()); + + // when searching for "foo" in "field.bar", the term query is directed to search in field._valueAndPath field + String searchFieldNameDocPath = ((FlatObjectFieldMapper.FlatObjectFieldType) dynamicMappedFieldType).directSubfield(); + String searchValuesDocPath = ((FlatObjectFieldMapper.FlatObjectFieldType) dynamicMappedFieldType).rewriteValue("foo"); + assertEquals("field.bar=foo", searchValuesDocPath); + assertEquals(new TermQuery(new Term(searchFieldNameDocPath, searchValuesDocPath)), dynamicMappedFieldType.termQuery("foo", null)); + + MappedFieldType unsearchable = new FlatObjectFieldMapper.FlatObjectFieldType("field", false, true, Collections.emptyMap()); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> unsearchable.termQuery("bar", null)); + assertEquals("Cannot search on field [field] since it is not indexed.", e.getMessage()); + } + + public void testExistsQuery() { + { + MappedFieldType ft = getFlatParentFieldType("field"); + // when checking on the flat_object field name "field", check if exist in the field mapper names + assertEquals(new TermQuery(new Term(FieldNamesFieldMapper.NAME, "field")), ft.existsQuery(null)); + + // when checking if a subfield within the flat_object, for example, "field.bar", use term query in the flat_object field + MappedFieldType dynamicMappedFieldType = new FlatObjectFieldMapper.FlatObjectFieldType("field.bar", ft.name()); + assertEquals(new TermQuery(new Term("field", "field.bar")), dynamicMappedFieldType.existsQuery(null)); + + } + { + FlatObjectFieldMapper.FlatObjectFieldType ft = new FlatObjectFieldMapper.FlatObjectFieldType( + "field", + true, + false, + Collections.emptyMap() + ); + assertEquals(new TermQuery(new Term(FieldNamesFieldMapper.NAME, "field")), ft.existsQuery(null)); + } + } +} diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java new file mode 100644 index 0000000000000..5b9135afb66f3 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/remote/RemoteStoreUtilsTests.java @@ -0,0 +1,41 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.remote; + +import org.opensearch.test.OpenSearchTestCase; + +public class RemoteStoreUtilsTests extends OpenSearchTestCase { + + public void testInvertToStrInvalid() { + assertThrows(IllegalArgumentException.class, () -> RemoteStoreUtils.invertLong(-1)); + } + + public void testInvertToStrValid() { + assertEquals("9223372036854774573", RemoteStoreUtils.invertLong(1234)); + assertEquals("0000000000000001234", RemoteStoreUtils.invertLong(9223372036854774573L)); + } + + public void testInvertToLongInvalid() { + assertThrows(IllegalArgumentException.class, () -> RemoteStoreUtils.invertLong("-5")); + } + + public void testInvertToLongValid() { + assertEquals(1234, RemoteStoreUtils.invertLong("9223372036854774573")); + assertEquals(9223372036854774573L, RemoteStoreUtils.invertLong("0000000000000001234")); + } + + public void testInvert() { + assertEquals(0, RemoteStoreUtils.invertLong(RemoteStoreUtils.invertLong(0))); + assertEquals(Long.MAX_VALUE, RemoteStoreUtils.invertLong(RemoteStoreUtils.invertLong(Long.MAX_VALUE))); + for (int i = 0; i < 10; i++) { + long num = randomLongBetween(1, Long.MAX_VALUE); + assertEquals(num, RemoteStoreUtils.invertLong(RemoteStoreUtils.invertLong(num))); + } + } +} diff --git a/server/src/test/java/org/opensearch/index/store/RemoteDirectoryTests.java b/server/src/test/java/org/opensearch/index/store/RemoteDirectoryTests.java index 15f1585bd1477..8ee5fcf0da9d7 100644 --- a/server/src/test/java/org/opensearch/index/store/RemoteDirectoryTests.java +++ b/server/src/test/java/org/opensearch/index/store/RemoteDirectoryTests.java @@ -12,6 +12,8 @@ import org.apache.lucene.store.IndexInput; import org.apache.lucene.store.IndexOutput; import org.junit.Before; +import org.opensearch.action.ActionListener; +import org.opensearch.action.LatchedActionListener; import org.opensearch.common.blobstore.BlobContainer; import org.opensearch.common.blobstore.BlobMetadata; import org.opensearch.common.blobstore.support.PlainBlobMetadata; @@ -23,15 +25,19 @@ import java.util.Collection; import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.Set; import java.util.stream.Collectors; import java.util.stream.Stream; -import static org.mockito.Mockito.mock; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.when; +import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.doAnswer; public class RemoteDirectoryTests extends OpenSearchTestCase { private BlobContainer blobContainer; @@ -146,6 +152,54 @@ public void testFileLengthIOException() throws IOException { assertThrows(IOException.class, () -> remoteDirectory.fileLength("segment_1")); } + public void testListFilesByPrefixInLexicographicOrder() throws IOException { + doAnswer(invocation -> { + LatchedActionListener<List<BlobMetadata>> latchedActionListener = invocation.getArgument(3); + latchedActionListener.onResponse(List.of(new PlainBlobMetadata("metadata_1", 1))); + return null; + }).when(blobContainer) + .listBlobsByPrefixInSortedOrder( + eq("metadata"), + eq(1), + eq(BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC), + any(ActionListener.class) + ); + + assertEquals(List.of("metadata_1"), remoteDirectory.listFilesByPrefixInLexicographicOrder("metadata", 1)); + } + + public void testListFilesByPrefixInLexicographicOrderEmpty() throws IOException { + doAnswer(invocation -> { + 
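// Simulate an empty listing: complete the listener inline with no blob metadata for the prefix. + 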
LatchedActionListener<List<BlobMetadata>> latchedActionListener = invocation.getArgument(3); + latchedActionListener.onResponse(List.of()); + return null; + }).when(blobContainer) + .listBlobsByPrefixInSortedOrder( + eq("metadata"), + eq(1), + eq(BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC), + any(ActionListener.class) + ); + + assertEquals(List.of(), remoteDirectory.listFilesByPrefixInLexicographicOrder("metadata", 1)); + } + + public void testListFilesByPrefixInLexicographicOrderException() { + doAnswer(invocation -> { + LatchedActionListener<List<BlobMetadata>> latchedActionListener = invocation.getArgument(3); + latchedActionListener.onFailure(new IOException("Error")); + return null; + }).when(blobContainer) + .listBlobsByPrefixInSortedOrder( + eq("metadata"), + eq(1), + eq(BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC), + any(ActionListener.class) + ); + + assertThrows(IOException.class, () -> remoteDirectory.listFilesByPrefixInLexicographicOrder("metadata", 1)); + } + public void testGetPendingDeletions() { assertThrows(UnsupportedOperationException.class, () -> remoteDirectory.getPendingDeletions()); } @@ -165,5 +219,4 @@ public void testRename() { public void testObtainLock() { assertThrows(UnsupportedOperationException.class, () -> remoteDirectory.obtainLock("segment_1")); } - } diff --git a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactoryTests.java b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactoryTests.java index 7a9cbc12d823b..bf4b2a14f2567 100644 --- a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactoryTests.java +++ b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactoryTests.java @@ -11,8 +11,11 @@ import org.apache.lucene.store.Directory; import org.junit.Before; import org.mockito.ArgumentCaptor; +import org.opensearch.action.ActionListener; +import org.opensearch.action.LatchedActionListener; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.blobstore.BlobContainer; +import org.opensearch.common.blobstore.BlobMetadata; import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.blobstore.BlobStore; import org.opensearch.common.settings.Settings; @@ -24,31 +27,35 @@ import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.test.IndexSettingsModule; import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.ThreadPool; import java.io.IOException; import java.nio.file.Path; -import java.util.Collections; import java.util.List; import java.util.function.Supplier; import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.times; +import static org.mockito.Mockito.doAnswer; public class RemoteSegmentStoreDirectoryFactoryTests extends OpenSearchTestCase { private Supplier repositoriesServiceSupplier; private RepositoriesService repositoriesService; + private ThreadPool threadPool; private RemoteSegmentStoreDirectoryFactory remoteSegmentStoreDirectoryFactory; @Before public void setup() { repositoriesServiceSupplier = mock(Supplier.class); repositoriesService = mock(RepositoriesService.class); + threadPool = mock(ThreadPool.class); when(repositoriesServiceSupplier.get()).thenReturn(repositoriesService); - remoteSegmentStoreDirectoryFactory = new RemoteSegmentStoreDirectoryFactory(repositoriesServiceSupplier); + remoteSegmentStoreDirectoryFactory = new RemoteSegmentStoreDirectoryFactory(repositoriesServiceSupplier, threadPool); } public void testNewDirectory() throws IOException { @@ -65,7 +72,12 @@ public void testNewDirectory() throws IOException { when(repository.blobStore()).thenReturn(blobStore); when(repository.basePath()).thenReturn(new BlobPath().add("base_path")); when(blobStore.blobContainer(any())).thenReturn(blobContainer); - when(blobContainer.listBlobs()).thenReturn(Collections.emptyMap()); + doAnswer(invocation -> { + LatchedActionListener<List<BlobMetadata>> latchedActionListener = invocation.getArgument(3); + latchedActionListener.onResponse(List.of()); + return null; + }).when(blobContainer) + .listBlobsByPrefixInSortedOrder(any(), eq(1), eq(BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC), any(ActionListener.class)); when(repositoriesService.repository("remote_store_repository")).thenReturn(repository); @@ -78,7 +90,12 @@ public void testNewDirectory() throws IOException { assertEquals("base_path/uuid_1/0/segments/metadata/", blobPaths.get(1).buildAsString()); assertEquals("base_path/uuid_1/0/segments/lock_files/", blobPaths.get(2).buildAsString()); - verify(blobContainer).listBlobsByPrefix(RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX); + verify(blobContainer).listBlobsByPrefixInSortedOrder( + eq(RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX), + eq(1), + eq(BlobContainer.BlobNameSortOrder.LEXICOGRAPHIC), + any() + ); verify(repositoriesService, times(2)).repository("remote_store_repository"); } } diff --git a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java index 3417e7b0aee04..c37893877253e 100644 --- a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java +++ b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java @@ -30,12 +30,15 @@ import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.lucene.store.ByteArrayIndexInput; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.concurrent.OpenSearchExecutors; import org.opensearch.index.engine.NRTReplicationEngineFactory; +import org.opensearch.index.remote.RemoteStoreUtils; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardTestCase; import org.opensearch.index.store.lockmanager.RemoteStoreMetadataLockManager; import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadata; import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadataHandler; +import org.opensearch.threadpool.ThreadPool; import java.io.IOException; import java.nio.file.NoSuchFileException; @@ -45,6 +48,7 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.concurrent.ExecutorService; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -56,6 +60,7 @@ import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.doReturn; +import static org.hamcrest.CoreMatchers.is; public class RemoteSegmentStoreDirectoryTests extends IndexShardTestCase { private RemoteDirectory remoteDataDirectory; @@ -65,21 +70,35 @@ public class RemoteSegmentStoreDirectoryTests extends IndexShardTestCase { private RemoteSegmentStoreDirectory remoteSegmentStoreDirectory; private IndexShard 
indexShard; private SegmentInfos segmentInfos; + private ThreadPool threadPool; + + private final String metadataFilename = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(12, 23, 34, 1, 1); + private final String metadataFilename2 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(12, 13, 34, 1, 1); + private final String metadataFilename3 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(10, 38, 34, 1, 1); @Before public void setup() throws IOException { remoteDataDirectory = mock(RemoteDirectory.class); remoteMetadataDirectory = mock(RemoteDirectory.class); mdLockManager = mock(RemoteStoreMetadataLockManager.class); + threadPool = mock(ThreadPool.class); - remoteSegmentStoreDirectory = new RemoteSegmentStoreDirectory(remoteDataDirectory, remoteMetadataDirectory, mdLockManager); + remoteSegmentStoreDirectory = new RemoteSegmentStoreDirectory( + remoteDataDirectory, + remoteMetadataDirectory, + mdLockManager, + threadPool + ); Settings indexSettings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, org.opensearch.Version.CURRENT).build(); + ExecutorService executorService = OpenSearchExecutors.newDirectExecutorService(); indexShard = newStartedShard(false, indexSettings, new NRTReplicationEngineFactory()); try (Store store = indexShard.store()) { segmentInfos = store.readLastCommittedSegmentsInfo(); } + + when(threadPool.executor(ThreadPool.Names.REMOTE_PURGE)).thenReturn(executorService); } @After @@ -105,50 +124,16 @@ public void testUploadedSegmentMetadataFromString() { assertEquals("_0.cfe::_0.cfe__uuidxyz::4567::372000", metadata.toString()); } - public void testGetMetadataFilename() { - // Generation 23 is replaced by n due to radix 32 - assertEquals( - RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX + "__12__n__uuid1", - RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(12, 23, "uuid1") - ); - } - public void testGetPrimaryTermGenerationUuid() { - String[] filenameTokens = "abc__12__n__uuid_xyz".split(RemoteSegmentStoreDirectory.MetadataFilenameUtils.SEPARATOR); + String[] filenameTokens = "abc__9223372036854775795__9223372036854775784__uuid_xyz".split( + RemoteSegmentStoreDirectory.MetadataFilenameUtils.SEPARATOR + ); assertEquals(12, RemoteSegmentStoreDirectory.MetadataFilenameUtils.getPrimaryTerm(filenameTokens)); assertEquals(23, RemoteSegmentStoreDirectory.MetadataFilenameUtils.getGeneration(filenameTokens)); - assertEquals("uuid_xyz", RemoteSegmentStoreDirectory.MetadataFilenameUtils.getUuid(filenameTokens)); - } - - public void testMetadataFilenameComparator() { - List metadataFilenames = new ArrayList<>( - List.of( - "abc__10__20__uuid1", - "abc__12__2__uuid2", - "pqr__1__1__uuid0", - "abc__3__n__uuid3", - "abc__10__8__uuid8", - "abc__3__a__uuid4", - "abc__3__a__uuid5" - ) - ); - metadataFilenames.sort(RemoteSegmentStoreDirectory.METADATA_FILENAME_COMPARATOR); - assertEquals( - List.of( - "abc__3__a__uuid4", - "abc__3__a__uuid5", - "abc__3__n__uuid3", - "abc__10__8__uuid8", - "abc__10__20__uuid1", - "abc__12__2__uuid2", - "pqr__1__1__uuid0" - ), - metadataFilenames - ); } public void testInitException() throws IOException { - when(remoteMetadataDirectory.listFilesByPrefix(RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX)).thenThrow( + when(remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder(RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX, 1)).thenThrow( new IOException("Error") ); @@ -248,29 +233,42 @@ private 
ByteArrayIndexInput createMetadataFileBytes(Map segmentF } private Map> populateMetadata() throws IOException { - List metadataFiles = List.of("metadata__1__5__abc", "metadata__1__6__pqr", "metadata__2__1__zxv"); - when(remoteMetadataDirectory.listFilesByPrefix(RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX)).thenReturn( - metadataFiles - ); + List metadataFiles = new ArrayList<>(); + + metadataFiles.add(metadataFilename); + metadataFiles.add(metadataFilename2); + metadataFiles.add(metadataFilename3); + + when( + remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder( + RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX, + 1 + ) + ).thenReturn(List.of(metadataFilename)); + when( + remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder( + RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX, + Integer.MAX_VALUE + ) + ).thenReturn(metadataFiles); Map> metadataFilenameContentMapping = Map.of( - "metadata__1__5__abc", + metadataFilename, getDummyMetadata("_0", 1), - "metadata__1__6__pqr", + metadataFilename2, getDummyMetadata("_0", 1), - "metadata__2__1__zxv", + metadataFilename3, getDummyMetadata("_0", 1) ); - when(remoteMetadataDirectory.openInput("metadata__1__5__abc", IOContext.DEFAULT)).thenReturn( - createMetadataFileBytes(metadataFilenameContentMapping.get("metadata__1__5__abc"), 1, 5) + when(remoteMetadataDirectory.openInput(metadataFilename, IOContext.DEFAULT)).thenAnswer( + I -> createMetadataFileBytes(metadataFilenameContentMapping.get(metadataFilename), 23, 12) ); - when(remoteMetadataDirectory.openInput("metadata__1__6__pqr", IOContext.DEFAULT)).thenReturn( - createMetadataFileBytes(metadataFilenameContentMapping.get("metadata__1__6__pqr"), 1, 6) + when(remoteMetadataDirectory.openInput(metadataFilename2, IOContext.DEFAULT)).thenAnswer( + I -> createMetadataFileBytes(metadataFilenameContentMapping.get(metadataFilename2), 13, 12) ); - when(remoteMetadataDirectory.openInput("metadata__2__1__zxv", IOContext.DEFAULT)).thenReturn( - createMetadataFileBytes(metadataFilenameContentMapping.get("metadata__2__1__zxv"), 1, 2), - createMetadataFileBytes(metadataFilenameContentMapping.get("metadata__2__1__zxv"), 1, 2) + when(remoteMetadataDirectory.openInput(metadataFilename3, IOContext.DEFAULT)).thenAnswer( + I -> createMetadataFileBytes(metadataFilenameContentMapping.get(metadataFilename3), 38, 10) ); return metadataFilenameContentMapping; @@ -279,9 +277,12 @@ private Map> populateMetadata() throws IOException { public void testInit() throws IOException { populateMetadata(); - when(remoteMetadataDirectory.listFilesByPrefix(RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX)).thenReturn( - List.of("metadata__1__5__abc", "metadata__1__6__pqr", "metadata__2__1__zxv") - ); + when( + remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder( + RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX, + 1 + ) + ).thenReturn(List.of(metadataFilename)); remoteSegmentStoreDirectory.init(); @@ -385,15 +386,15 @@ public void testOpenInputException() throws IOException { public void testAcquireLock() throws IOException { populateMetadata(); remoteSegmentStoreDirectory.init(); - String mdFile = "xyz"; String acquirerId = "test-acquirer"; long testPrimaryTerm = 1; long testGeneration = 5; List metadataFiles = List.of("metadata__1__5__abc"); when( - remoteMetadataDirectory.listFilesByPrefix( - RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilePrefixForCommit(testPrimaryTerm, testGeneration) + 
remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder( + RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilePrefixForCommit(testPrimaryTerm, testGeneration), + 1 ) ).thenReturn(metadataFiles); @@ -423,8 +424,9 @@ public void testReleaseLock() throws IOException { List metadataFiles = List.of("metadata__1__5__abc"); when( - remoteMetadataDirectory.listFilesByPrefix( - RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilePrefixForCommit(testPrimaryTerm, testGeneration) + remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder( + RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilePrefixForCommit(testPrimaryTerm, testGeneration), + 1 ) ).thenReturn(metadataFiles); @@ -440,8 +442,9 @@ public void testIsAcquired() throws IOException { List metadataFiles = List.of("metadata__1__5__abc"); when( - remoteMetadataDirectory.listFilesByPrefix( - RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilePrefixForCommit(testPrimaryTerm, testGeneration) + remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder( + RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilePrefixForCommit(testPrimaryTerm, testGeneration), + 1 ) ).thenReturn(metadataFiles); @@ -457,8 +460,9 @@ public void testIsAcquiredException() throws IOException { List metadataFiles = new ArrayList<>(); when( - remoteMetadataDirectory.listFilesByPrefix( - RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilePrefixForCommit(testPrimaryTerm, testGeneration) + remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder( + RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilePrefixForCommit(testPrimaryTerm, testGeneration), + 1 ) ).thenReturn(metadataFiles); @@ -468,14 +472,10 @@ public void testIsAcquiredException() throws IOException { public void testGetMetadataFileForCommit() throws IOException { long testPrimaryTerm = 2; long testGeneration = 3; - List metadataFiles = List.of( - "metadata__1__5__abc", - "metadata__" + testPrimaryTerm + "__" + testGeneration + "__pqr", - "metadata__2__1__zxv" - ); when( - remoteMetadataDirectory.listFilesByPrefix( - RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilePrefixForCommit(testPrimaryTerm, testGeneration) + remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder( + RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilePrefixForCommit(testPrimaryTerm, testGeneration), + 1 ) ).thenReturn(List.of("metadata__" + testPrimaryTerm + "__" + testGeneration + "__pqr")); @@ -484,33 +484,6 @@ public void testGetMetadataFileForCommit() throws IOException { } - public void testGetSegmentsUploadedToRemoteStore() throws IOException { - long testPrimaryTerm = 1; - long testGeneration = 5; - - List metadataFiles = List.of("metadata__1__5__abc"); - when( - remoteMetadataDirectory.listFilesByPrefix( - RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilePrefixForCommit(testPrimaryTerm, testGeneration) - ) - ).thenReturn(metadataFiles); - - Map> metadataFilenameContentMapping = Map.of( - "metadata__1__5__abc", - getDummyMetadata("_0", 5), - "metadata__1__6__pqr", - getDummyMetadata("_0", 6), - "metadata__2__1__zxv", - getDummyMetadata("_0", 1) - ); - - when(remoteMetadataDirectory.openInput("metadata__1__5__abc", IOContext.DEFAULT)).thenReturn( - createMetadataFileBytes(metadataFilenameContentMapping.get("metadata__1__5__abc"), 1, 5) - ); - - assert (remoteSegmentStoreDirectory.getSegmentsUploadedToRemoteStore(testPrimaryTerm, testGeneration).containsKey("segments_5")); - } - 
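    // With the custom METADATA_FILENAME_COMPARATOR removed above, ordering now comes
    // from the filenames themselves: each numeric component (primary term, generation
    // and so on) is written as an inverted, zero-padded long, so plain lexicographic
    // order is newest-first and listFilesByPrefixInLexicographicOrder(prefix, 1) can
    // return the latest metadata file directly. A minimal sketch of the inversion
    // trick, assuming the RemoteStoreUtils.invertLong shape (the 19-digit zero
    // padding is an assumption for illustration, not taken from this diff):
    private static String invertLongSketch(long num) {
        if (num < 0) {
            throw new IllegalArgumentException("invert requires a non-negative long: " + num);
        }
        // Long.MAX_VALUE - num, left-padded so all values have equal width:
        // invertLongSketch(23) -> "9223372036854775784" sorts before
        // invertLongSketch(12) -> "9223372036854775795", so the higher generation,
        // i.e. the newer commit, comes first.
        return String.format(java.util.Locale.ROOT, "%019d", Long.MAX_VALUE - num);
    }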
public void testCopyFrom() throws IOException { String filename = "_100.si"; populateMetadata(); @@ -542,46 +515,20 @@ public void testCopyFromException() throws IOException { storeDirectory.close(); } - public void testCopyFromOverride() throws IOException { - String filename = "_100.si"; - populateMetadata(); - remoteSegmentStoreDirectory.init(); - - Directory storeDirectory = LuceneTestCase.newDirectory(); - IndexOutput indexOutput = storeDirectory.createOutput(filename, IOContext.DEFAULT); - indexOutput.writeString("Hello World!"); - CodecUtil.writeFooter(indexOutput); - indexOutput.close(); - storeDirectory.sync(List.of(filename)); - - assertFalse(remoteSegmentStoreDirectory.getSegmentsUploadedToRemoteStore().containsKey(filename)); - remoteSegmentStoreDirectory.copyFrom(storeDirectory, filename, filename, IOContext.DEFAULT, true); - RemoteSegmentStoreDirectory.UploadedSegmentMetadata uploadedSegmentMetadata = remoteSegmentStoreDirectory - .getSegmentsUploadedToRemoteStore() - .get(filename); - assertNotNull(uploadedSegmentMetadata); - remoteSegmentStoreDirectory.copyFrom(storeDirectory, filename, filename, IOContext.DEFAULT, true); - assertEquals( - uploadedSegmentMetadata.toString(), - remoteSegmentStoreDirectory.getSegmentsUploadedToRemoteStore().get(filename).toString() - ); - - storeDirectory.close(); - } - public void testContainsFile() throws IOException { - List metadataFiles = List.of("metadata__1__5__abc"); - when(remoteMetadataDirectory.listFilesByPrefix(RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX)).thenReturn( - metadataFiles - ); + List metadataFiles = List.of(metadataFilename); + when( + remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder( + RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX, + 1 + ) + ).thenReturn(metadataFiles); Map metadata = new HashMap<>(); metadata.put("_0.cfe", "_0.cfe::_0.cfe__" + UUIDs.base64UUID() + "::1234::512"); metadata.put("_0.cfs", "_0.cfs::_0.cfs__" + UUIDs.base64UUID() + "::2345::1024"); - when(remoteMetadataDirectory.openInput("metadata__1__5__abc", IOContext.DEFAULT)).thenReturn( - createMetadataFileBytes(metadata, 1, 5) - ); + when(remoteMetadataDirectory.openInput(metadataFilename, IOContext.DEFAULT)).thenReturn(createMetadataFileBytes(metadata, 1, 5)); remoteSegmentStoreDirectory.init(); @@ -611,7 +558,7 @@ public void testUploadMetadataEmpty() throws IOException { Collection segmentFiles = List.of("s1", "s2", "s3"); assertThrows( NoSuchFileException.class, - () -> remoteSegmentStoreDirectory.uploadMetadata(segmentFiles, segmentInfos, storeDirectory, 12L) + () -> remoteSegmentStoreDirectory.uploadMetadata(segmentFiles, segmentInfos, storeDirectory, 12L, 34L) ); } @@ -623,16 +570,19 @@ public void testUploadMetadataNonEmpty() throws IOException { BytesStreamOutput output = new BytesStreamOutput(); IndexOutput indexOutput = new OutputStreamIndexOutput("segment metadata", "metadata output stream", output, 4096); - long generation = segmentInfos.getGeneration(); - when(storeDirectory.createOutput(startsWith("metadata__12__" + generation), eq(IOContext.DEFAULT))).thenReturn(indexOutput); + String generation = RemoteStoreUtils.invertLong(segmentInfos.getGeneration()); + String primaryTerm = RemoteStoreUtils.invertLong(12); + when(storeDirectory.createOutput(startsWith("metadata__" + primaryTerm + "__" + generation), eq(IOContext.DEFAULT))).thenReturn( + indexOutput + ); Collection segmentFiles = List.of("_0.si", "_0.cfe", "_0.cfs", "segments_1"); - 
remoteSegmentStoreDirectory.uploadMetadata(segmentFiles, segmentInfos, storeDirectory, 12L); + remoteSegmentStoreDirectory.uploadMetadata(segmentFiles, segmentInfos, storeDirectory, 12L, 34L); verify(remoteMetadataDirectory).copyFrom( eq(storeDirectory), - startsWith("metadata__12__" + generation), - startsWith("metadata__12__" + generation), + startsWith("metadata__" + primaryTerm + "__" + generation), + startsWith("metadata__" + primaryTerm + "__" + generation), eq(IOContext.DEFAULT) ); @@ -655,10 +605,13 @@ public void testUploadMetadataNonEmpty() throws IOException { } public void testNoMetadataHeaderCorruptIndexException() throws IOException { - List metadataFiles = List.of("metadata__1__5__abc"); - when(remoteMetadataDirectory.listFilesByPrefix(RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX)).thenReturn( - metadataFiles - ); + List metadataFiles = List.of(metadataFilename); + when( + remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder( + RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX, + 1 + ) + ).thenReturn(metadataFiles); Map metadata = new HashMap<>(); metadata.put("_0.cfe", "_0.cfe::_0.cfe__" + UUIDs.base64UUID() + "::1234"); @@ -669,16 +622,19 @@ public void testNoMetadataHeaderCorruptIndexException() throws IOException { indexOutput.writeMapOfStrings(metadata); indexOutput.close(); ByteArrayIndexInput byteArrayIndexInput = new ByteArrayIndexInput("segment metadata", BytesReference.toBytes(output.bytes())); - when(remoteMetadataDirectory.openInput("metadata__1__5__abc", IOContext.DEFAULT)).thenReturn(byteArrayIndexInput); + when(remoteMetadataDirectory.openInput(metadataFilename, IOContext.DEFAULT)).thenReturn(byteArrayIndexInput); assertThrows(CorruptIndexException.class, () -> remoteSegmentStoreDirectory.init()); } public void testInvalidCodecHeaderCorruptIndexException() throws IOException { - List metadataFiles = List.of("metadata__1__5__abc"); - when(remoteMetadataDirectory.listFilesByPrefix(RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX)).thenReturn( - metadataFiles - ); + List metadataFiles = List.of(metadataFilename); + when( + remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder( + RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX, + 1 + ) + ).thenReturn(metadataFiles); Map metadata = new HashMap<>(); metadata.put("_0.cfe", "_0.cfe::_0.cfe__" + UUIDs.base64UUID() + "::1234"); @@ -691,16 +647,19 @@ public void testInvalidCodecHeaderCorruptIndexException() throws IOException { CodecUtil.writeFooter(indexOutput); indexOutput.close(); ByteArrayIndexInput byteArrayIndexInput = new ByteArrayIndexInput("segment metadata", BytesReference.toBytes(output.bytes())); - when(remoteMetadataDirectory.openInput("metadata__1__5__abc", IOContext.DEFAULT)).thenReturn(byteArrayIndexInput); + when(remoteMetadataDirectory.openInput(metadataFilename, IOContext.DEFAULT)).thenReturn(byteArrayIndexInput); assertThrows(CorruptIndexException.class, () -> remoteSegmentStoreDirectory.init()); } public void testHeaderMinVersionCorruptIndexException() throws IOException { - List metadataFiles = List.of("metadata__1__5__abc"); - when(remoteMetadataDirectory.listFilesByPrefix(RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX)).thenReturn( - metadataFiles - ); + List metadataFiles = List.of(metadataFilename); + when( + remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder( + RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX, + 1 + ) + ).thenReturn(metadataFiles); Map metadata 
= new HashMap<>(); metadata.put("_0.cfe", "_0.cfe::_0.cfe__" + UUIDs.base64UUID() + "::1234"); @@ -713,16 +672,19 @@ public void testHeaderMinVersionCorruptIndexException() throws IOException { CodecUtil.writeFooter(indexOutput); indexOutput.close(); ByteArrayIndexInput byteArrayIndexInput = new ByteArrayIndexInput("segment metadata", BytesReference.toBytes(output.bytes())); - when(remoteMetadataDirectory.openInput("metadata__1__5__abc", IOContext.DEFAULT)).thenReturn(byteArrayIndexInput); + when(remoteMetadataDirectory.openInput(metadataFilename, IOContext.DEFAULT)).thenReturn(byteArrayIndexInput); assertThrows(IndexFormatTooOldException.class, () -> remoteSegmentStoreDirectory.init()); } public void testHeaderMaxVersionCorruptIndexException() throws IOException { - List metadataFiles = List.of("metadata__1__5__abc"); - when(remoteMetadataDirectory.listFilesByPrefix(RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX)).thenReturn( - metadataFiles - ); + List metadataFiles = List.of(metadataFilename); + when( + remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder( + RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX, + 1 + ) + ).thenReturn(metadataFiles); Map metadata = new HashMap<>(); metadata.put("_0.cfe", "_0.cfe::_0.cfe__" + UUIDs.base64UUID() + "::1234"); @@ -735,16 +697,19 @@ public void testHeaderMaxVersionCorruptIndexException() throws IOException { CodecUtil.writeFooter(indexOutput); indexOutput.close(); ByteArrayIndexInput byteArrayIndexInput = new ByteArrayIndexInput("segment metadata", BytesReference.toBytes(output.bytes())); - when(remoteMetadataDirectory.openInput("metadata__1__5__abc", IOContext.DEFAULT)).thenReturn(byteArrayIndexInput); + when(remoteMetadataDirectory.openInput(metadataFilename, IOContext.DEFAULT)).thenReturn(byteArrayIndexInput); assertThrows(IndexFormatTooNewException.class, () -> remoteSegmentStoreDirectory.init()); } public void testIncorrectChecksumCorruptIndexException() throws IOException { - List metadataFiles = List.of("metadata__1__5__abc"); - when(remoteMetadataDirectory.listFilesByPrefix(RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX)).thenReturn( - metadataFiles - ); + List metadataFiles = List.of(metadataFilename); + when( + remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder( + RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX, + 1 + ) + ).thenReturn(metadataFiles); Map metadata = new HashMap<>(); metadata.put("_0.cfe", "_0.cfe::_0.cfe__" + UUIDs.base64UUID() + "::1234::512"); @@ -761,50 +726,88 @@ public void testIncorrectChecksumCorruptIndexException() throws IOException { indexOutputSpy.close(); ByteArrayIndexInput byteArrayIndexInput = new ByteArrayIndexInput("segment metadata", BytesReference.toBytes(output.bytes())); - when(remoteMetadataDirectory.openInput("metadata__1__5__abc", IOContext.DEFAULT)).thenReturn(byteArrayIndexInput); + when(remoteMetadataDirectory.openInput(metadataFilename, IOContext.DEFAULT)).thenReturn(byteArrayIndexInput); assertThrows(CorruptIndexException.class, () -> remoteSegmentStoreDirectory.init()); } - public void testDeleteStaleCommitsException() throws IOException { - when(remoteMetadataDirectory.listFilesByPrefix(RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX)).thenThrow( - new IOException("Error reading") - ); + public void testDeleteStaleCommitsException() throws Exception { + populateMetadata(); + when( + remoteMetadataDirectory.listFilesByPrefixInLexicographicOrder( + 
RemoteSegmentStoreDirectory.MetadataFilenameUtils.METADATA_PREFIX,
+                Integer.MAX_VALUE
+            )
+        ).thenThrow(new IOException("Error reading"));
+
+        // populateMetadata() adds a stub to return 3 metadata files
+        // We are passing lastNMetadataFilesToKeep=2 here to validate that deleteFile is
+        // not invoked in case of an exception
+        remoteSegmentStoreDirectory.deleteStaleSegmentsAsync(2);
+
+        assertBusy(() -> assertThat(remoteSegmentStoreDirectory.canDeleteStaleCommits.get(), is(true)));
+        verify(remoteMetadataDirectory, times(0)).deleteFile(any(String.class));
+    }
+
+    public void testDeleteStaleCommitsExceptionWhileScheduling() throws Exception {
+        populateMetadata();
+        doThrow(new IllegalArgumentException()).when(threadPool).executor(any(String.class));
+
+        // populateMetadata() adds a stub to return 3 metadata files
+        // We are passing lastNMetadataFilesToKeep=2 here to validate that deleteFile is
+        // not invoked when scheduling the async deletion fails
+        remoteSegmentStoreDirectory.deleteStaleSegmentsAsync(2);
-        assertThrows(IOException.class, () -> remoteSegmentStoreDirectory.deleteStaleSegments(5));
+        assertBusy(() -> assertThat(remoteSegmentStoreDirectory.canDeleteStaleCommits.get(), is(true)));
+        verify(remoteMetadataDirectory, times(0)).deleteFile(any(String.class));
    }

-    public void testDeleteStaleCommitsWithinThreshold() throws IOException {
+    public void testDeleteStaleCommitsWithDeletionAlreadyInProgress() throws Exception {
+        populateMetadata();
+        remoteSegmentStoreDirectory.canDeleteStaleCommits.set(false);
+
+        // populateMetadata() adds a stub to return 3 metadata files
+        // We are passing lastNMetadataFilesToKeep=2 here to validate that deleteFile is
+        // not invoked while another deletion is already in progress
+        remoteSegmentStoreDirectory.deleteStaleSegmentsAsync(2);
+
+        assertBusy(() -> assertThat(remoteSegmentStoreDirectory.canDeleteStaleCommits.get(), is(false)));
+        verify(remoteMetadataDirectory, times(0)).deleteFile(any(String.class));
+    }
+
+    public void testDeleteStaleCommitsWithinThreshold() throws Exception {
        populateMetadata();
        // populateMetadata() adds a stub to return 3 metadata files
        // We are passing lastNMetadataFilesToKeep=5 here so that none of the metadata files will be deleted
-        remoteSegmentStoreDirectory.deleteStaleSegments(5);
+        remoteSegmentStoreDirectory.deleteStaleSegmentsAsync(5);

+        assertBusy(() -> assertThat(remoteSegmentStoreDirectory.canDeleteStaleCommits.get(), is(true)));
        verify(remoteMetadataDirectory, times(0)).openInput(any(String.class), eq(IOContext.DEFAULT));
    }

-    public void testDeleteStaleCommitsActualDelete() throws IOException {
+    public void testDeleteStaleCommitsActualDelete() throws Exception {
        Map<String, Map<String, String>> metadataFilenameContentMapping = populateMetadata();
        remoteSegmentStoreDirectory.init();

        // populateMetadata() adds a stub to return 3 metadata files
        // We are passing lastNMetadataFilesToKeep=2 here so that the oldest 1 metadata file will be deleted
-        remoteSegmentStoreDirectory.deleteStaleSegments(2);
+        remoteSegmentStoreDirectory.deleteStaleSegmentsAsync(2);

-        for (String metadata : metadataFilenameContentMapping.get("metadata__1__5__abc").values()) {
+        for (String metadata : metadataFilenameContentMapping.get(metadataFilename3).values()) {
            String uploadedFilename = metadata.split(RemoteSegmentStoreDirectory.UploadedSegmentMetadata.SEPARATOR)[1];
            verify(remoteDataDirectory).deleteFile(uploadedFilename);
        }
        ;
-        verify(remoteMetadataDirectory).deleteFile("metadata__1__5__abc");
+        assertBusy(() -> assertThat(remoteSegmentStoreDirectory.canDeleteStaleCommits.get(), is(true)));
+        verify(remoteMetadataDirectory).deleteFile(metadataFilename3);
    }
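    // The deleteStaleCommits tests above pin down the lifecycle of the
    // canDeleteStaleCommits guard: it flips to false while a purge is scheduled or
    // running and back to true once the purge completes or fails. A plausible shape
    // for deleteStaleSegmentsAsync under that assumption (a sketch, not the
    // implementation verbatim):
    public void deleteStaleSegmentsAsync(int lastNMetadataFilesToKeep) {
        // Skip silently if another deletion is already in flight.
        if (canDeleteStaleCommits.compareAndSet(true, false)) {
            try {
                threadPool.executor(ThreadPool.Names.REMOTE_PURGE).execute(() -> {
                    try {
                        deleteStaleSegments(lastNMetadataFilesToKeep);
                    } catch (Exception e) {
                        // Log and swallow: stale files are retried on a later purge.
                    } finally {
                        canDeleteStaleCommits.set(true);
                    }
                });
            } catch (Exception e) {
                // Scheduling itself failed (e.g. rejected execution): release the guard.
                canDeleteStaleCommits.set(true);
            }
        }
    }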
-    public void testDeleteStaleCommitsActualDeleteIOException() throws IOException {
+    public void testDeleteStaleCommitsActualDeleteIOException() throws Exception {
        Map<String, Map<String, String>> metadataFilenameContentMapping = populateMetadata();
        remoteSegmentStoreDirectory.init();

-        String segmentFileWithException = metadataFilenameContentMapping.get("metadata__1__5__abc")
+        String segmentFileWithException = metadataFilenameContentMapping.get(metadataFilename3)
            .values()
            .stream()
            .findAny()
@@ -813,21 +816,21 @@ public void testDeleteStaleCommitsActualDeleteIOException() throws IOException {
        doThrow(new IOException("Error")).when(remoteDataDirectory).deleteFile(segmentFileWithException);
        // populateMetadata() adds a stub to return 3 metadata files
        // We are passing lastNMetadataFilesToKeep=2 here so that the oldest 1 metadata file will be deleted
-        remoteSegmentStoreDirectory.deleteStaleSegments(2);
+        remoteSegmentStoreDirectory.deleteStaleSegmentsAsync(2);

-        for (String metadata : metadataFilenameContentMapping.get("metadata__1__5__abc").values()) {
+        for (String metadata : metadataFilenameContentMapping.get(metadataFilename3).values()) {
            String uploadedFilename = metadata.split(RemoteSegmentStoreDirectory.UploadedSegmentMetadata.SEPARATOR)[1];
            verify(remoteDataDirectory).deleteFile(uploadedFilename);
        }
-        ;
-        verify(remoteMetadataDirectory, times(0)).deleteFile("metadata__1__5__abc");
+        assertBusy(() -> assertThat(remoteSegmentStoreDirectory.canDeleteStaleCommits.get(), is(true)));
+        verify(remoteMetadataDirectory, times(0)).deleteFile(metadataFilename3);
    }

-    public void testDeleteStaleCommitsActualDeleteNoSuchFileException() throws IOException {
+    public void testDeleteStaleCommitsActualDeleteNoSuchFileException() throws Exception {
        Map<String, Map<String, String>> metadataFilenameContentMapping = populateMetadata();
        remoteSegmentStoreDirectory.init();

-        String segmentFileWithException = metadataFilenameContentMapping.get("metadata__1__5__abc")
+        String segmentFileWithException = metadataFilenameContentMapping.get(metadataFilename3)
            .values()
            .stream()
            .findAny()
@@ -836,14 +839,14 @@ public void testDeleteStaleCommitsActualDeleteNoSuchFileException() throws IOExc
        doThrow(new NoSuchFileException(segmentFileWithException)).when(remoteDataDirectory).deleteFile(segmentFileWithException);
        // populateMetadata() adds a stub to return 3 metadata files
        // We are passing lastNMetadataFilesToKeep=2 here so that the oldest 1 metadata file will be deleted
-        remoteSegmentStoreDirectory.deleteStaleSegments(2);
+        remoteSegmentStoreDirectory.deleteStaleSegmentsAsync(2);

-        for (String metadata : metadataFilenameContentMapping.get("metadata__1__5__abc").values()) {
+        for (String metadata : metadataFilenameContentMapping.get(metadataFilename3).values()) {
            String uploadedFilename = metadata.split(RemoteSegmentStoreDirectory.UploadedSegmentMetadata.SEPARATOR)[1];
            verify(remoteDataDirectory).deleteFile(uploadedFilename);
        }
-        ;
-        verify(remoteMetadataDirectory).deleteFile("metadata__1__5__abc");
+        assertBusy(() -> assertThat(remoteSegmentStoreDirectory.canDeleteStaleCommits.get(), is(true)));
+        verify(remoteMetadataDirectory).deleteFile(metadataFilename3);
    }

    public void testSegmentMetadataCurrentVersion() {
@@ -858,6 +861,20 @@ public void testSegmentMetadataCurrentVersion() {
        assertEquals(RemoteSegmentMetadata.CURRENT_VERSION, 1);
    }

+    public void testMetadataFileNameOrder() {
+        String file1 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(15, 21, 23, 1, 1);
+        String file2 =
RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(15, 38, 38, 1, 1); + String file3 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(18, 12, 26, 1, 1); + String file4 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(15, 38, 32, 10, 1); + String file5 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(15, 38, 32, 1, 1); + String file6 = RemoteSegmentStoreDirectory.MetadataFilenameUtils.getMetadataFilename(15, 38, 32, 5, 1); + + List actualList = new ArrayList<>(List.of(file1, file2, file3, file4, file5, file6)); + actualList.sort(String::compareTo); + + assertEquals(List.of(file3, file2, file4, file6, file5, file1), actualList); + } + private static class WrapperIndexOutput extends IndexOutput { public IndexOutput indexOutput; diff --git a/server/src/test/java/org/opensearch/index/store/lockmanager/FileLockInfoTests.java b/server/src/test/java/org/opensearch/index/store/lockmanager/FileLockInfoTests.java index 95af53cb6e5ec..f3a2f1859923e 100644 --- a/server/src/test/java/org/opensearch/index/store/lockmanager/FileLockInfoTests.java +++ b/server/src/test/java/org/opensearch/index/store/lockmanager/FileLockInfoTests.java @@ -10,7 +10,7 @@ import org.opensearch.test.OpenSearchTestCase; -import java.util.List; +import java.nio.file.NoSuchFileException; public class FileLockInfoTests extends OpenSearchTestCase { String testMetadata = "testMetadata"; @@ -41,16 +41,13 @@ public void testGetLockPrefixFailureCase() { assertThrows(IllegalArgumentException.class, fileLockInfo::getLockPrefix); } - public void testGetLocksForAcquirer() { + public void testGetLocksForAcquirer() throws NoSuchFileException { String[] locks = new String[] { FileLockInfo.LockFileUtils.generateLockName(testMetadata, testAcquirerId), FileLockInfo.LockFileUtils.generateLockName(testMetadata, "acquirerId2") }; FileLockInfo fileLockInfo = FileLockInfo.getLockInfoBuilder().withAcquirerId(testAcquirerId).build(); - assertEquals( - fileLockInfo.getLocksForAcquirer(locks), - List.of(FileLockInfo.LockFileUtils.generateLockName(testMetadata, testAcquirerId)) - ); + assertEquals(fileLockInfo.getLockForAcquirer(locks), FileLockInfo.LockFileUtils.generateLockName(testMetadata, testAcquirerId)); } } diff --git a/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java b/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java index f5295bead19a4..edf5b6c84bc54 100644 --- a/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java +++ b/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java @@ -60,6 +60,7 @@ import org.opensearch.index.snapshots.IndexShardSnapshotStatus; import org.opensearch.index.snapshots.blobstore.RemoteStoreShardShallowCopySnapshot; import org.opensearch.index.store.Store; +import org.opensearch.index.store.lockmanager.RemoteStoreLockManagerFactory; import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.repositories.blobstore.MeteredBlobStoreRepository; @@ -378,6 +379,18 @@ public void cloneShardSnapshot( } + @Override + public void cloneRemoteStoreIndexShardSnapshot( + SnapshotId source, + SnapshotId target, + RepositoryShardId shardId, + String shardGeneration, + RemoteStoreLockManagerFactory remoteStoreLockManagerFactory, + ActionListener listener + ) { + + } + @Override public Lifecycle.State lifecycleState() { return null; diff --git 
a/server/src/test/java/org/opensearch/rest/NamedRouteTests.java b/server/src/test/java/org/opensearch/rest/NamedRouteTests.java index d489321ea5dc6..cf3e2b5b858bf 100644 --- a/server/src/test/java/org/opensearch/rest/NamedRouteTests.java +++ b/server/src/test/java/org/opensearch/rest/NamedRouteTests.java @@ -11,22 +11,17 @@ import org.opensearch.OpenSearchException; import org.opensearch.test.OpenSearchTestCase; +import java.util.Set; +import java.util.function.Function; + import static org.opensearch.rest.NamedRoute.MAX_LENGTH_OF_ACTION_NAME; +import static org.opensearch.rest.RestRequest.Method.GET; public class NamedRouteTests extends OpenSearchTestCase { - public void testNamedRouteWithNullName() { - try { - NamedRoute r = new NamedRoute(RestRequest.Method.GET, "foo/bar", null); - fail("Expected NamedRoute to throw exception on null name provided"); - } catch (OpenSearchException e) { - assertTrue(e.getMessage().contains("Invalid route name specified")); - } - } - public void testNamedRouteWithEmptyName() { try { - NamedRoute r = new NamedRoute(RestRequest.Method.GET, "foo/bar", ""); + NamedRoute r = new NamedRoute.Builder().method(GET).path("foo/bar").uniqueName("").build(); fail("Expected NamedRoute to throw exception on empty name provided"); } catch (OpenSearchException e) { assertTrue(e.getMessage().contains("Invalid route name specified")); @@ -35,7 +30,7 @@ public void testNamedRouteWithEmptyName() { public void testNamedRouteWithNameContainingSpace() { try { - NamedRoute r = new NamedRoute(RestRequest.Method.GET, "foo/bar", "foo bar"); + NamedRoute r = new NamedRoute.Builder().method(GET).path("foo/bar").uniqueName("foo bar").build(); fail("Expected NamedRoute to throw exception on name containing space name provided"); } catch (OpenSearchException e) { assertTrue(e.getMessage().contains("Invalid route name specified")); @@ -44,7 +39,7 @@ public void testNamedRouteWithNameContainingSpace() { public void testNamedRouteWithNameContainingInvalidCharacters() { try { - NamedRoute r = new NamedRoute(RestRequest.Method.GET, "foo/bar", "foo@bar!"); + NamedRoute r = new NamedRoute.Builder().method(GET).path("foo/bar").uniqueName("foo@bar!").build(); fail("Expected NamedRoute to throw exception on name containing invalid characters name provided"); } catch (OpenSearchException e) { assertTrue(e.getMessage().contains("Invalid route name specified")); @@ -54,7 +49,7 @@ public void testNamedRouteWithNameContainingInvalidCharacters() { public void testNamedRouteWithNameOverMaximumLength() { try { String repeated = new String(new char[MAX_LENGTH_OF_ACTION_NAME + 1]).replace("\0", "x"); - NamedRoute r = new NamedRoute(RestRequest.Method.GET, "foo/bar", repeated); + NamedRoute r = new NamedRoute.Builder().method(GET).path("foo/bar").uniqueName(repeated).build(); fail("Expected NamedRoute to throw exception on name over maximum length supplied"); } catch (OpenSearchException e) { assertTrue(e.getMessage().contains("Invalid route name specified")); @@ -63,7 +58,7 @@ public void testNamedRouteWithNameOverMaximumLength() { public void testNamedRouteWithValidActionName() { try { - NamedRoute r = new NamedRoute(RestRequest.Method.GET, "foo/bar", "foo:bar"); + NamedRoute r = new NamedRoute.Builder().method(GET).path("foo/bar").uniqueName("foo:bar").build(); } catch (OpenSearchException e) { fail("Did not expect NamedRoute to throw exception on valid action name"); } @@ -71,7 +66,7 @@ public void testNamedRouteWithValidActionName() { public void testNamedRouteWithValidActionNameWithForwardSlash() { try 
{
-            NamedRoute r = new NamedRoute(RestRequest.Method.GET, "foo/bar", "foo:bar/baz");
+            NamedRoute r = new NamedRoute.Builder().method(GET).path("foo/bar").uniqueName("foo:bar:baz").build();
        } catch (OpenSearchException e) {
            fail("Did not expect NamedRoute to throw exception on valid action name");
        }
@@ -79,7 +74,7 @@ public void testNamedRouteWithValidActionNameWithForwardSlash() {
    public void testNamedRouteWithValidActionNameWithWildcard() {
        try {
-            NamedRoute r = new NamedRoute(RestRequest.Method.GET, "foo/bar", "foo:bar/*");
+            NamedRoute r = new NamedRoute.Builder().method(GET).path("foo/bar").uniqueName("foo:bar/*").build();
        } catch (OpenSearchException e) {
            fail("Did not expect NamedRoute to throw exception on valid action name");
        }
@@ -87,9 +82,82 @@ public void testNamedRouteWithValidActionNameWithWildcard() {
    public void testNamedRouteWithValidActionNameWithUnderscore() {
        try {
-            NamedRoute r = new NamedRoute(RestRequest.Method.GET, "foo/bar", "foo:bar_baz");
+            NamedRoute r = new NamedRoute.Builder().method(GET).path("foo/bar").uniqueName("foo:bar_baz").build();
        } catch (OpenSearchException e) {
            fail("Did not expect NamedRoute to throw exception on valid action name");
        }
    }
+
+    public void testNamedRouteWithNullLegacyActionNames() {
+        try {
+            NamedRoute r = new NamedRoute.Builder().method(GET).path("foo/bar").uniqueName("foo:bar").legacyActionNames(null).build();
+            assertTrue(r.actionNames().isEmpty());
+        } catch (OpenSearchException e) {
+            fail("Did not expect NamedRoute to throw exception on null legacy action names");
+        }
+    }
+
+    public void testNamedRouteWithInvalidLegacyActionNames() {
+        try {
+            NamedRoute r = new NamedRoute.Builder().method(GET)
+                .path("foo/bar")
+                .uniqueName("foo:bar")
+                .legacyActionNames(Set.of("foo:bar-legacy"))
+                .build();
+            fail("Expected NamedRoute to throw exception on an invalid legacy action name");
+        } catch (OpenSearchException e) {
+            assertTrue(e.getMessage().contains("Invalid action name [foo:bar-legacy]. 
It must start with one of:")); + } + } + + public void testNamedRouteWithHandler() { + Function fooHandler = restRequest -> null; + try { + NamedRoute r = new NamedRoute.Builder().method(GET).path("foo/bar").uniqueName("foo:bar_baz").handler(fooHandler).build(); + assertEquals(r.handler(), fooHandler); + } catch (OpenSearchException e) { + fail("Did not expect NamedRoute to throw exception"); + } + } + + public void testNamedRouteNullChecks() { + try { + NamedRoute r = new NamedRoute.Builder().method(null).path("foo/bar").uniqueName("foo:bar_baz").build(); + fail("Expected NamedRoute to throw exception as method should not be null"); + } catch (NullPointerException e) { + assertEquals("REST method must not be null.", e.getMessage()); + } + + try { + NamedRoute r = new NamedRoute.Builder().method(GET).path(null).uniqueName("foo:bar_baz").build(); + fail("Expected NamedRoute to throw exception as path should not be null"); + } catch (NullPointerException e) { + assertEquals("REST path must not be null.", e.getMessage()); + } + + try { + NamedRoute r = new NamedRoute.Builder().method(GET).path("foo/bar").uniqueName(null).build(); + fail("Expected NamedRoute to throw exception as route name should not be null"); + } catch (NullPointerException e) { + assertEquals("REST route name must not be null.", e.getMessage()); + } + + try { + NamedRoute r = new NamedRoute.Builder().method(GET).path("foo/bar").uniqueName("foo:bar_baz").handler(null).build(); + fail("Expected NamedRoute to throw exception as handler should not be null"); + } catch (NullPointerException e) { + assertEquals("Route handler must not be null.", e.getMessage()); + } + } + + public void testNamedRouteEmptyBuild() { + try { + NamedRoute r = new NamedRoute.Builder().build(); + fail("Expected NamedRoute to throw exception as fields should not be null"); + } catch (IllegalStateException e) { + assertEquals("REST method, path and uniqueName are required.", e.getMessage()); + } + + } + } diff --git a/server/src/test/java/org/opensearch/search/SearchServiceTests.java b/server/src/test/java/org/opensearch/search/SearchServiceTests.java index 8f8789a3a0323..74ef289c4b75f 100644 --- a/server/src/test/java/org/opensearch/search/SearchServiceTests.java +++ b/server/src/test/java/org/opensearch/search/SearchServiceTests.java @@ -1748,4 +1748,21 @@ public void testCanMatchSearchAfterDescEqualMin() throws IOException { primarySort.order(SortOrder.DESC); assertEquals(SearchService.canMatchSearchAfter(searchAfter, minMax, primarySort), true); } + + /** + * Test canMatchSearchAfter with missing value, even if min/max is out of range + * Min = 0L, Max = 9L, search_after = -1L + * Expected result is canMatch = true + */ + public void testCanMatchSearchAfterWithMissing() throws IOException { + FieldDoc searchAfter = new FieldDoc(0, 0, new Long[] { -1L }); + MinAndMax minMax = new MinAndMax(0L, 9L); + FieldSortBuilder primarySort = new FieldSortBuilder("test"); + primarySort.order(SortOrder.DESC); + // Should be false without missing values + assertEquals(SearchService.canMatchSearchAfter(searchAfter, minMax, primarySort), false); + primarySort.missing("_last"); + // Should be true with missing values + assertEquals(SearchService.canMatchSearchAfter(searchAfter, minMax, primarySort), true); + } } diff --git a/server/src/test/java/org/opensearch/search/pipeline/SearchPipelineServiceTests.java b/server/src/test/java/org/opensearch/search/pipeline/SearchPipelineServiceTests.java index 2ac0b2136ddd9..84f39e4bdab42 100644 --- 
a/server/src/test/java/org/opensearch/search/pipeline/SearchPipelineServiceTests.java +++ b/server/src/test/java/org/opensearch/search/pipeline/SearchPipelineServiceTests.java @@ -77,19 +77,17 @@ public class SearchPipelineServiceTests extends OpenSearchTestCase { private static final SearchPipelinePlugin DUMMY_PLUGIN = new SearchPipelinePlugin() { @Override - public Map> getRequestProcessors(Processor.Parameters parameters) { - return Map.of("foo", (factories, tag, description, config) -> null); + public Map> getRequestProcessors(Parameters parameters) { + return Map.of("foo", (factories, tag, description, config, ctx) -> null); } - public Map> getResponseProcessors(Processor.Parameters parameters) { - return Map.of("bar", (factories, tag, description, config) -> null); + public Map> getResponseProcessors(Parameters parameters) { + return Map.of("bar", (factories, tag, description, config, ctx) -> null); } @Override - public Map> getSearchPhaseResultsProcessors( - Processor.Parameters parameters - ) { - return Map.of("zoe", (factories, tag, description, config) -> null); + public Map> getSearchPhaseResultsProcessors(Parameters parameters) { + return Map.of("zoe", (factories, tag, description, config, ctx) -> null); } }; @@ -303,7 +301,7 @@ public SearchPhaseName getAfterPhase() { private SearchPipelineService createWithProcessors() { Map> requestProcessors = new HashMap<>(); - requestProcessors.put("scale_request_size", (processorFactories, tag, description, config) -> { + requestProcessors.put("scale_request_size", (processorFactories, tag, description, config, ctx) -> { float scale = ((Number) config.remove("scale")).floatValue(); return new FakeRequestProcessor( "scale_request_size", @@ -313,13 +311,13 @@ private SearchPipelineService createWithProcessors() { ); }); Map> responseProcessors = new HashMap<>(); - responseProcessors.put("fixed_score", (processorFactories, tag, description, config) -> { + responseProcessors.put("fixed_score", (processorFactories, tag, description, config, ctx) -> { float score = ((Number) config.remove("score")).floatValue(); return new FakeResponseProcessor("fixed_score", tag, description, rsp -> rsp.getHits().forEach(h -> h.score(score))); }); Map> searchPhaseProcessors = new HashMap<>(); - searchPhaseProcessors.put("max_score", (processorFactories, tag, description, config) -> { + searchPhaseProcessors.put("max_score", (processorFactories, tag, description, config, context) -> { final float finalScore = config.containsKey("score") ? 
((Number) config.remove("score")).floatValue() : 100f; final Consumer querySearchResultConsumer = (result) -> result.queryResult().topDocs().maxScore = finalScore; return new FakeSearchPhaseResultsProcessor("max_score", tag, description, querySearchResultConsumer); @@ -354,19 +352,17 @@ private SearchPipelineService createWithProcessors( this.writableRegistry(), Collections.singletonList(new SearchPipelinePlugin() { @Override - public Map> getRequestProcessors(Processor.Parameters parameters) { + public Map> getRequestProcessors(Parameters parameters) { return requestProcessors; } @Override - public Map> getResponseProcessors(Processor.Parameters parameters) { + public Map> getResponseProcessors(Parameters parameters) { return responseProcessors; } @Override - public Map> getSearchPhaseResultsProcessors( - Processor.Parameters parameters - ) { + public Map> getSearchPhaseResultsProcessors(Parameters parameters) { return phaseProcessors; } @@ -897,10 +893,9 @@ public void testInfo() { } public void testExceptionOnPipelineCreation() { - Map> badFactory = Map.of( - "bad_factory", - (pf, t, f, c) -> { throw new RuntimeException(); } - ); + Map> badFactory = Map.of("bad_factory", (pf, t, f, c, ctx) -> { + throw new RuntimeException(); + }); SearchPipelineService searchPipelineService = createWithProcessors(badFactory, Collections.emptyMap(), Collections.emptyMap()); Map pipelineSourceMap = new HashMap<>(); @@ -920,7 +915,7 @@ public void testExceptionOnRequestProcessing() { }); Map> throwingRequestProcessorFactory = Map.of( "throwing_request", - (pf, t, f, c) -> throwingRequestProcessor + (pf, t, f, c, ctx) -> throwingRequestProcessor ); SearchPipelineService searchPipelineService = createWithProcessors( @@ -945,7 +940,7 @@ public void testExceptionOnResponseProcessing() throws Exception { }); Map> throwingResponseProcessorFactory = Map.of( "throwing_response", - (pf, t, f, c) -> throwingResponseProcessor + (pf, t, f, c, ctx) -> throwingResponseProcessor ); SearchPipelineService searchPipelineService = createWithProcessors( @@ -955,7 +950,7 @@ public void testExceptionOnResponseProcessing() throws Exception { ); Map pipelineSourceMap = new HashMap<>(); - pipelineSourceMap.put(Pipeline.RESPONSE_PROCESSORS_KEY, List.of(Map.of("throwing_response", Collections.emptyMap()))); + pipelineSourceMap.put(Pipeline.RESPONSE_PROCESSORS_KEY, List.of(Map.of("throwing_response", new HashMap<>()))); SearchSourceBuilder sourceBuilder = SearchSourceBuilder.searchSource().size(100).searchPipelineSource(pipelineSourceMap); SearchRequest searchRequest = new SearchRequest().source(sourceBuilder); @@ -973,18 +968,18 @@ public void testStats() throws Exception { }); Map> requestProcessors = Map.of( "successful_request", - (pf, t, f, c) -> new FakeRequestProcessor("successful_request", "2", null, r -> {}), + (pf, t, f, c, ctx) -> new FakeRequestProcessor("successful_request", "2", null, r -> {}), "throwing_request", - (pf, t, f, c) -> throwingRequestProcessor + (pf, t, f, c, ctx) -> throwingRequestProcessor ); SearchResponseProcessor throwingResponseProcessor = new FakeResponseProcessor("throwing_response", "3", null, r -> { throw new RuntimeException(); }); Map> responseProcessors = Map.of( "successful_response", - (pf, t, f, c) -> new FakeResponseProcessor("successful_response", "4", null, r -> {}), + (pf, t, f, c, ctx) -> new FakeResponseProcessor("successful_response", "4", null, r -> {}), "throwing_response", - (pf, t, f, c) -> throwingResponseProcessor + (pf, t, f, c, ctx) -> throwingResponseProcessor ); 
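// The request, response and phase-results factories above all gained a fifth
// parameter (named ctx or context here): a context object handed to the factory
// when the pipeline is created. A rough sketch of what the functional interface
// might look like (names are illustrative, not this project's API verbatim):
//
//     @FunctionalInterface
//     interface Factory<T extends Processor> {
//         T create(
//             Map<String, Factory<T>> processorFactories,
//             String tag,
//             String description,
//             Map<String, Object> config,
//             PipelineContext ctx
//         ) throws Exception;
//     }
//
// The context lets a factory distinguish pipelines stored in cluster state from
// ad hoc pipelines defined inside a search request, which is what
// testAdHocRejectingProcessor further below relies on via ctx.getPipelineSource().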
SearchPipelineService searchPipelineService = createWithProcessors(requestProcessors, responseProcessors, Collections.emptyMap()); @@ -1088,4 +1083,64 @@ private static void assertPipelineStats(OperationStats stats, long count, long f assertEquals(stats.getCount(), count); assertEquals(stats.getFailedCount(), failed); } + + public void testAdHocRejectingProcessor() { + String processorType = "ad_hoc_rejecting"; + Map> requestProcessorFactories = Map.of(processorType, (pf, t, d, c, ctx) -> { + if (ctx.getPipelineSource() == Processor.PipelineSource.SEARCH_REQUEST) { + throw new IllegalArgumentException(processorType + " cannot be created as part of a pipeline defined in a search request"); + } + return new FakeRequestProcessor(processorType, t, d, r -> {}); + }); + + SearchPipelineService searchPipelineService = createWithProcessors( + requestProcessorFactories, + Collections.emptyMap(), + Collections.emptyMap() + ); + + String id = "_id"; + SearchPipelineService.PipelineHolder pipeline = searchPipelineService.getPipelines().get(id); + assertNull(pipeline); + ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); + PutSearchPipelineRequest putRequest = new PutSearchPipelineRequest( + id, + new BytesArray("{\"request_processors\":[" + " { \"" + processorType + "\": {}}" + "]}"), + XContentType.JSON + ); + ClusterState previousClusterState = clusterState; + clusterState = SearchPipelineService.innerPut(putRequest, clusterState); + // The following line successfully creates the pipeline: + searchPipelineService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); + + Map pipelineSourceMap = new HashMap<>(); + pipelineSourceMap.put(Pipeline.REQUEST_PROCESSORS_KEY, List.of(Map.of(processorType, Collections.emptyMap()))); + + SearchSourceBuilder sourceBuilder = SearchSourceBuilder.searchSource().searchPipelineSource(pipelineSourceMap); + SearchRequest searchRequest = new SearchRequest().source(sourceBuilder); + expectThrows(SearchPipelineProcessingException.class, () -> searchPipelineService.resolvePipeline(searchRequest)); + } + + public void testExtraParameterInProcessorConfig() { + SearchPipelineService searchPipelineService = createWithProcessors(); + + Map pipelineSourceMap = new HashMap<>(); + Map processorConfig = new HashMap<>( + Map.of("score", 1.0f, "tag", "my_tag", "comment", "I just like to add extra parameters so that I feel like I'm being heard.") + ); + pipelineSourceMap.put(Pipeline.RESPONSE_PROCESSORS_KEY, List.of(Map.of("fixed_score", processorConfig))); + SearchSourceBuilder sourceBuilder = SearchSourceBuilder.searchSource().searchPipelineSource(pipelineSourceMap); + SearchRequest searchRequest = new SearchRequest().source(sourceBuilder); + try { + searchPipelineService.resolvePipeline(searchRequest); + fail("Exception should have been thrown"); + } catch (SearchPipelineProcessingException e) { + assertTrue( + e.getMessage() + .contains("processor [fixed_score:my_tag] doesn't support one or more provided configuration parameters: [comment]") + ); + } catch (Exception e) { + fail("Wrong exception type: " + e.getClass()); + } + } } diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java index 0bb2b604e8f1a..88899a1b282af 100644 --- a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java @@ -1835,7 +1835,7 @@ 
public void onFailure(final Exception e) { emptyMap(), null, emptyMap(), - new RemoteSegmentStoreDirectoryFactory(() -> repositoriesService), + new RemoteSegmentStoreDirectoryFactory(() -> repositoriesService, threadPool), repositoriesServiceReference::get, fileCacheCleaner ); diff --git a/server/src/test/java/org/opensearch/telemetry/tracing/TracerFactoryTests.java b/server/src/test/java/org/opensearch/telemetry/tracing/TracerFactoryTests.java index 7968c6c43afb4..df9cdd6669d23 100644 --- a/server/src/test/java/org/opensearch/telemetry/tracing/TracerFactoryTests.java +++ b/server/src/test/java/org/opensearch/telemetry/tracing/TracerFactoryTests.java @@ -45,7 +45,7 @@ public void testGetTracerWithTracingDisabledReturnsNoopTracer() { Tracer tracer = tracerFactory.getTracer(); assertTrue(tracer instanceof NoopTracer); - assertTrue(tracer.startSpan("foo") == Scope.NO_OP); + assertTrue(tracer.startSpan("foo") == SpanScope.NO_OP); } public void testGetTracerWithTracingEnabledReturnsDefaultTracer() { diff --git a/settings.gradle b/settings.gradle index bf899d04c1e08..b7d47cd9b745e 100644 --- a/settings.gradle +++ b/settings.gradle @@ -10,7 +10,7 @@ */ plugins { - id "com.gradle.enterprise" version "3.11.1" + id "com.gradle.enterprise" version "3.13.3" } buildCache { @@ -87,7 +87,8 @@ List projects = [ 'test:fixtures:minio-fixture', 'test:fixtures:old-elasticsearch', 'test:fixtures:s3-fixture', - 'test:logger-usage' + 'test:logger-usage', + 'test:telemetry' ] /** diff --git a/test/framework/build.gradle b/test/framework/build.gradle index 255f554db7a79..2532fdf1938fd 100644 --- a/test/framework/build.gradle +++ b/test/framework/build.gradle @@ -38,6 +38,7 @@ dependencies { api project(':libs:opensearch-nio') api project(":server") api project(":libs:opensearch-cli") + api project(":test:telemetry") api "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" api "junit:junit:${versions.junit}" api "org.hamcrest:hamcrest:${versions.hamcrest}" diff --git a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java index ea9e9342673db..7f3819563dcbd 100644 --- a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java @@ -666,7 +666,7 @@ protected RemoteSegmentStoreDirectory createRemoteSegmentStoreDirectory(ShardId RemoteStoreLockManager remoteStoreLockManager = new RemoteStoreMetadataLockManager( new RemoteBufferedOutputDirectory(getBlobContainer(remoteShardPath.resolveIndex())) ); - return new RemoteSegmentStoreDirectory(dataDirectory, metadataDirectory, remoteStoreLockManager); + return new RemoteSegmentStoreDirectory(dataDirectory, metadataDirectory, remoteStoreLockManager, threadPool); } private RemoteDirectory newRemoteDirectory(Path f) throws IOException { diff --git a/test/framework/src/main/java/org/opensearch/index/shard/RestoreOnlyRepository.java b/test/framework/src/main/java/org/opensearch/index/shard/RestoreOnlyRepository.java index 2a85fffa8699a..67f3f7fc1f50c 100644 --- a/test/framework/src/main/java/org/opensearch/index/shard/RestoreOnlyRepository.java +++ b/test/framework/src/main/java/org/opensearch/index/shard/RestoreOnlyRepository.java @@ -44,6 +44,7 @@ import org.opensearch.index.mapper.MapperService; import org.opensearch.index.snapshots.IndexShardSnapshotStatus; import org.opensearch.index.store.Store; +import 
org.opensearch.index.store.lockmanager.RemoteStoreLockManagerFactory; import org.opensearch.repositories.IndexId; import org.opensearch.repositories.IndexMetaDataGenerations; import org.opensearch.repositories.Repository; @@ -205,4 +206,16 @@ public void cloneShardSnapshot( ) { throw new UnsupportedOperationException("Unsupported for restore-only repository"); } + + @Override + public void cloneRemoteStoreIndexShardSnapshot( + SnapshotId source, + SnapshotId target, + RepositoryShardId shardId, + String shardGeneration, + RemoteStoreLockManagerFactory remoteStoreLockManagerFactory, + ActionListener listener + ) { + throw new UnsupportedOperationException("Unsupported for restore-only repository"); + } } diff --git a/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java b/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java index ddf9f3e96b9b4..83051c7fed4e4 100644 --- a/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java @@ -51,6 +51,8 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Strings; import org.opensearch.common.UUIDs; +import org.opensearch.common.blobstore.BlobContainer; +import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.bytes.BytesReference; import org.opensearch.common.compress.CompressorType; import org.opensearch.common.settings.Settings; @@ -61,6 +63,9 @@ import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.index.IndexModule; +import org.opensearch.index.store.RemoteBufferedOutputDirectory; +import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.node.NodeClosedException; import org.opensearch.plugins.Plugin; import org.opensearch.repositories.RepositoriesService; @@ -511,6 +516,26 @@ protected void indexRandomDocs(String index, int numdocs) throws InterruptedExce assertDocCount(index, numdocs); } + protected Settings getRemoteStoreBackedIndexSettings(String remoteStoreRepo) { + return Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "1") + .put("index.refresh_interval", "300s") + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, "1") + .put(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), IndexModule.Type.FS.getSettingsKey()) + .put(IndexModule.INDEX_QUERY_CACHE_ENABLED_SETTING.getKey(), false) + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true) + .put(IndexMetadata.SETTING_REMOTE_STORE_REPOSITORY, remoteStoreRepo) + .build(); + } + + protected Settings.Builder snapshotRepoSettingsForShallowCopy(Path path) { + final Settings.Builder settings = Settings.builder(); + settings.put("location", path); + settings.put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), Boolean.TRUE); + return settings; + } + protected long getCountForIndex(String indexName) { return client().search( new SearchRequest(new SearchRequest(indexName).source(new SearchSourceBuilder().size(0).trackTotalHits(true))) @@ -521,6 +546,21 @@ protected void assertDocCount(String index, long count) { assertEquals(getCountForIndex(index), count); } + protected String[] getLockFilesInRemoteStore(String remoteStoreIndex, String remoteStoreRepositoryName) throws IOException { + String 
indexUUID = client().admin() + .indices() + .prepareGetSettings(remoteStoreIndex) + .get() + .getSetting(remoteStoreIndex, IndexMetadata.SETTING_INDEX_UUID); + final RepositoriesService repositoriesService = internalCluster().getCurrentClusterManagerNodeInstance(RepositoriesService.class); + final BlobStoreRepository remoteStoreRepository = (BlobStoreRepository) repositoriesService.repository(remoteStoreRepositoryName); + BlobPath shardLevelBlobPath = remoteStoreRepository.basePath().add(indexUUID).add("0").add("segments").add("lock_files"); + BlobContainer blobContainer = remoteStoreRepository.blobStore().blobContainer(shardLevelBlobPath); + try (RemoteBufferedOutputDirectory lockDirectory = new RemoteBufferedOutputDirectory(blobContainer)) { + return lockDirectory.listAll(); + } + } + /** * Adds a snapshot in state {@link SnapshotState#FAILED} to the given repository. * diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java index caa5b90016740..fec45219ace81 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java @@ -109,6 +109,7 @@ import org.opensearch.common.unit.ByteSizeUnit; import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.common.xcontent.XContentType; @@ -152,6 +153,7 @@ import org.opensearch.test.disruption.ServiceDisruptionScheme; import org.opensearch.test.store.MockFSIndexStore; import org.opensearch.test.transport.MockTransportService; +import org.opensearch.test.telemetry.MockTelemetryPlugin; import org.opensearch.transport.TransportInterceptor; import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportRequestHandler; @@ -776,6 +778,7 @@ protected Settings featureFlagSettings() { for (Setting builtInFlag : FeatureFlagSettings.BUILT_IN_FEATURE_FLAGS) { featureSettings.put(builtInFlag.getKey(), builtInFlag.getDefaultRaw(Settings.EMPTY)); } + featureSettings.put(FeatureFlags.TELEMETRY_SETTING.getKey(), true); return featureSettings.build(); } @@ -2101,6 +2104,7 @@ protected Collection> getMockPlugins() { if (addMockGeoShapeFieldMapper()) { mocks.add(TestGeoShapeFieldMapperPlugin.class); } + mocks.add(MockTelemetryPlugin.class); return Collections.unmodifiableList(mocks); } diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchSingleNodeTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchSingleNodeTestCase.java index bf797ef6b310b..91c48f1679f9a 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchSingleNodeTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchSingleNodeTestCase.java @@ -48,6 +48,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.BigArrays; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.concurrent.OpenSearchExecutors; import org.opensearch.common.util.io.IOUtils; import org.opensearch.core.common.Strings; @@ -66,6 +67,8 @@ import org.opensearch.plugins.Plugin; import org.opensearch.script.MockScriptService; import org.opensearch.search.internal.SearchContext; +import 
diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchSingleNodeTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchSingleNodeTestCase.java
index bf797ef6b310b..91c48f1679f9a 100644
--- a/test/framework/src/main/java/org/opensearch/test/OpenSearchSingleNodeTestCase.java
+++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchSingleNodeTestCase.java
@@ -48,6 +48,7 @@ import org.opensearch.common.settings.Settings;
 import org.opensearch.common.unit.TimeValue;
 import org.opensearch.common.util.BigArrays;
+import org.opensearch.common.util.FeatureFlags;
 import org.opensearch.common.util.concurrent.OpenSearchExecutors;
 import org.opensearch.common.util.io.IOUtils;
 import org.opensearch.core.common.Strings;
@@ -66,6 +67,8 @@ import org.opensearch.plugins.Plugin;
 import org.opensearch.script.MockScriptService;
 import org.opensearch.search.internal.SearchContext;
+import org.opensearch.telemetry.TelemetrySettings;
+import org.opensearch.test.telemetry.MockTelemetryPlugin;
 import org.opensearch.transport.TransportSettings;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
@@ -242,6 +245,8 @@ private Node newNode() {
             .put(HierarchyCircuitBreakerService.USE_REAL_MEMORY_USAGE_SETTING.getKey(), false)
             .putList(DISCOVERY_SEED_HOSTS_SETTING.getKey()) // empty list disables a port scan for other nodes
             .putList(INITIAL_CLUSTER_MANAGER_NODES_SETTING.getKey(), nodeName)
+            .put(FeatureFlags.TELEMETRY_SETTING.getKey(), true)
+            .put(TelemetrySettings.TRACER_ENABLED_SETTING.getKey(), true)
             .put(nodeSettings()) // allow test cases to provide their own settings or override these
             .build();
@@ -254,6 +259,7 @@ private Node newNode() {
             plugins.add(MockHttpTransport.TestPlugin.class);
         }
         plugins.add(MockScriptService.TestPlugin.class);
+        plugins.add(MockTelemetryPlugin.class);
         Node node = new MockNode(settings, plugins, forbidPrivateIndexSettings());
         try {
             node.start();
diff --git a/test/framework/src/main/java/org/opensearch/test/telemetry/MockTelemetry.java b/test/framework/src/main/java/org/opensearch/test/telemetry/MockTelemetry.java
new file mode 100644
index 0000000000000..c02ab1d737303
--- /dev/null
+++ b/test/framework/src/main/java/org/opensearch/test/telemetry/MockTelemetry.java
@@ -0,0 +1,42 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.test.telemetry;
+
+import org.opensearch.telemetry.Telemetry;
+import org.opensearch.telemetry.TelemetrySettings;
+import org.opensearch.telemetry.metrics.MetricsTelemetry;
+import org.opensearch.test.telemetry.tracing.MockTracingTelemetry;
+import org.opensearch.telemetry.tracing.TracingTelemetry;
+
+/**
+ * Mock {@link Telemetry} implementation for testing.
+ */
+public class MockTelemetry implements Telemetry {
+
+    private final TelemetrySettings settings;
+
+    /**
+     * Constructor with settings.
+     * @param settings telemetry settings.
+     */
+    public MockTelemetry(TelemetrySettings settings) {
+        this.settings = settings;
+    }
+
+    @Override
+    public TracingTelemetry getTracingTelemetry() {
+        return new MockTracingTelemetry();
+    }
+
+    @Override
+    public MetricsTelemetry getMetricsTelemetry() {
+        return new MetricsTelemetry() {
+        };
+    }
+}
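`MockTelemetry` is deliberately thin: tracing delegates to `MockTracingTelemetry` (added below under `test/telemetry`), and metrics is an empty anonymous `MetricsTelemetry`, which compiles only while that interface declares no abstract methods. A unit-level sketch of driving it directly, without a node (passing `null` settings is an assumption for brevity; the mock never reads them):

    Telemetry telemetry = new MockTelemetry(null); // settings unused by the mock itself
    TracingTelemetry tracing = telemetry.getTracingTelemetry();
    Span span = tracing.createSpan("my-span", null); // null parent => new trace id
    span.addAttribute("key", "value");
    span.endSpan();
    tracing.close(); // strict processor verifies every span was ended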
diff --git a/test/framework/src/main/java/org/opensearch/test/telemetry/MockTelemetryPlugin.java b/test/framework/src/main/java/org/opensearch/test/telemetry/MockTelemetryPlugin.java
new file mode 100644
index 0000000000000..41cc5c1e77a34
--- /dev/null
+++ b/test/framework/src/main/java/org/opensearch/test/telemetry/MockTelemetryPlugin.java
@@ -0,0 +1,39 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.test.telemetry;
+
+import java.util.Optional;
+import org.opensearch.plugins.Plugin;
+import org.opensearch.plugins.TelemetryPlugin;
+import org.opensearch.telemetry.Telemetry;
+import org.opensearch.telemetry.TelemetrySettings;
+
+/**
+ * Mock {@link TelemetryPlugin} implementation for testing.
+ */
+public class MockTelemetryPlugin extends Plugin implements TelemetryPlugin {
+    private static final String MOCK_TRACER_NAME = "mock";
+
+    /**
+     * Base constructor.
+     */
+    public MockTelemetryPlugin() {
+
+    }
+
+    @Override
+    public Optional<Telemetry> getTelemetry(TelemetrySettings settings) {
+        return Optional.of(new MockTelemetry(settings));
+    }
+
+    @Override
+    public String getName() {
+        return MOCK_TRACER_NAME;
+    }
+}
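The plugin is the whole integration surface: implement `TelemetryPlugin`, hand back an `Optional<Telemetry>`, and report a stable name. A hypothetical third-party implementation would follow the same shape (the class and name below are illustrative only, not part of this change):

    public class ExampleTelemetryPlugin extends Plugin implements TelemetryPlugin {
        @Override
        public Optional<Telemetry> getTelemetry(TelemetrySettings settings) {
            return Optional.of(new MockTelemetry(settings)); // swap in a real Telemetry here
        }

        @Override
        public String getName() {
            return "example";
        }
    }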
diff --git a/test/telemetry/build.gradle b/test/telemetry/build.gradle
new file mode 100644
index 0000000000000..fbabe43aa5e5a
--- /dev/null
+++ b/test/telemetry/build.gradle
@@ -0,0 +1,25 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ *
+ * Modifications Copyright OpenSearch Contributors. See
+ * GitHub history for details.
+ */
+
+apply plugin: 'opensearch.build'
+apply plugin: 'opensearch.publish'
+
+dependencies {
+  api project(":libs:opensearch-common")
+  api project(":libs:opensearch-telemetry")
+}
+
+tasks.named('forbiddenApisMain').configure {
+  // package does not depend on core, so only jdk signatures should be checked
+  replaceSignatureFiles 'jdk-signatures'
+}
+
+test.enabled = false
diff --git a/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/MockSpan.java b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/MockSpan.java
new file mode 100644
index 0000000000000..876145f6bf653
--- /dev/null
+++ b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/MockSpan.java
@@ -0,0 +1,163 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.test.telemetry.tracing;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Random;
+import java.util.concurrent.ThreadLocalRandom;
+import java.util.function.Supplier;
+import org.opensearch.telemetry.tracing.AbstractSpan;
+import org.opensearch.telemetry.tracing.Span;
+
+/**
+ * MockSpan for testing and strict check validations. Not to be used for production cases.
+ */
+public class MockSpan extends AbstractSpan {
+    private final SpanProcessor spanProcessor;
+    private final Map<String, Object> metadata;
+    private final String traceId;
+    private final String spanId;
+    private boolean hasEnded;
+    private final Long startTime;
+    private Long endTime;
+
+    private final Object lock = new Object();
+
+    private static final Supplier<Random> randomSupplier = ThreadLocalRandom::current;
+
+    /**
+     * Base Constructor.
+     * @param spanName span name
+     * @param parentSpan parent span
+     * @param spanProcessor span processor
+     */
+    public MockSpan(String spanName, Span parentSpan, SpanProcessor spanProcessor) {
+        this(
+            spanName,
+            parentSpan,
+            parentSpan != null ? parentSpan.getTraceId() : IdGenerator.generateTraceId(),
+            IdGenerator.generateSpanId(),
+            spanProcessor
+        );
+    }
+
+    /**
+     * Constructor with explicit traceId and spanId.
+     * @param spanName Span Name
+     * @param parentSpan Parent Span
+     * @param traceId Trace ID
+     * @param spanId Span ID
+     * @param spanProcessor Span Processor
+     */
+    public MockSpan(String spanName, Span parentSpan, String traceId, String spanId, SpanProcessor spanProcessor) {
+        super(spanName, parentSpan);
+        this.spanProcessor = spanProcessor;
+        this.metadata = new HashMap<>();
+        this.traceId = traceId;
+        this.spanId = spanId;
+        this.startTime = System.nanoTime();
+    }
+
+    @Override
+    public void endSpan() {
+        synchronized (lock) {
+            if (hasEnded) {
+                return;
+            }
+            endTime = System.nanoTime();
+            hasEnded = true;
+        }
+        spanProcessor.onEnd(this);
+    }
+
+    @Override
+    public void addAttribute(String key, String value) {
+        putMetadata(key, value);
+    }
+
+    @Override
+    public void addAttribute(String key, Long value) {
+        putMetadata(key, value);
+    }
+
+    @Override
+    public void addAttribute(String key, Double value) {
+        putMetadata(key, value);
+    }
+
+    @Override
+    public void addAttribute(String key, Boolean value) {
+        putMetadata(key, value);
+    }
+
+    @Override
+    public void addEvent(String event) {
+        putMetadata(event, null);
+    }
+
+    private void putMetadata(String key, Object value) {
+        metadata.put(key, value);
+    }
+
+    @Override
+    public String getTraceId() {
+        return traceId;
+    }
+
+    @Override
+    public String getSpanId() {
+        return spanId;
+    }
+
+    /**
+     * Returns whether the span is ended or not.
+     * @return span end status.
+     */
+    public boolean hasEnded() {
+        synchronized (lock) {
+            return hasEnded;
+        }
+    }
+
+    /**
+     * Returns the start time of the span.
+     * @return start time of the span.
+     */
+    public Long getStartTime() {
+        return startTime;
+    }
+
+    /**
+     * Returns the end time of the span.
+     * @return end time of the span.
+     */
+    public Long getEndTime() {
+        return endTime;
+    }
+
+    public void setError(Exception exception) {
+        putMetadata("ERROR", exception.getMessage());
+    }
+
+    private static class IdGenerator {
+        private static String generateSpanId() {
+            long id = randomSupplier.get().nextLong();
+            return Long.toHexString(id);
+        }
+
+        private static String generateTraceId() {
+            long idHi = randomSupplier.get().nextLong();
+            long idLo = randomSupplier.get().nextLong();
+            long result = idLo | (idHi << 32);
+            return Long.toHexString(result);
+        }
+
+    }
+}
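`MockSpan` guards `hasEnded` with a private lock so a second `endSpan()` is a no-op and the processor's `onEnd` fires exactly once; a span constructed with a non-null parent inherits the parent's trace id. A sketch of the lifecycle a test would exercise (the inline no-op processor is hypothetical):

    SpanProcessor noop = new SpanProcessor() {
        @Override
        public void onStart(Span span) {}

        @Override
        public void onEnd(Span span) {}
    };
    MockSpan parent = new MockSpan("parent", null, noop);
    MockSpan child = new MockSpan("child", parent, noop); // shares parent's trace id
    child.addAttribute("attempt", 1L);
    child.endSpan();
    child.endSpan(); // ignored: hasEnded() already true
    parent.endSpan();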
diff --git a/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/MockTracingContextPropagator.java b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/MockTracingContextPropagator.java
new file mode 100644
index 0000000000000..7e3f5a9031100
--- /dev/null
+++ b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/MockTracingContextPropagator.java
@@ -0,0 +1,56 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.test.telemetry.tracing;
+
+import java.util.Locale;
+import java.util.Map;
+import java.util.function.BiConsumer;
+import org.opensearch.telemetry.tracing.Span;
+import org.opensearch.telemetry.tracing.TracingContextPropagator;
+
+/**
+ * Mock {@link TracingContextPropagator} to propagate the span context for internode communication.
+ */
+public class MockTracingContextPropagator implements TracingContextPropagator {
+
+    private static final String TRACE_PARENT = "traceparent";
+    private static final String SEPARATOR = "~";
+    private final SpanProcessor spanProcessor;
+
+    /**
+     * Constructor
+     * @param spanProcessor span processor.
+     */
+    public MockTracingContextPropagator(SpanProcessor spanProcessor) {
+        this.spanProcessor = spanProcessor;
+    }
+
+    @Override
+    public Span extract(Map<String, String> props) {
+        String value = props.get(TRACE_PARENT);
+        if (value != null) {
+            String[] values = value.split(SEPARATOR);
+            String traceId = values[0];
+            String spanId = values[1];
+            return new MockSpan(null, null, traceId, spanId, spanProcessor);
+        } else {
+            return null;
+        }
+    }
+
+    @Override
+    public void inject(Span currentSpan, BiConsumer<String, String> setter) {
+        if (currentSpan instanceof MockSpan) {
+            String traceId = currentSpan.getTraceId();
+            String spanId = currentSpan.getSpanId();
+            String traceParent = String.format(Locale.ROOT, "%s%s%s", traceId, SEPARATOR, spanId);
+            setter.accept(TRACE_PARENT, traceParent);
+        }
+    }
+}
diff --git a/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/MockTracingTelemetry.java b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/MockTracingTelemetry.java
new file mode 100644
index 0000000000000..531b4ce36c36a
--- /dev/null
+++ b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/MockTracingTelemetry.java
@@ -0,0 +1,46 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.test.telemetry.tracing;
+
+import org.opensearch.telemetry.tracing.Span;
+import org.opensearch.telemetry.tracing.TracingContextPropagator;
+import org.opensearch.telemetry.tracing.TracingTelemetry;
+
+/**
+ * Mock {@link TracingTelemetry} implementation for testing.
+ */
+public class MockTracingTelemetry implements TracingTelemetry {
+
+    private final SpanProcessor spanProcessor = new StrictCheckSpanProcessor();
+
+    /**
+     * Base constructor.
+     */
+    public MockTracingTelemetry() {
+
+    }
+
+    @Override
+    public Span createSpan(String spanName, Span parentSpan) {
+        Span span = new MockSpan(spanName, parentSpan, spanProcessor);
+        spanProcessor.onStart(span);
+        return span;
+    }
+
+    @Override
+    public TracingContextPropagator getContextPropagator() {
+        return new MockTracingContextPropagator(spanProcessor);
+    }
+
+    @Override
+    public void close() {
+        ((StrictCheckSpanProcessor) spanProcessor).ensureAllSpansAreClosed();
+        ((StrictCheckSpanProcessor) spanProcessor).clear();
+    }
+}
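The propagator flattens the context to a single `traceparent` header of the form `<traceId>~<spanId>` and rebuilds a `MockSpan` from it on the receiving side. A round-trip sketch using a plain map in place of transport headers:

    SpanProcessor processor = new StrictCheckSpanProcessor();
    MockTracingContextPropagator propagator = new MockTracingContextPropagator(processor);

    MockSpan outbound = new MockSpan("send", null, processor);
    Map<String, String> headers = new HashMap<>();
    propagator.inject(outbound, headers::put); // writes the "traceparent" entry

    Span inbound = propagator.extract(headers);
    assert inbound.getTraceId().equals(outbound.getTraceId());
    assert inbound.getSpanId().equals(outbound.getSpanId());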
+     * @param span span which is starting.
+     */
+    void onStart(Span span);
+
+    /**
+     * Logic to be executed on span end.
+     * @param span span which is ending.
+     */
+    void onEnd(Span span);
+}
diff --git a/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/StrictCheckSpanProcessor.java b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/StrictCheckSpanProcessor.java
new file mode 100644
index 0000000000000..34d4d96809755
--- /dev/null
+++ b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/StrictCheckSpanProcessor.java
@@ -0,0 +1,84 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.test.telemetry.tracing;
+
+import java.util.Arrays;
+import java.util.Locale;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import org.opensearch.telemetry.tracing.Span;
+
+/**
+ * Strict check span processor to validate the spans.
+ */
+public class StrictCheckSpanProcessor implements SpanProcessor {
+    private final Map<String, StackTraceElement[]> spanMap = new ConcurrentHashMap<>();
+
+    /**
+     * Base constructor.
+     */
+    public StrictCheckSpanProcessor() {
+
+    }
+
+    @Override
+    public void onStart(Span span) {
+        spanMap.put(span.getSpanId(), Thread.currentThread().getStackTrace());
+    }
+
+    @Override
+    public void onEnd(Span span) {
+        spanMap.remove(span.getSpanId());
+    }
+
+    /**
+     * Ensures that all the spans are closed. Throws an {@link AssertionError} carrying the stack trace of the method
+     * from where the span was created. We can enhance it to print all the failed spans in a single go based on
+     * the usability.
+     */
+    public void ensureAllSpansAreClosed() {
+        if (!spanMap.isEmpty()) {
+            for (Map.Entry<String, StackTraceElement[]> entry : spanMap.entrySet()) {
+                StackTraceElement[] filteredStackTrace = getFilteredStackTrace(entry.getValue());
+                AssertionError error = new AssertionError(
+                    String.format(
+                        Locale.ROOT,
+                        " Total [%d] spans are not ended properly. " + "Find below the stack trace for one of the un-ended spans",
+                        spanMap.size()
+                    )
+                );
+                error.setStackTrace(filteredStackTrace);
+                spanMap.clear();
+                throw error;
+            }
+        }
+    }
+
+    /**
+     * Clears the state.
+     */
+    public void clear() {
+        spanMap.clear();
+    }
+
+    private StackTraceElement[] getFilteredStackTrace(StackTraceElement[] stackTraceElements) {
+        int filteredElementsCount = 0;
+        while (filteredElementsCount < stackTraceElements.length) {
+            String className = stackTraceElements[filteredElementsCount].getClassName();
+            if (className.startsWith("java.lang.Thread")
+                || className.startsWith("org.opensearch.telemetry")
+                || className.startsWith("org.opensearch.tracing")) {
+                filteredElementsCount++;
+            } else {
+                break;
+            }
+        }
+        return Arrays.copyOfRange(stackTraceElements, filteredElementsCount, stackTraceElements.length);
+    }
+}
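`StrictCheckSpanProcessor` records the creating stack trace at `onStart` and forgets it at `onEnd`; anything still in the map when `ensureAllSpansAreClosed()` runs becomes an `AssertionError` whose stack trace points at the leak. A sketch of what a leaked span looks like to a test:

    StrictCheckSpanProcessor processor = new StrictCheckSpanProcessor();
    MockSpan leaked = new MockSpan("leaked", null, processor);
    processor.onStart(leaked); // registered but never ended

    try {
        processor.ensureAllSpansAreClosed(); // throws: "Total [1] spans are not ended properly..."
    } catch (AssertionError expected) {
        // the error's stack trace identifies where the span was created
    } finally {
        processor.clear();
    }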
diff --git a/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/package-info.java b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/package-info.java
new file mode 100644
index 0000000000000..83e2b4035acbf
--- /dev/null
+++ b/test/telemetry/src/main/java/org/opensearch/test/telemetry/tracing/package-info.java
@@ -0,0 +1,10 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+/** Mock tracing classes for the telemetry test framework. */
+package org.opensearch.test.telemetry.tracing;
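Taken together, `MockTracingTelemetry.close()` is the enforcement point: it runs the strict check, so any test that leaves a span open fails at telemetry shutdown rather than passing silently. An end-to-end sketch of the happy path:

    MockTracingTelemetry tracingTelemetry = new MockTracingTelemetry();
    Span root = tracingTelemetry.createSpan("request", null);
    Span child = tracingTelemetry.createSpan("phase", root);
    child.endSpan();
    root.endSpan();
    tracingTelemetry.close(); // passes because every created span was ended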