diff --git a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java
index 3822455b83c3a..d648146a47208 100644
--- a/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java
+++ b/core/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java
@@ -703,6 +703,7 @@ public void testTrackScores() throws Exception {
         }
     }

+    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/26738")
     public void testTopHitsInNestedSimple() throws Exception {
         SearchResponse searchResponse = client().prepareSearch("articles")
             .setQuery(matchQuery("title", "title"))
diff --git a/docs/plugins/repository-azure.asciidoc b/docs/plugins/repository-azure.asciidoc
index ea413719f2446..583217324edda 100644
--- a/docs/plugins/repository-azure.asciidoc
+++ b/docs/plugins/repository-azure.asciidoc
@@ -44,15 +44,18 @@ The initial backoff period is defined by Azure SDK as `30s`. Which means `30s` of wait time
 before retrying after a first timeout or failure. The maximum backoff period is defined by Azure SDK as `90s`.

+`endpoint_suffix` can be used to specify the Azure endpoint suffix explicitly. It defaults to `core.windows.net`.
+
 [source,yaml]
 ----
 azure.client.default.timeout: 10s
 azure.client.default.max_retries: 7
+azure.client.default.endpoint_suffix: core.chinacloudapi.cn
 azure.client.secondary.timeout: 30s
 ----

 In this example, timeout will be `10s` per try for `default` with `7` retries before failing
-and `30s` per try for `secondary` with `3` retries.
+and the endpoint suffix will be `core.chinacloudapi.cn`, and `30s` per try for `secondary` with `3` retries.

 [IMPORTANT]
 .Supported Azure Storage Account types
diff --git a/docs/plugins/repository-gcs.asciidoc b/docs/plugins/repository-gcs.asciidoc
index fa4e8ca39807f..90c6e8f0ba4eb 100644
--- a/docs/plugins/repository-gcs.asciidoc
+++ b/docs/plugins/repository-gcs.asciidoc
@@ -93,7 +93,7 @@ A service account file looks like this:
 // NOTCONSOLE

 This file must be stored in the {ref}/secure-settings.html[elasticsearch keystore], under a setting name
-of the form `gcs.client.NAME.credentials_file`, where `NAME` is the name of the client congiguration.
+of the form `gcs.client.NAME.credentials_file`, where `NAME` is the name of the client configuration.
 The default client name is `default`, but a different client name can be
 specified in repository settings using `client`.
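To make the new `endpoint_suffix` documentation concrete: the suffix ends up in the Azure SDK connection string, which is what derives the per-service endpoints. A minimal sketch, assuming the standard Azure storage SDK connection-string keys (`DefaultEndpointsProtocol`, `AccountName`, `AccountKey`, `EndpointSuffix`); the helper name `buildConnectionString` and the sample values are illustrative, not code from this patch:

[source,java]
----
import com.microsoft.azure.storage.CloudStorageAccount;

public class EndpointSuffixExample {

    // Illustrative helper: append EndpointSuffix only when one is configured.
    static String buildConnectionString(String account, String key, String endpointSuffix) {
        String connectionString = "DefaultEndpointsProtocol=https;"
            + "AccountName=" + account + ";"
            + "AccountKey=" + key;
        if (endpointSuffix != null && endpointSuffix.isEmpty() == false) {
            connectionString += ";EndpointSuffix=" + endpointSuffix;
        }
        return connectionString;
    }

    public static void main(String[] args) throws Exception {
        // "bXlrZXk=" is just base64("mykey"); storage account keys must be valid Base64
        CloudStorageAccount account = CloudStorageAccount.parse(
            buildConnectionString("myaccount", "bXlrZXk=", "core.chinacloudapi.cn"));
        // Prints https://myaccount.blob.core.chinacloudapi.cn
        System.out.println(account.createCloudBlobClient().getEndpoint());
    }
}
----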
diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepositoryPlugin.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepositoryPlugin.java
index 3fd09962aedcb..c0126cb8df065 100644
--- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepositoryPlugin.java
+++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepositoryPlugin.java
@@ -60,6 +60,7 @@ public List<Setting<?>> getSettings() {
         return Arrays.asList(
             AzureStorageSettings.ACCOUNT_SETTING,
             AzureStorageSettings.KEY_SETTING,
+            AzureStorageSettings.ENDPOINT_SUFFIX_SETTING,
             AzureStorageSettings.TIMEOUT_SETTING,
             AzureStorageSettings.PROXY_TYPE_SETTING,
             AzureStorageSettings.PROXY_HOST_SETTING,
diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageServiceImpl.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageServiceImpl.java
index 03a590867a7ed..a4b415351b76e 100644
--- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageServiceImpl.java
+++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageServiceImpl.java
@@ -79,14 +79,18 @@ public AzureStorageServiceImpl(Settings settings, Map<String, AzureStorageSettings>
diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageSettings.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageSettings.java
--- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageSettings.java
+++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageSettings.java
             (key) -> Setting.intSetting(key, RetryPolicy.DEFAULT_CLIENT_RETRY_COUNT, Setting.Property.NodeScope));

+    /**
+     * Azure endpoint suffix. Defaults to core.windows.net (CloudStorageAccount.DEFAULT_DNS).
+     */
+    public static final Setting<String> ENDPOINT_SUFFIX_SETTING = Setting.affixKeySetting(PREFIX, "endpoint_suffix",
+        key -> Setting.simpleString(key, Property.NodeScope));

     /** Azure key */
     public static final AffixSetting<SecureString> KEY_SETTING = Setting.affixKeySetting(PREFIX, "key",
@@ -74,15 +79,17 @@ public final class AzureStorageSettings {
     private final String account;
     private final String key;
+    private final String endpointSuffix;
     private final TimeValue timeout;
     private final int maxRetries;
     private final Proxy proxy;

-    public AzureStorageSettings(String account, String key, TimeValue timeout, int maxRetries, Proxy.Type proxyType, String proxyHost,
-                                Integer proxyPort) {
+    public AzureStorageSettings(String account, String key, String endpointSuffix, TimeValue timeout, int maxRetries,
+                                Proxy.Type proxyType, String proxyHost, Integer proxyPort) {
         this.account = account;
         this.key = key;
+        this.endpointSuffix = endpointSuffix;
         this.timeout = timeout;
         this.maxRetries = maxRetries;
@@ -114,6 +121,10 @@ public String getAccount() {
         return account;
     }

+    public String getEndpointSuffix() {
+        return endpointSuffix;
+    }
+
     public TimeValue getTimeout() {
         return timeout;
     }
@@ -132,6 +143,7 @@ public String toString() {
         sb.append(", account='").append(account).append('\'');
         sb.append(", key='").append(key).append('\'');
         sb.append(", timeout=").append(timeout);
+        sb.append(", endpointSuffix='").append(endpointSuffix).append('\'');
         sb.append(", maxRetries=").append(maxRetries);
         sb.append(", proxy=").append(proxy);
         sb.append('}');
@@ -166,6 +178,7 @@ static AzureStorageSettings getClientSettings(Settings settings, String clientName) {
         try (SecureString account = getConfigValue(settings, clientName, ACCOUNT_SETTING);
              SecureString key = getConfigValue(settings, clientName, KEY_SETTING)) {
             return new AzureStorageSettings(account.toString(), key.toString(),
+                getValue(settings, clientName, ENDPOINT_SUFFIX_SETTING),
                 getValue(settings, clientName, TIMEOUT_SETTING),
                 getValue(settings, clientName, MAX_RETRIES_SETTING),
                 getValue(settings, clientName, PROXY_TYPE_SETTING),
                 getValue(settings, clientName, PROXY_HOST_SETTING),
                 getValue(settings, clientName, PROXY_PORT_SETTING));
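`ENDPOINT_SUFFIX_SETTING` is an affix setting, so the single definition above covers `azure.client.<name>.endpoint_suffix` for every named client. A sketch of how such a setting resolves per namespace, assuming the stock `Setting` API; the prefix constant mirrors the plugin's `PREFIX` but is redeclared here for self-containment:

[source,java]
----
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.AffixSetting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;

public class AffixSettingSketch {

    static final String PREFIX = "azure.client.";

    static final AffixSetting<String> ENDPOINT_SUFFIX = Setting.affixKeySetting(
        PREFIX, "endpoint_suffix", key -> Setting.simpleString(key, Property.NodeScope));

    public static void main(String[] args) {
        Settings settings = Settings.builder()
            .put("azure.client.azure1.endpoint_suffix", "core.chinacloudapi.cn")
            .build();

        // Resolves the concrete key azure.client.azure1.endpoint_suffix
        System.out.println(ENDPOINT_SUFFIX.getConcreteSettingForNamespace("azure1").get(settings));
        // An unconfigured client falls back to simpleString's default, the empty
        // string, which is why the tests below expect isEmptyString() for azure1/azure2
        System.out.println(ENDPOINT_SUFFIX.getConcreteSettingForNamespace("azure2").get(settings));
    }
}
----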
diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceTests.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceTests.java
index 76a99fc174c58..72cd015f14847 100644
--- a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceTests.java
+++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceTests.java
@@ -22,6 +22,8 @@
 import com.microsoft.azure.storage.LocationMode;
 import com.microsoft.azure.storage.RetryExponentialRetry;
 import com.microsoft.azure.storage.blob.CloudBlobClient;
+import com.microsoft.azure.storage.core.Base64;
+
 import org.elasticsearch.common.settings.MockSecureSettings;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.settings.SettingsException;
@@ -33,12 +35,15 @@
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.net.UnknownHostException;
+import java.nio.charset.StandardCharsets;
 import java.util.Map;

 import static org.elasticsearch.repositories.azure.AzureStorageServiceImpl.blobNameFromUri;
 import static org.hamcrest.Matchers.containsInAnyOrder;
+import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.instanceOf;
 import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.isEmptyString;
 import static org.hamcrest.Matchers.notNullValue;
 import static org.hamcrest.Matchers.nullValue;
@@ -69,10 +74,31 @@ public void testReadSecuredSettings() {
         secureSettings.setString("azure.client.azure2.key", "mykey2");
         secureSettings.setString("azure.client.azure3.account", "myaccount3");
         secureSettings.setString("azure.client.azure3.key", "mykey3");
-        Settings settings = Settings.builder().setSecureSettings(secureSettings).build();
+        Settings settings = Settings.builder().setSecureSettings(secureSettings)
+            .put("azure.client.azure3.endpoint_suffix", "my_endpoint_suffix").build();

         Map<String, AzureStorageSettings> loadedSettings = AzureStorageSettings.load(settings);
         assertThat(loadedSettings.keySet(), containsInAnyOrder("azure1","azure2","azure3","default"));
+
+        assertThat(loadedSettings.get("azure1").getEndpointSuffix(), isEmptyString());
+        assertThat(loadedSettings.get("azure2").getEndpointSuffix(), isEmptyString());
+        assertThat(loadedSettings.get("azure3").getEndpointSuffix(), equalTo("my_endpoint_suffix"));
+    }
+
+    public void testCreateClientWithEndpointSuffix() {
+        MockSecureSettings secureSettings = new MockSecureSettings();
+        secureSettings.setString("azure.client.azure1.account", "myaccount1");
+        secureSettings.setString("azure.client.azure1.key", Base64.encode("mykey1".getBytes(StandardCharsets.UTF_8)));
+        secureSettings.setString("azure.client.azure2.account", "myaccount2");
+        secureSettings.setString("azure.client.azure2.key", Base64.encode("mykey2".getBytes(StandardCharsets.UTF_8)));
+        Settings settings = Settings.builder().setSecureSettings(secureSettings)
+            .put("azure.client.azure1.endpoint_suffix", "my_endpoint_suffix").build();
+        AzureStorageServiceImpl azureStorageService = new AzureStorageServiceImpl(settings, AzureStorageSettings.load(settings));
+        CloudBlobClient client1 = azureStorageService.getSelectedClient("azure1", LocationMode.PRIMARY_ONLY);
+        assertThat(client1.getEndpoint().toString(), equalTo("https://myaccount1.blob.my_endpoint_suffix"));
+
+        CloudBlobClient client2 = azureStorageService.getSelectedClient("azure2", LocationMode.PRIMARY_ONLY);
+        assertThat(client2.getEndpoint().toString(), equalTo("https://myaccount2.blob.core.windows.net"));
     }

     public void testGetSelectedClientWithNoPrimaryAndSecondary() {
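The two endpoint assertions above pin down the scheme `https://{account}.blob.{suffix}`, with `core.windows.net` as the fallback. That expectation can be restated in plain JDK code (names here are illustrative; the production test uses the Azure SDK's `Base64` helper rather than `java.util.Base64`, because storage account keys must be valid Base64):

[source,java]
----
import java.nio.charset.StandardCharsets;
import java.util.Base64;

public class EndpointShapeSketch {

    // Mirrors the endpoint shape asserted by testCreateClientWithEndpointSuffix
    static String expectedBlobEndpoint(String account, String endpointSuffix) {
        String suffix = endpointSuffix == null || endpointSuffix.isEmpty()
            ? "core.windows.net" // CloudStorageAccount.DEFAULT_DNS
            : endpointSuffix;
        return "https://" + account + ".blob." + suffix;
    }

    public static void main(String[] args) {
        // Keys are Base64-encoded before being stored, as in the test setup
        System.out.println(Base64.getEncoder()
            .encodeToString("mykey1".getBytes(StandardCharsets.UTF_8))); // bXlrZXkx

        System.out.println(expectedBlobEndpoint("myaccount1", "my_endpoint_suffix"));
        // https://myaccount1.blob.my_endpoint_suffix
        System.out.println(expectedBlobEndpoint("myaccount2", ""));
        // https://myaccount2.blob.core.windows.net
    }
}
----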
diff --git a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobContainer.java b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobContainer.java
index 4649cf858d254..f160f4c4ead8e 100644
--- a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobContainer.java
+++ b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobContainer.java
@@ -23,6 +23,7 @@
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Options.CreateOpts;
 import org.apache.hadoop.fs.Path;
+import org.elasticsearch.SpecialPermission;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.blobstore.BlobMetaData;
 import org.elasticsearch.common.blobstore.BlobPath;
@@ -45,12 +46,14 @@ final class HdfsBlobContainer extends AbstractBlobContainer {
     private final HdfsBlobStore store;
+    private final HdfsSecurityContext securityContext;
     private final Path path;
     private final int bufferSize;

-    HdfsBlobContainer(BlobPath blobPath, HdfsBlobStore store, Path path, int bufferSize) {
+    HdfsBlobContainer(BlobPath blobPath, HdfsBlobStore store, Path path, int bufferSize, HdfsSecurityContext hdfsSecurityContext) {
         super(blobPath);
         this.store = store;
+        this.securityContext = hdfsSecurityContext;
         this.path = path;
         this.bufferSize = bufferSize;
     }
@@ -90,7 +93,9 @@ public InputStream readBlob(String blobName) throws IOException {
         // FSDataInputStream can open connections on read() or skip() so we wrap in
         // HDFSPrivilegedInputSteam which will ensure that underlying methods will
         // be called with the proper privileges.
-        return store.execute(fileContext -> new HDFSPrivilegedInputSteam(fileContext.open(new Path(path, blobName), bufferSize)));
+        return store.execute(fileContext ->
+            new HDFSPrivilegedInputSteam(fileContext.open(new Path(path, blobName), bufferSize), securityContext)
+        );
     }

     @Override
@@ -144,8 +149,11 @@ public Map<String, BlobMetaData> listBlobs() throws IOException {
      */
     private static class HDFSPrivilegedInputSteam extends FilterInputStream {

-        HDFSPrivilegedInputSteam(InputStream in) {
+        private final HdfsSecurityContext securityContext;
+
+        HDFSPrivilegedInputSteam(InputStream in, HdfsSecurityContext hdfsSecurityContext) {
             super(in);
+            this.securityContext = hdfsSecurityContext;
         }

         public int read() throws IOException {
@@ -175,9 +183,10 @@ public synchronized void reset() throws IOException {
             });
         }

-        private static <T> T doPrivilegedOrThrow(PrivilegedExceptionAction<T> action) throws IOException {
+        private <T> T doPrivilegedOrThrow(PrivilegedExceptionAction<T> action) throws IOException {
+            SpecialPermission.check();
             try {
-                return AccessController.doPrivileged(action);
+                return AccessController.doPrivileged(action, null, securityContext.getRestrictedExecutionPermissions());
             } catch (PrivilegedActionException e) {
                 throw (IOException) e.getCause();
             }
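The interesting part of this change is the switch from plain `doPrivileged(action)` to the limited form: the stream now asserts only the permissions returned by `HdfsSecurityContext.getRestrictedExecutionPermissions()`, plus an up-front `SpecialPermission.check()` so untrusted callers cannot reach the privileged block at all. A stripped-down sketch of the JDK idiom (the permission list here is a stand-in, not the plugin's actual set):

[source,java]
----
import java.io.IOException;
import java.security.AccessController;
import java.security.Permission;
import java.security.PrivilegedActionException;
import java.security.PrivilegedExceptionAction;

import javax.security.auth.AuthPermission;

public class LimitedPrivilegeSketch {

    // Stand-in for HdfsSecurityContext.getRestrictedExecutionPermissions()
    private static final Permission[] RESTRICTED = new Permission[] {
        new AuthPermission("modifyPrivateCredentials")
    };

    static <T> T doPrivilegedOrThrow(PrivilegedExceptionAction<T> action) throws IOException {
        try {
            // Passing a null context plus explicit permissions (Java 8+) runs the
            // action with only those permissions asserted, instead of everything
            // granted to this code's protection domain.
            return AccessController.doPrivileged(action, null, RESTRICTED);
        } catch (PrivilegedActionException e) {
            throw (IOException) e.getCause();
        }
    }

    public static void main(String[] args) throws IOException {
        System.out.println(doPrivilegedOrThrow(() -> "ran with restricted privileges"));
    }
}
----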
diff --git a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobStore.java b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobStore.java
index 8d88b7fd07422..fb26bd4675428 100644
--- a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobStore.java
+++ b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobStore.java
@@ -75,7 +75,7 @@ public String toString() {

     @Override
     public BlobContainer blobContainer(BlobPath path) {
-        return new HdfsBlobContainer(path, this, buildHdfsPath(path), bufferSize);
+        return new HdfsBlobContainer(path, this, buildHdfsPath(path), bufferSize, securityContext);
     }

     private Path buildHdfsPath(BlobPath blobPath) {
diff --git a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsRepository.java b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsRepository.java
index 16ed9d06a5e8a..66975e4dcc6d7 100644
--- a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsRepository.java
+++ b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsRepository.java
@@ -132,7 +132,7 @@ private FileContext createContext(URI uri, Settings repositorySettings) {
         hadoopConfiguration.setBoolean("fs.hdfs.impl.disable.cache", true);

         // Create the filecontext with our user information
-        // This will correctly configure the filecontext to have our UGI as it's internal user.
+        // This will correctly configure the filecontext to have our UGI as its internal user.
         return ugi.doAs((PrivilegedAction<FileContext>) () -> {
             try {
                 AbstractFileSystem fs = AbstractFileSystem.get(uri, hadoopConfiguration);
diff --git a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsSecurityContext.java b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsSecurityContext.java
index 3cd1a5a40fdc0..bd16d87d87923 100644
--- a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsSecurityContext.java
+++ b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsSecurityContext.java
@@ -56,7 +56,9 @@ class HdfsSecurityContext {
         // 1) hadoop dynamic proxy is messy with access rules
         new ReflectPermission("suppressAccessChecks"),
         // 2) allow hadoop to add credentials to our Subject
-        new AuthPermission("modifyPrivateCredentials")
+        new AuthPermission("modifyPrivateCredentials"),
+        // 3) RPC Engine requires this for re-establishing pooled connections over the lifetime of the client
+        new PrivateCredentialPermission("org.apache.hadoop.security.Credentials * \"*\"", "read")
     };

     // If Security is enabled, we need all the following elevated permissions:
diff --git a/plugins/repository-hdfs/src/test/resources/rest-api-spec/test/hdfs_repository/30_snapshot_readonly.yml b/plugins/repository-hdfs/src/test/resources/rest-api-spec/test/hdfs_repository/30_snapshot_readonly.yml
new file mode 100644
index 0000000000000..c2a37964e70a7
--- /dev/null
+++ b/plugins/repository-hdfs/src/test/resources/rest-api-spec/test/hdfs_repository/30_snapshot_readonly.yml
@@ -0,0 +1,29 @@
+# Integration tests for HDFS Repository plugin
+#
+# Tests retrieving information about snapshot
+#
+---
+"Get a snapshot - readonly":
+  # Create repository
+  - do:
+      snapshot.create_repository:
+        repository: test_snapshot_repository_ro
+        body:
+          type: hdfs
+          settings:
+            uri: "hdfs://localhost:9999"
+            path: "/user/elasticsearch/existing/readonly-repository"
+            readonly: true
+
+  # List snapshot info
+  - do:
+      snapshot.get:
+        repository: test_snapshot_repository_ro
+        snapshot: "_all"
+
+  - length: { snapshots: 1 }
+
+  # Remove our repository
+  - do:
+      snapshot.delete_repository:
+        repository: test_snapshot_repository_ro
diff --git a/plugins/repository-hdfs/src/test/resources/rest-api-spec/test/secure_hdfs_repository/30_snapshot_readonly.yml b/plugins/repository-hdfs/src/test/resources/rest-api-spec/test/secure_hdfs_repository/30_snapshot_readonly.yml
new file mode 100644
index 0000000000000..8c4c0347a156a
--- /dev/null
+++ b/plugins/repository-hdfs/src/test/resources/rest-api-spec/test/secure_hdfs_repository/30_snapshot_readonly.yml
@@ -0,0 +1,31 @@
+# Integration tests for HDFS Repository plugin
+#
+# Tests retrieving information about snapshot
+#
+---
+"Get a snapshot - readonly":
+  # Create repository
+  - do:
+      snapshot.create_repository:
+        repository: test_snapshot_repository_ro
+        body:
+          type: hdfs
+          settings:
+            uri: "hdfs://localhost:9998"
+            path: "/user/elasticsearch/existing/readonly-repository"
+            security:
+              principal: "elasticsearch@BUILD.ELASTIC.CO"
+            readonly: true
+
+  # List snapshot info
+  - do:
+      snapshot.get:
+        repository: test_snapshot_repository_ro
+        snapshot: "_all"
+
+  - length: { snapshots: 1 }
+
+  # Remove our repository
+  - do:
+      snapshot.delete_repository:
+        repository: test_snapshot_repository_ro
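Both suites follow the same register → `snapshot.get` → deregister flow; only the Kerberos `security.principal` and the fixture port differ. For readers who want to poke at the fixture outside the YAML runner, a rough equivalent using the 5.x/6.x low-level REST client might look like this (host, port, and repository name are assumptions):

[source,java]
----
import java.util.Collections;

import org.apache.http.HttpHost;
import org.apache.http.entity.ContentType;
import org.apache.http.nio.entity.NStringEntity;
import org.apache.http.util.EntityUtils;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class ReadonlyRepoSketch {

    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            // Register a readonly repository pointing at the pre-seeded HDFS path
            String body = "{\"type\":\"hdfs\",\"settings\":{"
                + "\"uri\":\"hdfs://localhost:9999\","
                + "\"path\":\"/user/elasticsearch/existing/readonly-repository\","
                + "\"readonly\":true}}";
            client.performRequest("PUT", "/_snapshot/test_snapshot_repository_ro",
                Collections.emptyMap(), new NStringEntity(body, ContentType.APPLICATION_JSON));

            // Expect exactly one snapshot, served from the read-only data
            Response response = client.performRequest("GET",
                "/_snapshot/test_snapshot_repository_ro/_all");
            System.out.println(EntityUtils.toString(response.getEntity()));

            // Deregistering does not touch the underlying HDFS contents
            client.performRequest("DELETE", "/_snapshot/test_snapshot_repository_ro");
        }
    }
}
----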
diff --git a/test/fixtures/hdfs-fixture/src/main/java/hdfs/MiniHDFS.java b/test/fixtures/hdfs-fixture/src/main/java/hdfs/MiniHDFS.java
index 7d41d94e99a3d..73f4e443b0769 100644
--- a/test/fixtures/hdfs-fixture/src/main/java/hdfs/MiniHDFS.java
+++ b/test/fixtures/hdfs-fixture/src/main/java/hdfs/MiniHDFS.java
@@ -19,7 +19,9 @@
 package hdfs;

+import java.io.File;
 import java.lang.management.ManagementFactory;
+import java.net.URL;
 import java.nio.charset.StandardCharsets;
 import java.nio.file.Files;
 import java.nio.file.Path;
@@ -29,9 +31,11 @@
 import java.util.Arrays;
 import java.util.List;

+import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclEntryType;
 import org.apache.hadoop.fs.permission.FsAction;
@@ -100,15 +104,35 @@ public static void main(String[] args) throws Exception {
         }
         MiniDFSCluster dfs = builder.build();

-        // Set the elasticsearch user directory up
-        if (UserGroupInformation.isSecurityEnabled()) {
-            FileSystem fs = dfs.getFileSystem();
-            org.apache.hadoop.fs.Path esUserPath = new org.apache.hadoop.fs.Path("/user/elasticsearch");
+        // Configure contents of the filesystem
+        org.apache.hadoop.fs.Path esUserPath = new org.apache.hadoop.fs.Path("/user/elasticsearch");
+        try (FileSystem fs = dfs.getFileSystem()) {
+
+            // Set the elasticsearch user directory up
             fs.mkdirs(esUserPath);
-            List<AclEntry> acls = new ArrayList<>();
-            acls.add(new AclEntry.Builder().setType(AclEntryType.USER).setName("elasticsearch").setPermission(FsAction.ALL).build());
-            fs.modifyAclEntries(esUserPath, acls);
-            fs.close();
+            if (UserGroupInformation.isSecurityEnabled()) {
+                List<AclEntry> acls = new ArrayList<>();
+                acls.add(new AclEntry.Builder().setType(AclEntryType.USER).setName("elasticsearch").setPermission(FsAction.ALL).build());
+                fs.modifyAclEntries(esUserPath, acls);
+            }
+
+            // Install a pre-existing repository into HDFS
+            String directoryName = "readonly-repository";
+            String archiveName = directoryName + ".tar.gz";
+            URL readOnlyRepositoryArchiveURL = MiniHDFS.class.getClassLoader().getResource(archiveName);
+            if (readOnlyRepositoryArchiveURL != null) {
+                Path tempDirectory = Files.createTempDirectory(MiniHDFS.class.getName());
+                File readOnlyRepositoryArchive = tempDirectory.resolve(archiveName).toFile();
+                FileUtils.copyURLToFile(readOnlyRepositoryArchiveURL, readOnlyRepositoryArchive);
+                FileUtil.unTar(readOnlyRepositoryArchive, tempDirectory.toFile());
+
+                fs.copyFromLocalFile(true, true,
+                    new org.apache.hadoop.fs.Path(tempDirectory.resolve(directoryName).toAbsolutePath().toUri()),
+                    esUserPath.suffix("/existing/" + directoryName)
+                );
+
+                FileUtils.deleteDirectory(tempDirectory.toFile());
+            }
+        }

         // write our PID file
diff --git a/test/fixtures/hdfs-fixture/src/main/resources/readonly-repository.tar.gz b/test/fixtures/hdfs-fixture/src/main/resources/readonly-repository.tar.gz
new file mode 100644
index 0000000000000..2cdb6d77c07d0
Binary files /dev/null and b/test/fixtures/hdfs-fixture/src/main/resources/readonly-repository.tar.gz differ
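Since the tar.gz fixture itself is opaque in review, a quick way to confirm what MiniHDFS actually installed is to list the target path directly. This is illustrative only; it assumes the unsecured fixture is running on port 9999 and that the Hadoop HDFS client is on the classpath:

[source,java]
----
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FixtureContentsCheck {

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:9999");
        try (FileSystem fs = FileSystem.get(conf)) {
            // The repository metadata and blobs untarred by MiniHDFS should be here
            for (FileStatus status : fs.listStatus(new Path("/user/elasticsearch/existing/readonly-repository"))) {
                System.out.println(status.getPath() + "\t" + status.getLen());
            }
        }
    }
}
----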