Merge branch 'master' into global-checkpoint-sync
* master:
  Add permission checks before reading from HDFS stream (elastic#26716)
  muted test
  [Docs] Fixed typo of *configuration* (elastic#25058)
  Add azure storage endpoint suffix elastic#26432 (elastic#26568)
jasontedor committed Sep 21, 2017
2 parents b8adcce + c760eec commit c041ea2
Showing 15 changed files with 166 additions and 23 deletions.
@@ -703,6 +703,7 @@ public void testTrackScores() throws Exception {
}
}

+@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/26738")
public void testTopHitsInNestedSimple() throws Exception {
SearchResponse searchResponse = client().prepareSearch("articles")
.setQuery(matchQuery("title", "title"))
5 changes: 4 additions & 1 deletion docs/plugins/repository-azure.asciidoc
@@ -44,15 +44,18 @@ The initial backoff period is defined by Azure SDK as `30s`. Which means `30s` of wait time
before retrying after a first timeout or failure. The maximum backoff period is defined by Azure SDK as
`90s`.

+`endpoint_suffix` can be used to specify the Azure endpoint suffix explicitly. Defaults to `core.windows.net`.

[source,yaml]
----
azure.client.default.timeout: 10s
azure.client.default.max_retries: 7
+azure.client.default.endpoint_suffix: core.chinacloudapi.cn
azure.client.secondary.timeout: 30s
----

In this example, timeout will be `10s` per try for `default` with `7` retries before failing
-and `30s` per try for `secondary` with `3` retries.
+and endpoint suffix will be `core.chinacloudapi.cn` and `30s` per try for `secondary` with `3` retries.
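
For context (not part of this commit): a minimal standalone sketch against the `com.microsoft.azure.storage` SDK the plugin uses, showing how an `EndpointSuffix` entry in the connection string changes the blob endpoint the client ends up with. The class name, account name, and key are placeholders; the key only needs to be well-formed Base64 for `parse` to accept it.

[source,java]
----
import java.nio.charset.StandardCharsets;
import java.util.Base64;

import com.microsoft.azure.storage.CloudStorageAccount;
import com.microsoft.azure.storage.blob.CloudBlobClient;

public class EndpointSuffixSketch {
    public static void main(String[] args) throws Exception {
        // Placeholder credentials; the key just has to be well-formed Base64.
        String key = Base64.getEncoder().encodeToString("not-a-real-key".getBytes(StandardCharsets.UTF_8));
        String connectionString = "DefaultEndpointsProtocol=https;"
                + "AccountName=myaccount;"
                + "AccountKey=" + key + ";"
                + "EndpointSuffix=core.chinacloudapi.cn";

        // Prints https://myaccount.blob.core.chinacloudapi.cn; without the
        // EndpointSuffix entry the SDK falls back to core.windows.net.
        CloudBlobClient client = CloudStorageAccount.parse(connectionString).createCloudBlobClient();
        System.out.println(client.getEndpoint());
    }
}
----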

[IMPORTANT]
.Supported Azure Storage Account types
2 changes: 1 addition & 1 deletion docs/plugins/repository-gcs.asciidoc
@@ -93,7 +93,7 @@ A service account file looks like this:
// NOTCONSOLE

This file must be stored in the {ref}/secure-settings.html[elasticsearch keystore], under a setting name
-of the form `gcs.client.NAME.credentials_file`, where `NAME` is the name of the client congiguration.
+of the form `gcs.client.NAME.credentials_file`, where `NAME` is the name of the client configuration.
The default client name is `default`, but a different client name can be specified in repository
settings using `client`.

@@ -60,6 +60,7 @@ public List<Setting<?>> getSettings() {
return Arrays.asList(
AzureStorageSettings.ACCOUNT_SETTING,
AzureStorageSettings.KEY_SETTING,
+AzureStorageSettings.ENDPOINT_SUFFIX_SETTING,
AzureStorageSettings.TIMEOUT_SETTING,
AzureStorageSettings.PROXY_TYPE_SETTING,
AzureStorageSettings.PROXY_HOST_SETTING,
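Registering the new setting here matters: Elasticsearch validates node settings against what plugins return from `Plugin#getSettings()`, so an undeclared `azure.client.*.endpoint_suffix` key would be rejected at startup. A minimal sketch of that contract with a hypothetical `ExamplePlugin` (assuming, as this diff shows, that `ENDPOINT_SUFFIX_SETTING` is public on `AzureStorageSettings`):

[source,java]
----
import java.util.Arrays;
import java.util.List;

import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.repositories.azure.AzureStorageSettings;

// Hypothetical plugin: a custom setting is only accepted in the node
// configuration once some installed plugin has declared it like this.
public class ExamplePlugin extends Plugin {
    @Override
    public List<Setting<?>> getSettings() {
        return Arrays.<Setting<?>>asList(AzureStorageSettings.ENDPOINT_SUFFIX_SETTING);
    }
}
----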
@@ -79,14 +79,18 @@ public AzureStorageServiceImpl(Settings settings, Map<String, AzureStorageSettin

void createClient(AzureStorageSettings azureStorageSettings) {
try {
logger.trace("creating new Azure storage client using account [{}], key [{}]",
azureStorageSettings.getAccount(), azureStorageSettings.getKey());
logger.trace("creating new Azure storage client using account [{}], key [{}], endpoint suffix [{}]",
azureStorageSettings.getAccount(), azureStorageSettings.getKey(), azureStorageSettings.getEndpointSuffix());

String storageConnectionString =
"DefaultEndpointsProtocol=https;"
+ "AccountName=" + azureStorageSettings.getAccount() + ";"
+ "AccountKey=" + azureStorageSettings.getKey();

+String endpointSuffix = azureStorageSettings.getEndpointSuffix();
+if (endpointSuffix != null && !endpointSuffix.isEmpty()) {
+    storageConnectionString += ";EndpointSuffix=" + endpointSuffix;
+}
// Retrieve storage account from connection-string.
CloudStorageAccount storageAccount = CloudStorageAccount.parse(storageConnectionString);

@@ -52,6 +52,11 @@ public final class AzureStorageSettings {
private static final Setting<Integer> MAX_RETRIES_SETTING =
Setting.affixKeySetting(PREFIX, "max_retries",
(key) -> Setting.intSetting(key, RetryPolicy.DEFAULT_CLIENT_RETRY_COUNT, Setting.Property.NodeScope));
+/**
+ * Azure endpoint suffix. Defaults to core.windows.net (CloudStorageAccount.DEFAULT_DNS).
+ */
+public static final Setting<String> ENDPOINT_SUFFIX_SETTING = Setting.affixKeySetting(PREFIX, "endpoint_suffix",
+    key -> Setting.simpleString(key, Property.NodeScope));

/** Azure key */
public static final AffixSetting<SecureString> KEY_SETTING = Setting.affixKeySetting(PREFIX, "key",
@@ -74,15 +79,17 @@

private final String account;
private final String key;
+private final String endpointSuffix;
private final TimeValue timeout;
private final int maxRetries;
private final Proxy proxy;


-public AzureStorageSettings(String account, String key, TimeValue timeout, int maxRetries, Proxy.Type proxyType, String proxyHost,
-                            Integer proxyPort) {
+public AzureStorageSettings(String account, String key, String endpointSuffix, TimeValue timeout, int maxRetries,
+                            Proxy.Type proxyType, String proxyHost, Integer proxyPort) {
this.account = account;
this.key = key;
+this.endpointSuffix = endpointSuffix;
this.timeout = timeout;
this.maxRetries = maxRetries;

@@ -114,6 +121,10 @@ public String getAccount() {
return account;
}

+public String getEndpointSuffix() {
+    return endpointSuffix;
+}

public TimeValue getTimeout() {
return timeout;
}
@@ -132,6 +143,7 @@ public String toString() {
sb.append(", account='").append(account).append('\'');
sb.append(", key='").append(key).append('\'');
sb.append(", timeout=").append(timeout);
sb.append(", endpointSuffix='").append(endpointSuffix).append('\'');
sb.append(", maxRetries=").append(maxRetries);
sb.append(", proxy=").append(proxy);
sb.append('}');
@@ -166,6 +178,7 @@ static AzureStorageSettings getClientSettings(Settings settings, String clientNa
try (SecureString account = getConfigValue(settings, clientName, ACCOUNT_SETTING);
SecureString key = getConfigValue(settings, clientName, KEY_SETTING)) {
return new AzureStorageSettings(account.toString(), key.toString(),
+getValue(settings, clientName, ENDPOINT_SUFFIX_SETTING),
getValue(settings, clientName, TIMEOUT_SETTING),
getValue(settings, clientName, MAX_RETRIES_SETTING),
getValue(settings, clientName, PROXY_TYPE_SETTING),
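For reference (not from this commit): a standalone sketch of how an affix setting like this resolves per client, assuming the `Setting`/`AffixSetting` API in `org.elasticsearch.common.settings`. The class and client names are illustrative; `getValue(settings, clientName, ...)` above performs the same namespace resolution.

[source,java]
----
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.AffixSetting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;

public class AffixSettingSketch {
    // Same shape as the plugin's setting: azure.client.<NAME>.endpoint_suffix
    static final AffixSetting<String> ENDPOINT_SUFFIX = Setting.affixKeySetting(
            "azure.client.", "endpoint_suffix",
            key -> Setting.simpleString(key, Property.NodeScope));

    public static void main(String[] args) {
        Settings settings = Settings.builder()
                .put("azure.client.azure1.endpoint_suffix", "core.chinacloudapi.cn")
                .build();

        // The one affix setting stamps out a concrete setting per client namespace.
        String azure1 = ENDPOINT_SUFFIX.getConcreteSettingForNamespace("azure1").get(settings);
        String azure2 = ENDPOINT_SUFFIX.getConcreteSettingForNamespace("azure2").get(settings);

        System.out.println(azure1);           // core.chinacloudapi.cn
        System.out.println(azure2.isEmpty()); // true: simpleString defaults to ""
    }
}
----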
@@ -22,6 +22,8 @@
import com.microsoft.azure.storage.LocationMode;
import com.microsoft.azure.storage.RetryExponentialRetry;
import com.microsoft.azure.storage.blob.CloudBlobClient;
+import com.microsoft.azure.storage.core.Base64;
+
import org.elasticsearch.common.settings.MockSecureSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsException;
@@ -33,12 +35,15 @@
import java.net.URI;
import java.net.URISyntaxException;
import java.net.UnknownHostException;
+import java.nio.charset.StandardCharsets;
import java.util.Map;

import static org.elasticsearch.repositories.azure.AzureStorageServiceImpl.blobNameFromUri;
import static org.hamcrest.Matchers.containsInAnyOrder;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.isEmptyString;
import static org.hamcrest.Matchers.notNullValue;
import static org.hamcrest.Matchers.nullValue;

@@ -69,10 +74,31 @@ public void testReadSecuredSettings() {
secureSettings.setString("azure.client.azure2.key", "mykey2");
secureSettings.setString("azure.client.azure3.account", "myaccount3");
secureSettings.setString("azure.client.azure3.key", "mykey3");
-Settings settings = Settings.builder().setSecureSettings(secureSettings).build();
+Settings settings = Settings.builder().setSecureSettings(secureSettings)
+    .put("azure.client.azure3.endpoint_suffix", "my_endpoint_suffix").build();

Map<String, AzureStorageSettings> loadedSettings = AzureStorageSettings.load(settings);
assertThat(loadedSettings.keySet(), containsInAnyOrder("azure1","azure2","azure3","default"));

+assertThat(loadedSettings.get("azure1").getEndpointSuffix(), isEmptyString());
+assertThat(loadedSettings.get("azure2").getEndpointSuffix(), isEmptyString());
+assertThat(loadedSettings.get("azure3").getEndpointSuffix(), equalTo("my_endpoint_suffix"));
}

+public void testCreateClientWithEndpointSuffix() {
+    MockSecureSettings secureSettings = new MockSecureSettings();
+    secureSettings.setString("azure.client.azure1.account", "myaccount1");
+    secureSettings.setString("azure.client.azure1.key", Base64.encode("mykey1".getBytes(StandardCharsets.UTF_8)));
+    secureSettings.setString("azure.client.azure2.account", "myaccount2");
+    secureSettings.setString("azure.client.azure2.key", Base64.encode("mykey2".getBytes(StandardCharsets.UTF_8)));
+    Settings settings = Settings.builder().setSecureSettings(secureSettings)
+        .put("azure.client.azure1.endpoint_suffix", "my_endpoint_suffix").build();
+    AzureStorageServiceImpl azureStorageService = new AzureStorageServiceImpl(settings, AzureStorageSettings.load(settings));
+    CloudBlobClient client1 = azureStorageService.getSelectedClient("azure1", LocationMode.PRIMARY_ONLY);
+    assertThat(client1.getEndpoint().toString(), equalTo("https://myaccount1.blob.my_endpoint_suffix"));
+
+    CloudBlobClient client2 = azureStorageService.getSelectedClient("azure2", LocationMode.PRIMARY_ONLY);
+    assertThat(client2.getEndpoint().toString(), equalTo("https://myaccount2.blob.core.windows.net"));
+}

public void testGetSelectedClientWithNoPrimaryAndSecondary() {
@@ -23,6 +23,7 @@
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Options.CreateOpts;
import org.apache.hadoop.fs.Path;
+import org.elasticsearch.SpecialPermission;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.blobstore.BlobMetaData;
import org.elasticsearch.common.blobstore.BlobPath;
@@ -45,12 +46,14 @@

final class HdfsBlobContainer extends AbstractBlobContainer {
private final HdfsBlobStore store;
+private final HdfsSecurityContext securityContext;
private final Path path;
private final int bufferSize;

-HdfsBlobContainer(BlobPath blobPath, HdfsBlobStore store, Path path, int bufferSize) {
+HdfsBlobContainer(BlobPath blobPath, HdfsBlobStore store, Path path, int bufferSize, HdfsSecurityContext hdfsSecurityContext) {
super(blobPath);
this.store = store;
+this.securityContext = hdfsSecurityContext;
this.path = path;
this.bufferSize = bufferSize;
}
@@ -90,7 +93,9 @@ public InputStream readBlob(String blobName) throws IOException {
// FSDataInputStream can open connections on read() or skip() so we wrap in
// HDFSPrivilegedInputSteam which will ensure that underlying methods will
// be called with the proper privileges.
-return store.execute(fileContext -> new HDFSPrivilegedInputSteam(fileContext.open(new Path(path, blobName), bufferSize)));
+return store.execute(fileContext ->
+    new HDFSPrivilegedInputSteam(fileContext.open(new Path(path, blobName), bufferSize), securityContext)
+);
}

@Override
@@ -144,8 +149,11 @@ public Map<String, BlobMetaData> listBlobs() throws IOException {
*/
private static class HDFSPrivilegedInputSteam extends FilterInputStream {

-HDFSPrivilegedInputSteam(InputStream in) {
+private final HdfsSecurityContext securityContext;
+
+HDFSPrivilegedInputSteam(InputStream in, HdfsSecurityContext hdfsSecurityContext) {
super(in);
+this.securityContext = hdfsSecurityContext;
}

public int read() throws IOException {
@@ -175,9 +183,10 @@ public synchronized void reset() throws IOException {
});
}

-private static <T> T doPrivilegedOrThrow(PrivilegedExceptionAction<T> action) throws IOException {
+private <T> T doPrivilegedOrThrow(PrivilegedExceptionAction<T> action) throws IOException {
+    SpecialPermission.check();
try {
-return AccessController.doPrivileged(action);
+return AccessController.doPrivileged(action, null, securityContext.getRestrictedExecutionPermissions());
} catch (PrivilegedActionException e) {
throw (IOException) e.getCause();
}
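The plumbing above follows a standard `java.security` pattern: the three-argument `AccessController.doPrivileged(action, context, perms...)` overload limits what the wrapped action may assert to the listed permissions instead of the plugin's full policy. A standalone sketch of the same shape (not from this commit; the permission values mirror the ones named in `HdfsSecurityContext` below, and the temp-file read stands in for the HDFS stream call):

[source,java]
----
import java.io.IOException;
import java.lang.reflect.ReflectPermission;
import java.nio.file.Files;
import java.nio.file.Path;
import java.security.AccessController;
import java.security.Permission;
import java.security.PrivilegedActionException;
import java.security.PrivilegedExceptionAction;

import javax.security.auth.AuthPermission;
import javax.security.auth.PrivateCredentialPermission;

public class RestrictedPrivilegeSketch {

    // Mirrors the restricted set HdfsSecurityContext hands to the blob container.
    private static final Permission[] RESTRICTED = new Permission[] {
            new ReflectPermission("suppressAccessChecks"),
            new AuthPermission("modifyPrivateCredentials"),
            new PrivateCredentialPermission("org.apache.hadoop.security.Credentials * \"*\"", "read")
    };

    static <T> T doPrivilegedOrThrow(PrivilegedExceptionAction<T> action) throws IOException {
        try {
            // Limited doPrivileged: the action asserts only RESTRICTED,
            // matching the shape of the change above.
            return AccessController.doPrivileged(action, null, RESTRICTED);
        } catch (PrivilegedActionException e) {
            throw (IOException) e.getCause();
        }
    }

    public static void main(String[] args) throws IOException {
        Path tmp = Files.createTempFile("sketch", ".txt");
        byte[] bytes = doPrivilegedOrThrow(() -> Files.readAllBytes(tmp));
        System.out.println(bytes.length);
    }
}
----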
@@ -75,7 +75,7 @@ public String toString() {

@Override
public BlobContainer blobContainer(BlobPath path) {
-return new HdfsBlobContainer(path, this, buildHdfsPath(path), bufferSize);
+return new HdfsBlobContainer(path, this, buildHdfsPath(path), bufferSize, securityContext);
}

private Path buildHdfsPath(BlobPath blobPath) {
@@ -132,7 +132,7 @@ private FileContext createContext(URI uri, Settings repositorySettings) {
hadoopConfiguration.setBoolean("fs.hdfs.impl.disable.cache", true);

// Create the filecontext with our user information
-// This will correctly configure the filecontext to have our UGI as it's internal user.
+// This will correctly configure the filecontext to have our UGI as its internal user.
return ugi.doAs((PrivilegedAction<FileContext>) () -> {
try {
AbstractFileSystem fs = AbstractFileSystem.get(uri, hadoopConfiguration);
@@ -56,7 +56,9 @@ class HdfsSecurityContext {
// 1) hadoop dynamic proxy is messy with access rules
new ReflectPermission("suppressAccessChecks"),
// 2) allow hadoop to add credentials to our Subject
new AuthPermission("modifyPrivateCredentials")
new AuthPermission("modifyPrivateCredentials"),
// 3) RPC Engine requires this for re-establishing pooled connections over the lifetime of the client
new PrivateCredentialPermission("org.apache.hadoop.security.Credentials * \"*\"", "read")
};

// If Security is enabled, we need all the following elevated permissions:
@@ -0,0 +1,29 @@
# Integration tests for HDFS Repository plugin
#
# Tests retrieving information about snapshot
#
---
"Get a snapshot - readonly":
# Create repository
- do:
snapshot.create_repository:
repository: test_snapshot_repository_ro
body:
type: hdfs
settings:
uri: "hdfs://localhost:9999"
path: "/user/elasticsearch/existing/readonly-repository"
readonly: true

# List snapshot info
- do:
snapshot.get:
repository: test_snapshot_repository_ro
snapshot: "_all"

- length: { snapshots: 1 }

# Remove our repository
- do:
snapshot.delete_repository:
repository: test_snapshot_repository_ro
@@ -0,0 +1,31 @@
# Integration tests for HDFS Repository plugin
#
# Tests retrieving information about snapshot
#
---
"Get a snapshot - readonly":
# Create repository
- do:
snapshot.create_repository:
repository: test_snapshot_repository_ro
body:
type: hdfs
settings:
uri: "hdfs://localhost:9998"
path: "/user/elasticsearch/existing/readonly-repository"
security:
principal: "elasticsearch@BUILD.ELASTIC.CO"
readonly: true

# List snapshot info
- do:
snapshot.get:
repository: test_snapshot_repository_ro
snapshot: "_all"

- length: { snapshots: 1 }

# Remove our repository
- do:
snapshot.delete_repository:
repository: test_snapshot_repository_ro