Allowlist tracestate header on remote server port #112649

Merged
5 changes: 5 additions & 0 deletions docs/changelog/112649.yaml
@@ -0,0 +1,5 @@
pr: 112649
summary: Allowlist `tracestate` header on remote server port
area: Security
type: bug
issues: []
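
Background: `traceparent` and `tracestate` are the two W3C trace-context headers. `traceparent` carries the trace id, parent span id and sampling flags, while `tracestate` carries vendor-specific key/value pairs (the Elastic APM agent uses it to propagate sampling information, roughly of the form `es=s:1`). Before this fix, `tracestate` was not on the allowlist of transport request headers accepted on the dedicated remote cluster server port, so traced requests to a remote cluster (RCS 2.0) failed the header check.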
@@ -0,0 +1,104 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

package org.elasticsearch.xpack.remotecluster;

import com.sun.net.httpserver.HttpExchange;
import com.sun.net.httpserver.HttpServer;

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.core.SuppressForbidden;
import org.junit.rules.ExternalResource;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.nio.charset.StandardCharsets;
import java.util.List;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.TimeUnit;
import java.util.function.Consumer;
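
// Test fixture: a small HTTP server that accepts request bodies (for example from the APM agent),
// splits each body into NDJSON lines, and hands every non-empty line to the consumer registered
// via addMessageConsumer.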

@SuppressForbidden(reason = "Uses an HTTP server for testing")
class ConsumingTestServer extends ExternalResource {
    private static final Logger logger = LogManager.getLogger(ConsumingTestServer.class);
    final ArrayBlockingQueue<String> received = new ArrayBlockingQueue<>(1000);

    private static HttpServer server;
    private final Thread messageConsumerThread = consumerThread();
    private volatile Consumer<String> consumer;
    private volatile boolean consumerRunning = true;

    @Override
    protected void before() throws Throwable {
        server = HttpServer.create();
        server.bind(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0), 0);
        server.createContext("/", this::handle);
        server.start();

        messageConsumerThread.start();
    }

    private Thread consumerThread() {
        return new Thread(() -> {
            while (consumerRunning) {
                if (consumer != null) {
                    try {
                        String msg = received.poll(1L, TimeUnit.SECONDS);
                        if (msg != null && msg.isEmpty() == false) {
                            consumer.accept(msg);
                        }

                    } catch (InterruptedException e) {
                        throw new RuntimeException(e);
                    }
                }
            }
        });
    }

    @Override
    protected void after() {
        server.stop(1);
        consumerRunning = false;
    }

    private void handle(HttpExchange exchange) throws IOException {
        try (exchange) {
            try {
                try (InputStream requestBody = exchange.getRequestBody()) {
                    if (requestBody != null) {
                        var read = readJsonMessages(requestBody);
                        received.addAll(read);
                    }
                }

            } catch (RuntimeException e) {
                logger.warn("failed to parse request", e);
            }
            exchange.sendResponseHeaders(201, 0);
        }
    }

    private List<String> readJsonMessages(InputStream input) {
        // parse NDJSON
        return new BufferedReader(new InputStreamReader(input, StandardCharsets.UTF_8)).lines().toList();
    }

    public int getPort() {
        return server.getAddress().getPort();
    }

    public void addMessageConsumer(Consumer<String> messageConsumer) {
        this.consumer = messageConsumer;
    }
}
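
For illustration, here is a minimal sketch (not part of this PR; the test class, method, and assertion are hypothetical, only the ConsumingTestServer calls are its real API) of how this fixture is meant to be wired into a test:

    import org.junit.ClassRule;

    import java.util.concurrent.CountDownLatch;
    import java.util.concurrent.TimeUnit;

    import static org.junit.Assert.assertTrue;

    public class ExampleApmCaptureIT {
        @ClassRule
        public static ConsumingTestServer mockApmServer = new ConsumingTestServer();

        public void testSeesTransactionMessage() throws Exception {
            CountDownLatch seen = new CountDownLatch(1);
            // Each message handed to the consumer is one NDJSON line posted to the server.
            mockApmServer.addMessageConsumer(line -> {
                if (line.contains("\"transaction\"")) {
                    seen.countDown();
                }
            });
            // The cluster under test would point telemetry.agent.server_url at
            // "http://127.0.0.1:" + mockApmServer.getPort()
            assertTrue(seen.await(30, TimeUnit.SECONDS));
        }
    }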
@@ -0,0 +1,201 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0; you may not use this file except in compliance with the Elastic License
 * 2.0.
 */

package org.elasticsearch.xpack.remotecluster;

import org.elasticsearch.client.Request;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.Response;
import org.elasticsearch.common.xcontent.support.XContentMapValues;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.test.cluster.ElasticsearchCluster;
import org.elasticsearch.test.cluster.local.distribution.DistributionType;
import org.elasticsearch.test.cluster.util.resource.Resource;
import org.elasticsearch.xcontent.XContentParser;
import org.elasticsearch.xcontent.XContentParserConfiguration;
import org.elasticsearch.xcontent.spi.XContentProvider;
import org.hamcrest.Matcher;
import org.hamcrest.StringDescription;
import org.junit.ClassRule;
import org.junit.rules.RuleChain;
import org.junit.rules.TestRule;

import java.io.IOException;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Consumer;
import java.util.function.Predicate;
import java.util.stream.Collectors;

import static org.hamcrest.Matchers.equalTo;

public class RemoteClusterSecurityWithApmTracingRestIT extends AbstractRemoteClusterSecurityTestCase {
    private static final AtomicReference<Map<String, Object>> API_KEY_MAP_REF = new AtomicReference<>();
    private static final XContentProvider.FormatProvider XCONTENT = XContentProvider.provider().getJsonXContent();
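    // The two values below follow the W3C trace-context traceparent format:
    // <version>-<trace-id>-<parent-span-id>-<trace-flags>, where flags "01" means sampled.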
    final String traceIdValue = "0af7651916cd43dd8448eb211c80319c";
    final String traceParentValue = "00-" + traceIdValue + "-b7ad6b7169203331-01";

    private static final ConsumingTestServer mockApmServer = new ConsumingTestServer();

    static {
        fulfillingCluster = ElasticsearchCluster.local()
            .distribution(DistributionType.DEFAULT)
            .name("fulfilling-cluster")
            .apply(commonClusterConfig)
            .setting("telemetry.metrics.enabled", "false")
            .setting("telemetry.tracing.enabled", "true")
            .setting("telemetry.agent.metrics_interval", "1s")
            .setting("telemetry.agent.server_url", () -> "http://127.0.0.1:" + mockApmServer.getPort())
            // to ensure tracestate header is always set to cover RCS 2.0 handling of the tracestate header
            .setting("telemetry.agent.transaction_sample_rate", "1.0")
            .setting("remote_cluster_server.enabled", "true")
            .setting("remote_cluster.port", "0")
            .setting("xpack.security.remote_cluster_server.ssl.enabled", "true")
            .setting("xpack.security.remote_cluster_server.ssl.key", "remote-cluster.key")
            .setting("xpack.security.remote_cluster_server.ssl.certificate", "remote-cluster.crt")
            .keystore("xpack.security.remote_cluster_server.ssl.secure_key_passphrase", "remote-cluster-password")
            .rolesFile(Resource.fromClasspath("roles.yml"))
            .build();

        queryCluster = ElasticsearchCluster.local()
            .distribution(DistributionType.DEFAULT)
            .name("query-cluster")
            .apply(commonClusterConfig)
            .setting("telemetry.metrics.enabled", "false")
            .setting("telemetry.tracing.enabled", "true")
            // to ensure tracestate header is always set to cover RCS 2.0 handling of the tracestate header
            .setting("telemetry.agent.transaction_sample_rate", "1.0")
            .setting("telemetry.agent.metrics_interval", "1s")
            .setting("telemetry.agent.server_url", () -> "http://127.0.0.1:" + mockApmServer.getPort())
            .setting("xpack.security.remote_cluster_client.ssl.enabled", "true")
            .setting("xpack.security.remote_cluster_client.ssl.certificate_authorities", "remote-cluster-ca.crt")
            .keystore("cluster.remote.my_remote_cluster.credentials", () -> {
                if (API_KEY_MAP_REF.get() == null) {
                    final Map<String, Object> apiKeyMap = createCrossClusterAccessApiKey("""
                        {
                          "search": [
                            {
                              "names": ["*"]
                            }
                          ]
                        }""");
                    API_KEY_MAP_REF.set(apiKeyMap);
                }
                return (String) API_KEY_MAP_REF.get().get("encoded");
            })
            .rolesFile(Resource.fromClasspath("roles.yml"))
            .user(REMOTE_METRIC_USER, PASS.toString(), "read_remote_shared_metrics", false)
            .build();
    }

    @ClassRule
    // Use a RuleChain to ensure that fulfilling cluster is started before query cluster
    public static TestRule clusterRule = RuleChain.outerRule(mockApmServer).around(fulfillingCluster).around(queryCluster);

    @SuppressWarnings("unchecked")
    public void testTracingCrossCluster() throws Exception {
Comment from the PR author:

Note: even though the PR is a fix for the tracestate header, I'm not asserting anything specific around tracestate here; the purpose of this test is to exercise tracing with RCS 2.0 end to end and make sure it works, without homing in on too many specific details.

(Without the prod code fix in this PR, this test fails with a header check failure, as expected).

        configureRemoteCluster();
        Set<Predicate<Map<String, Object>>> assertions = new HashSet<>(
            Set.of(
                // REST action on query cluster
                allTrue(
                    transactionValue("name", equalTo("GET /_resolve/cluster/{name}")),
                    transactionValue("trace_id", equalTo(traceIdValue))
                ),
                // transport action on fulfilling cluster
                allTrue(
                    transactionValue("name", equalTo("indices:admin/resolve/cluster")),
                    transactionValue("trace_id", equalTo(traceIdValue))
                )
            )
        );

        CountDownLatch finished = new CountDownLatch(1);

        // a consumer that removes an assertion from the set once a received message matches it
        Consumer<String> messageConsumer = (String message) -> {
            var apmMessage = parseMap(message);
            if (isTransactionTraceMessage(apmMessage)) {
                logger.info("Apm transaction message received: {}", message);
                assertions.removeIf(e -> e.test(apmMessage));
            }

            if (assertions.isEmpty()) {
                finished.countDown();
            }
        };

        mockApmServer.addMessageConsumer(messageConsumer);

        // Trigger an action that we know will cross clusters -- doesn't much matter which one
        final Request resolveRequest = new Request("GET", "/_resolve/cluster/my_remote_cluster:*");
        resolveRequest.setOptions(
            RequestOptions.DEFAULT.toBuilder()
                .addHeader("Authorization", headerFromRandomAuthMethod(REMOTE_METRIC_USER, PASS))
                .addHeader(Task.TRACE_PARENT_HTTP_HEADER, traceParentValue)
        );
        final Response response = client().performRequest(resolveRequest);
        assertOK(response);

        finished.await(30, TimeUnit.SECONDS);
        assertThat(assertions, equalTo(Collections.emptySet()));
    }

    private boolean isTransactionTraceMessage(Map<String, Object> apmMessage) {
        return apmMessage.containsKey("transaction");
    }

    @SuppressWarnings("unchecked")
    private Predicate<Map<String, Object>> allTrue(Predicate<Map<String, Object>>... predicates) {
        var allTrueTest = Arrays.stream(predicates).reduce(v -> true, Predicate::and);
        return new Predicate<>() {
            @Override
            public boolean test(Map<String, Object> map) {
                return allTrueTest.test(map);
            }

            @Override
            public String toString() {
                return Arrays.stream(predicates).map(Object::toString).collect(Collectors.joining(" and "));
            }
        };
    }

    @SuppressWarnings("unchecked")
    private <T> Predicate<Map<String, Object>> transactionValue(String path, Matcher<T> expected) {
        return new Predicate<>() {
            @Override
            public boolean test(Map<String, Object> map) {
                var transaction = (Map<String, Object>) map.get("transaction");
                var value = XContentMapValues.extractValue(path, transaction);
                return expected.matches((T) value);
            }

            @Override
            public String toString() {
                StringDescription matcherDescription = new StringDescription();
                expected.describeTo(matcherDescription);
                return path + " " + matcherDescription;
            }
        };
    }

    private Map<String, Object> parseMap(String message) {
        try (XContentParser parser = XCONTENT.XContent().createParser(XContentParserConfiguration.EMPTY, message)) {
            return parser.map();
        } catch (IOException e) {
            fail(e);
            return Collections.emptyMap();
        }
    }
}
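
To illustrate how the predicate helpers above compose, here is a small hypothetical snippet (the map literal stands in for a parsed APM transaction message, using the same values the test asserts on):

    Map<String, Object> apmMessage = Map.of(
        "transaction", Map.of(
            "name", "GET /_resolve/cluster/{name}",
            "trace_id", "0af7651916cd43dd8448eb211c80319c"
        )
    );
    // Both predicates hold for this message, so test() returns true and the test's consumer
    // removes the corresponding assertion from the set.
    boolean matches = allTrue(
        transactionValue("name", equalTo("GET /_resolve/cluster/{name}")),
        transactionValue("trace_id", equalTo("0af7651916cd43dd8448eb211c80319c"))
    ).test(apmMessage);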
@@ -41,6 +41,7 @@ final class CrossClusterAccessServerTransportFilter extends ServerTransportFilter
            Set.of(CROSS_CLUSTER_ACCESS_CREDENTIALS_HEADER_KEY, CROSS_CLUSTER_ACCESS_SUBJECT_INFO_HEADER_KEY)
        );
        allowedHeaders.add(AuditUtil.AUDIT_REQUEST_ID);
        allowedHeaders.add(Task.TRACE_STATE);
        allowedHeaders.addAll(Task.HEADERS_TO_COPY);
        ALLOWED_TRANSPORT_HEADERS = Set.copyOf(allowedHeaders);
    }
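
For context, the added line allowlists the W3C `tracestate` header (Task.TRACE_STATE) on the dedicated remote cluster server port. A simplified sketch of the kind of allowlist check involved, assuming a hypothetical helper (this is not the actual CrossClusterAccessServerTransportFilter code):

    // Hypothetical illustration: reject any transport request header that is not allowlisted.
    static void ensureOnlyAllowedHeaders(Set<String> requestHeaders, Set<String> allowedTransportHeaders) {
        for (String header : requestHeaders) {
            if (allowedTransportHeaders.contains(header) == false) {
                throw new IllegalArgumentException(
                    "transport request header [" + header + "] is not allowed on the remote cluster server port"
                );
            }
        }
    }

Before this change, a traced and sampled request carried a `tracestate` header that was not in ALLOWED_TRANSPORT_HEADERS and therefore failed this kind of check; with Task.TRACE_STATE added, it passes.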