Merge branch 'main' of https://github.com/elastic/elasticsearch into make_fieldcaps_lighter
astefan committed Nov 23, 2023
2 parents 78b4d07 + d835ebf commit a8bd04c
Showing 614 changed files with 7,150 additions and 7,601 deletions.
83 changes: 61 additions & 22 deletions .ci/init.gradle
@@ -10,6 +10,8 @@ initscript {
}
}

boolean USE_ARTIFACTORY = false

if (System.getenv('VAULT_ADDR') == null) {
// When trying to reproduce errors outside of CI, it can be useful to allow this to just return rather than blowing up
if (System.getenv('CI') == null) {
@@ -48,38 +50,75 @@ final Vault vault = new Vault(
.engineVersion(1)
.token(vaultToken)
.build()
).withRetries(5, 1000)
)
.withRetries(5, 1000)


if (USE_ARTIFACTORY) {
final Map<String, String> artifactoryCredentials = vault.logical()
.read("${vaultPathPrefix}/artifactory.elstc.co")
.getData()
logger.info("Using elastic artifactory repos")
Closure configCache = {
return {
name "artifactory-gradle-release"
url "https://artifactory.elstc.co/artifactory/gradle-release"
credentials {
username artifactoryCredentials.get("username")
password artifactoryCredentials.get("token")
}
}
}
settingsEvaluated { settings ->
settings.pluginManagement {
repositories {
maven configCache()
}
}
}
projectsLoaded {
allprojects {
buildscript {
repositories {
maven configCache()
}
}
repositories {
maven configCache()
}
}
}
}

gradle.settingsEvaluated { settings ->
settings.pluginManager.withPlugin("com.gradle.enterprise") {
configureGradleEnterprise(settings)
settings.gradleEnterprise {
server = 'https://gradle-enterprise.elastic.co'
}
}
}

void configureGradleEnterprise(def settings) {
settings.gradleEnterprise {
server = 'https://gradle-enterprise.elastic.co'
buildScan.publishAlways()
}

def isCI = System.getenv("CI") == "true"
settings.buildCache {
local {
// Disable the local build cache in CI since we use ephemeral workers and it incurs an IO penalty
enabled = isCI == false
}
remote(settings.gradleEnterprise.buildCache) {
if (isCI) {
final boolean buildCachePush = Boolean.valueOf(System.getProperty('org.elasticsearch.build.cache.push', 'false'))
final Map<String, String> buildCacheCredentials = System.getenv("GRADLE_BUILD_CACHE_USERNAME") ? [:] : vault.logical()
.read("${vaultPathPrefix}/gradle-build-cache")
.getData()
def username = System.getenv("GRADLE_BUILD_CACHE_USERNAME") ?: buildCacheCredentials.get("username")
def password = System.getenv("GRADLE_BUILD_CACHE_PASSWORD") ?: buildCacheCredentials.get("password")
final String buildCacheUrl = System.getProperty('org.elasticsearch.build.cache.url')
final boolean buildCachePush = Boolean.valueOf(System.getProperty('org.elasticsearch.build.cache.push', 'false'))

if (buildCacheUrl) {
final Map<String, String> buildCacheCredentials = System.getenv("GRADLE_BUILD_CACHE_USERNAME") ? [:] : vault.logical()
.read("${vaultPathPrefix}/gradle-build-cache")
.getData()
gradle.settingsEvaluated { settings ->
settings.buildCache {
local {
// Disable the local build cache in CI since we use ephemeral workers and it incurs an IO penalty
enabled = false
}
remote(HttpBuildCache) {
url = buildCacheUrl
push = buildCachePush
usernameAndPassword(username, password)
credentials {
username = System.getenv("GRADLE_BUILD_CACHE_USERNAME") ?: buildCacheCredentials.get("username")
password = System.getenv("GRADLE_BUILD_CACHE_PASSWORD") ?: buildCacheCredentials.get("password")
}
}
}
}
@@ -15,6 +15,12 @@ buildScan {
URL jenkinsUrl = System.getenv('JENKINS_URL') ? new URL(System.getenv('JENKINS_URL')) : null
String buildKiteUrl = System.getenv('BUILDKITE_BUILD_URL') ? System.getenv('BUILDKITE_BUILD_URL') : null

// Automatically publish scans from Elasticsearch CI
if (jenkinsUrl?.host?.endsWith('elastic.co') || jenkinsUrl?.host?.endsWith('elastic.dev') || System.getenv('BUILDKITE') == 'true') {
publishAlways()
buildScan.server = 'https://gradle-enterprise.elastic.co'
}

background {
tag OS.current().name()
tag Architecture.current().name()
@@ -83,11 +83,6 @@ private TaskProvider<LoggedExec> createRunBwcGradleTask(
return getJavaHome(Integer.parseInt(minimumCompilerVersion));
}));

// temporally workaround for reworked gradle enterprise setup
// removed when PR https://github.com/elastic/elasticsearch/pull/102180 backported
// to all BWC branches
loggedExec.getEnvironment().put("BUILDKITE", "false");

if (BuildParams.isCi() && OS.current() != OS.WINDOWS) {
// TODO: Disabled for now until we can figure out why files are getting corrupted
// loggedExec.getEnvironment().put("GRADLE_RO_DEP_CACHE", System.getProperty("user.home") + "/gradle_ro_cache");
5 changes: 5 additions & 0 deletions docs/changelog/100938.yaml
@@ -0,0 +1,5 @@
pr: 100938
summary: "Set includeShardsStats = false in NodesStatsRequest where the caller does not use shards-level statistics"
area: Stats
type: enhancement
issues: []
5 changes: 5 additions & 0 deletions docs/changelog/101979.yaml
@@ -0,0 +1,5 @@
pr: 101979
summary: Calculate CO2 emission and costs
area: Application
type: enhancement
issues: []
5 changes: 5 additions & 0 deletions docs/changelog/102311.yaml
@@ -0,0 +1,5 @@
pr: 102311
summary: Upgrade reactor netty http version
area: Snapshot/Restore
type: upgrade
issues: []
5 changes: 5 additions & 0 deletions docs/changelog/102461.yaml
@@ -0,0 +1,5 @@
pr: 102461
summary: Enable concurrency for scripted metric agg
area: Aggregations
type: enhancement
issues: []
5 changes: 5 additions & 0 deletions docs/changelog/102511.yaml
@@ -0,0 +1,5 @@
pr: 102511
summary: Trigger parent circuit breaker when building scorers in filters aggregation
area: Aggregations
type: bug
issues: []
6 changes: 6 additions & 0 deletions docs/changelog/102512.yaml
@@ -0,0 +1,6 @@
pr: 102512
summary: Implement exponential backoff for transform state persistence retrying
area: Transform
type: enhancement
issues:
- 102528
@@ -1,20 +1,36 @@
[role="xpack"]
[[tutorial-migrate-data-stream-from-ilm-to-dsl]]
=== Tutorial: Migrate ILM managed data stream to Data stream lifecycle
=== Tutorial: Migrate ILM managed data stream to data stream lifecycle

preview::[]

In this tutorial we'll look at migrating an existing data stream from {ilm-init} to
Data stream lifecycle. The existing {ilm-init} managed backing indices will continue
In this tutorial we'll look at migrating an existing data stream from Index Lifecycle Management ({ilm-init}) to
data stream lifecycle. The existing {ilm-init} managed backing indices will continue
to be managed by {ilm-init} until they age out and get deleted by {ilm-init}; however,
the new backing indices will be managed by Data stream lifecycle.
This way, a data stream is gradually migrated away from being managed by {ilm-cap} to
being managed by Data stream lifecycle. As we'll see, {ilm-cap} and Data stream lifecycle
can co-manage a data stream; however, an index can only be managed by one system at
the new backing indices will be managed by data stream lifecycle.
This way, a data stream is gradually migrated away from being managed by {ilm-init} to
being managed by data stream lifecycle. As we'll see, {ilm-init} and data stream lifecycle
can co-manage a data stream; however, an index can only be managed by one system at
a time.

Let's first create a data stream with two backing indices managed by {ilm-cap}.
We first create an {ilm-cap} policy:
[discrete]
[[migrate-dsl-ilm-tldr]]
==== TL;DR
To migrate a data stream from {ilm-init} to data stream lifecycle we'll have to execute
two steps:

1. Update the index template that's backing the data stream to set <<index-lifecycle-prefer-ilm, prefer_ilm>>
to `false`, and to configure data stream lifecycle.
2. Configure the data stream lifecycle for the _existing_ data stream using
the <<data-streams-put-lifecycle, lifecycle API>>.

For more details see the <<migrate-from-ilm-to-dsl, migrate to data stream lifecycle>> section.
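
In API terms, the two steps boil down to two requests. The sketch below uses the
`dsl-data-stream-template` index template, the `pre-dsl-ilm-policy` {ilm-init} policy, and
the `dsl-data-stream` data stream that this tutorial works with; the `index_patterns` value
and the `7d` retention are illustrative assumptions, and the complete, tested requests
appear in the sections below.

[source,console]
----
PUT _index_template/dsl-data-stream-template
{
  "index_patterns": ["dsl-data-stream*"],
  "data_stream": {},
  "template": {
    "settings": {
      "index.lifecycle.name": "pre-dsl-ilm-policy",
      "index.lifecycle.prefer_ilm": false
    },
    "lifecycle": {
      "data_retention": "7d"
    }
  }
}

PUT _data_stream/dsl-data-stream/_lifecycle
{
  "data_retention": "7d"
}
----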

[discrete]
[[setup-test-data]]
==== Setup ILM managed data stream
Let's first create a data stream with two backing indices managed by {ilm-init}.
We first create an {ilm-init} policy:

[source,console]
----
@@ -40,7 +56,7 @@ PUT _ilm/policy/pre-dsl-ilm-policy
}
----

And let's create an index template that'll back the data stream and configures {ilm-cap}:
And let's create an index template that'll back the data stream and configures {ilm-init}:

[source,console]
----
@@ -153,11 +169,14 @@ currently richer in features).
stream (which will be configured on all the new backing indices, as long as it exists
in the index template).

[discrete]
[[migrate-from-ilm-to-dsl]]
==== Migrate data stream to data stream lifecycle
To migrate the `dsl-data-stream` to data stream lifecycle we'll have to execute
two steps:

1. Update the index template that's backing the index template to configure <<index-lifecycle-prefer-ilm, prefer_ilm>>
to `false`, and to configure data stream lifecycle.
1. Update the index template that's backing the data stream to set <<index-lifecycle-prefer-ilm, prefer_ilm>>
to `false`, and to configure data stream lifecycle.
2. Configure the data stream lifecycle for the _existing_ `dsl-data-stream` using
the <<data-streams-put-lifecycle, lifecycle API>>.

@@ -191,14 +210,14 @@ PUT _index_template/dsl-data-stream-template
// TEST[continued]

<1> The `prefer_ilm` setting will now be configured on the **new** backing indices
(created by rolling over the data stream) such that {ilm-init} does _not_ take
precedence over Data stream lifecycle.
<2> We're configuring the data stream lifecycle so _new_ data streams will be
managed by Data stream lifecycle.
(created by rolling over the data stream) such that {ilm-init} does _not_ take
precedence over data stream lifecycle.
<2> We're configuring the data stream lifecycle so _new_ data streams will be
managed by data stream lifecycle.

We've now made sure that new data streams will be managed by Data stream lifecycle.
We've now made sure that new data streams will be managed by data stream lifecycle.

Let's update our existing `dsl-data-stream` and configure Data stream lifecycle:
Let's update our existing `dsl-data-stream` and configure data stream lifecycle:

[source,console]
----
@@ -209,8 +228,8 @@ PUT _data_stream/dsl-data-stream/_lifecycle
----
// TEST[continued]

We can inspect the data stream to check that the next generation will indeed be
managed by Data stream lifecycle:
We can inspect the data stream to check that the next generation will indeed be
managed by data stream lifecycle:

[source,console]
--------------------------------------------------
@@ -273,8 +292,8 @@ GET _data_stream/dsl-data-stream
<4> The `prefer_ilm` setting value we configured in the index template is reflected
and will be configured accordingly for new backing indices.

We'll now rollover the data stream to see the new generation index being managed by
Data stream lifecycle:
We'll now roll over the data stream to see the new generation index being managed by
data stream lifecycle:

[source,console]
----
@@ -352,14 +371,17 @@ GET _data_stream/dsl-data-stream
in the index template
<4> The new write index is managed by `Data stream lifecycle`

We can easily change this data stream to be managed by {ilm-cap} because we didn't remove
the {ilm-cap} policy when we <<update-index-template-for-dsl, updated
the index template>>.
[discrete]
[[migrate-from-dsl-to-ilm]]
==== Migrate data stream back to ILM
We can easily change this data stream to be managed by {ilm-init} because we didn't remove
the {ilm-init} policy when we <<update-index-template-for-dsl, updated
the index template>>.

We can achieve this in two ways:

1. <<data-streams-delete-lifecycle, Delete the lifecycle>> from the data streams
2. Disable Data stream lifecycle by configuring the `enabled` flag to `false`.
2. Disable data stream lifecycle by configuring the `enabled` flag to `false`.

Let's implement option 2 and disable the data stream lifecycle:

@@ -439,13 +461,13 @@ GET _data_stream/dsl-data-stream
// TESTRESPONSE[s/"index_name": ".ds-dsl-data-stream-2023.10.19-000003"/"index_name": $body.data_streams.0.indices.2.index_name/]
// TESTRESPONSE[s/"index_uuid": "PA_JquKGSiKcAKBA8abcd1"/"index_uuid": $body.data_streams.0.indices.2.index_uuid/]
// TESTRESPONSE[s/"status": "GREEN"/"status": "YELLOW","failure_indices":[],"failure_store":false/]
<1> The write index is now managed by {ilm-cap}
<1> The write index is now managed by {ilm-init}
<2> The `lifecycle` configured on the data stream is now disabled.
<3> The next write index will be managed by {ilm-cap}
<3> The next write index will be managed by {ilm-init}

Had we removed the {ilm-cap} policy from the index template when we <<update-index-template-for-dsl, updated>>
Had we removed the {ilm-init} policy from the index template when we <<update-index-template-for-dsl, updated>>
it, the write index of the data stream would now be `Unmanaged` because the index
wouldn't have the {ilm-cap} policy configured to fallback onto.
wouldn't have the {ilm-init} policy configured to fall back on.

//////////////////////////
[source,console]
@@ -146,6 +146,11 @@ When an alert occurs, it is always the same name as the job ID of the associated
them from generating actions. For more details, refer to
{kibana-ref}/create-and-manage-rules.html#controlling-rules[Snooze and disable rules].

You can also review how the alerts that occurred correlate with the
{anomaly-detect} results in the **Anomaly explorer** by using the
**Anomaly timeline** swimlane and the **Alerts** panel.


[[creating-anomaly-jobs-health-rules]]
== {anomaly-jobs-cap} health rules

22 changes: 17 additions & 5 deletions docs/reference/modules/discovery/quorums.asciidoc
@@ -15,7 +15,7 @@ those of the other piece.

Elasticsearch allows you to add and remove master-eligible nodes to a running
cluster. In many cases you can do this simply by starting or stopping the nodes
as required. See <<add-elasticsearch-nodes>>.
as required. See <<add-elasticsearch-nodes>> for more information.

As nodes are added or removed Elasticsearch maintains an optimal level of fault
tolerance by updating the cluster's <<modules-discovery-voting,voting
@@ -26,17 +26,29 @@ voting configuration have responded. Usually the voting configuration is the
same as the set of all the master-eligible nodes that are currently in the
cluster. However, there are some situations in which they may be different.

// tag::quorums-and-availability[]
[IMPORTANT]
====
To be sure that the cluster remains available you **must not stop half or more
of the nodes in the voting configuration at the same time**. As long as more
than half of the voting nodes are available the cluster can still work normally.
This means that if there are three or four master-eligible nodes, the cluster
can tolerate one of them being unavailable. If there are two or fewer
master-eligible nodes, they must all remain available.
After a node has joined or left the cluster the elected master must issue a
cluster-state update that adjusts the voting configuration to match, and this
can take a short time to complete. It is important to wait for this adjustment
to complete before removing more nodes from the cluster.
If you stop half or more of the nodes in the voting configuration at the same
time then the cluster will be unavailable until you bring enough nodes back
online to form a quorum again. While the cluster is unavailable, any remaining
nodes will report in their logs that they cannot discover or elect a master
node. See <<discovery-troubleshooting>> for more information.
====
// end::quorums-and-availability[]

After a master-eligible node has joined or left the cluster the elected master
may issue a cluster-state update that adjusts the voting configuration to match,
and this can take a short time to complete. It is important to wait for this
adjustment to complete before removing more nodes from the cluster. See
<<modules-discovery-removing-nodes>> for more information.
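
One way to check which nodes are currently in the voting configuration, for example to
confirm that the adjustment has been applied before taking further nodes offline, is to
read it from the cluster state. This is a minimal sketch; the filter path assumes the
committed configuration is exposed under the cluster coordination metadata:

[source,console]
----
GET /_cluster/state?filter_path=metadata.cluster_coordination.last_committed_config
----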

[discrete]
==== Master elections
7 changes: 1 addition & 6 deletions docs/reference/modules/discovery/voting.asciidoc
@@ -11,12 +11,7 @@ Usually the voting configuration is the same as the set of all the
master-eligible nodes that are currently in the cluster. However, there are some
situations in which they may be different.

IMPORTANT: To ensure the cluster remains available, you **must not stop half or
more of the nodes in the voting configuration at the same time**. As long as more
than half of the voting nodes are available, the cluster can work normally. For
example, if there are three or four master-eligible nodes, the cluster
can tolerate one unavailable node. If there are two or fewer master-eligible
nodes, they must all remain available.
include::quorums.asciidoc[tag=quorums-and-availability]

After a node joins or leaves the cluster, {es} reacts by automatically making
corresponding changes to the voting configuration in order to ensure that the